mm: fold and remove lru_cache_add_anon() and lru_cache_add_file()
lru_cache_add_anon() and lru_cache_add_file() are identical functions, and
for the purpose of all callers they are equivalent to lru_cache_add(), so
fold them into it and remove them.
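For reference, the two helpers being removed are identical byte for byte
(their bodies appear in the mm/swap.c hunk below):

	void lru_cache_add_anon(struct page *page)
	{
		if (PageActive(page))
			ClearPageActive(page);
		__lru_cache_add(page);
	}

	void lru_cache_add_file(struct page *page)
	{
		if (PageActive(page))
			ClearPageActive(page);
		__lru_cache_add(page);
	}

The ClearPageActive() is dead code for the callers converted here: all of
them add freshly allocated or freshly swapped-in pages, which are never
PageActive, so lru_cache_add() is a drop-in replacement.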
[akpm@linux-foundation.org: fix it for local_lock changes]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200520232525.798933-5-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index f29038c..3f03248 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1879,13 +1879,9 @@ static void collapse_file(struct mm_struct *mm,
SetPageUptodate(new_page);
page_ref_add(new_page, HPAGE_PMD_NR - 1);
-
- if (is_shmem) {
+ if (is_shmem)
set_page_dirty(new_page);
- lru_cache_add_anon(new_page);
- } else {
- lru_cache_add_file(new_page);
- }
+ lru_cache_add(new_page);
/*
* Remove pte page tables, so we can re-fault the page as huge.
diff --git a/mm/memory.c b/mm/memory.c
index d50d8b4..3431e76 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3139,7 +3139,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (err)
goto out_page;
- lru_cache_add_anon(page);
+ lru_cache_add(page);
swap_readpage(page, true);
}
} else {
diff --git a/mm/shmem.c b/mm/shmem.c
index e83de27..ea95a3e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1609,7 +1609,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
*/
oldpage = newpage;
} else {
- lru_cache_add_anon(newpage);
+ lru_cache_add(newpage);
*pagep = newpage;
}
@@ -1860,7 +1860,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
charge_mm);
if (error)
goto unacct;
- lru_cache_add_anon(page);
+ lru_cache_add(page);
spin_lock_irq(&info->lock);
info->alloced += compound_nr(page);
@@ -2376,7 +2376,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
if (!pte_none(*dst_pte))
goto out_release_unlock;
- lru_cache_add_anon(page);
+ lru_cache_add(page);
spin_lock_irq(&info->lock);
info->alloced++;
diff --git a/mm/swap.c b/mm/swap.c
index f7026f7..6196d79 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -424,37 +424,6 @@ void mark_page_accessed(struct page *page)
}
EXPORT_SYMBOL(mark_page_accessed);
-static void __lru_cache_add(struct page *page)
-{
- struct pagevec *pvec;
-
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_add);
- get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- __pagevec_lru_add(pvec);
- local_unlock(&lru_pvecs.lock);
-}
-
-/**
- * lru_cache_add_anon - add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add_anon(struct page *page)
-{
- if (PageActive(page))
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-
-void lru_cache_add_file(struct page *page)
-{
- if (PageActive(page))
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-EXPORT_SYMBOL(lru_cache_add_file);
-
/**
* lru_cache_add - add a page to a page list
* @page: the page to be added to the LRU.
@@ -466,10 +435,19 @@ EXPORT_SYMBOL(lru_cache_add_file);
*/
void lru_cache_add(struct page *page)
{
+ struct pagevec *pvec;
+
VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
VM_BUG_ON_PAGE(PageLRU(page), page);
- __lru_cache_add(page);
+
+ get_page(page);
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ __pagevec_lru_add(pvec);
+ local_unlock(&lru_pvecs.lock);
}
+EXPORT_SYMBOL(lru_cache_add);
/**
* lru_cache_add_active_or_unevictable
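[After this hunk, lru_cache_add() carries the per-CPU pagevec batching
directly: pages are staged in lru_pvecs.lru_add under the local lock and
moved to the LRU in one batch once the pagevec fills (compound pages are
flushed immediately). The batching can be modeled in standalone userspace
C, for illustration only — the per-CPU pagevec, the local_lock and struct
page are reduced here to a static buffer of ints, and the *_model names
are stand-ins, not kernel functions:

#include <stdio.h>

#define PAGEVEC_SIZE 15	/* same capacity as the kernel's struct pagevec */

struct pagevec {
	unsigned char nr;
	int pages[PAGEVEC_SIZE];
};

/* Stands in for this_cpu_ptr(&lru_pvecs.lru_add). */
static struct pagevec lru_add;

/* Stand-in for __pagevec_lru_add(): move the whole batch to the LRU. */
static void pagevec_lru_add_model(struct pagevec *pvec)
{
	printf("flushing %d pages to the LRU\n", pvec->nr);
	pvec->nr = 0;
}

/* Like the kernel's pagevec_add(): returns the space left, 0 == full. */
static int pagevec_add_model(struct pagevec *pvec, int page)
{
	pvec->pages[pvec->nr++] = page;
	return PAGEVEC_SIZE - pvec->nr;
}

/* Model of the new lru_cache_add() body: batch, flush when full. */
static void lru_cache_add_model(int page)
{
	if (!pagevec_add_model(&lru_add, page))
		pagevec_lru_add_model(&lru_add);
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		lru_cache_add_model(i);
	/* In the kernel, leftovers are drained later by lru_add_drain(). */
	printf("%d pages still batched\n", lru_add.nr);
	return 0;
}

Feeding 40 pages through the model flushes two full batches of 15 and
leaves 10 staged, mirroring how the kernel defers LRU insertion until a
pagevec fills or is drained.]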
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ab04628..fa08900 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -442,7 +442,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/* Caller will initiate read into locked page */
SetPageWorkingset(page);
- lru_cache_add_anon(page);
+ lru_cache_add(page);
*new_page_allocated = true;
return page;
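[With the wrappers gone, call sites no longer encode the anon/file
distinction when putting a page on the LRU. A sketch of the resulting
calling convention — add_new_page_to_lru() is a hypothetical example,
not a kernel function:

	static void add_new_page_to_lru(struct page *page)
	{
		/*
		 * The page is freshly allocated: neither PageLRU nor
		 * PageActive, so the VM_BUG_ON_PAGE() assertions in
		 * lru_cache_add() hold whether the page ends up on an
		 * anon or a file LRU list.
		 */
		lru_cache_add(page);
	}
]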