mm: fold and remove lru_cache_add_anon() and lru_cache_add_file()

They're the same function, and for the purpose of all callers they are
equivalent to lru_cache_add().

[akpm@linux-foundation.org: fix it for local_lock changes]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200520232525.798933-5-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Johannes Weiner, 2020-06-03 16:02:40 -07:00; committed by Linus Torvalds
commit 6058eaec81, parent c843966c55
8 changed files with 23 additions and 51 deletions
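For orientation, below is a minimal sketch of the batching scheme that lru_cache_add() retains after the fold. This is an illustrative userspace model, not kernel code: the names mirror the kernel's pagevec API, but the types, the printf stand-in for the LRU list, and the absence of per-CPU data and local_lock handling are simplifying assumptions.

#include <stdbool.h>
#include <stdio.h>

#define PAGEVEC_SIZE 15	/* same batch size as the kernel's pagevec */

struct page { int id; };

struct pagevec {
	unsigned int nr;
	struct page *pages[PAGEVEC_SIZE];
};

/* stand-in for __pagevec_lru_add(): move the whole batch to the LRU */
static void pagevec_lru_add(struct pagevec *pvec)
{
	for (unsigned int i = 0; i < pvec->nr; i++)
		printf("LRU <- page %d\n", pvec->pages[i]->id);
	pvec->nr = 0;
}

/* like the kernel's pagevec_add(): returns false once the vector is full */
static bool pagevec_add(struct pagevec *pvec, struct page *page)
{
	pvec->pages[pvec->nr++] = page;
	return pvec->nr < PAGEVEC_SIZE;
}

static struct pagevec lru_add_pvec;	/* per-CPU and lock-protected in the kernel */

static void lru_cache_add(struct page *page)
{
	/* the kernel also takes a page reference and the local lock here */
	if (!pagevec_add(&lru_add_pvec, page))
		pagevec_lru_add(&lru_add_pvec);
}

int main(void)
{
	struct page pages[40];

	for (int i = 0; i < 40; i++) {
		pages[i].id = i;
		lru_cache_add(&pages[i]);
	}
	pagevec_lru_add(&lru_add_pvec);	/* drain the leftover partial batch */
	return 0;
}

The real lru_cache_add() (see the mm/swap.c hunk below) does the same thing under local_lock(&lru_pvecs.lock), takes a get_page() reference before queueing, and drains immediately for compound pages via the PageCompound() check.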

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c

@@ -4162,7 +4162,7 @@ cifs_readv_complete(struct work_struct *work)
 	for (i = 0; i < rdata->nr_pages; i++) {
 		struct page *page = rdata->pages[i];
 
-		lru_cache_add_file(page);
+		lru_cache_add(page);
 
 		if (rdata->result == 0 ||
 		    (rdata->result == -EAGAIN && got_bytes)) {
@@ -4232,7 +4232,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			 * fill them until the writes are flushed.
 			 */
 			zero_user(page, 0, PAGE_SIZE);
-			lru_cache_add_file(page);
+			lru_cache_add(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -4242,7 +4242,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			continue;
 		} else {
 			/* no need to hold page hostage */
-			lru_cache_add_file(page);
+			lru_cache_add(page);
 			unlock_page(page);
 			put_page(page);
 			rdata->pages[i] = NULL;
@@ -4437,7 +4437,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		/* best to give up if we're out of mem */
 		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
 			list_del(&page->lru);
-			lru_cache_add_file(page);
+			lru_cache_add(page);
 			unlock_page(page);
 			put_page(page);
 		}
@@ -4475,7 +4475,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 			add_credits_and_wake_if(server, &rdata->credits, 0);
 			for (i = 0; i < rdata->nr_pages; i++) {
 				page = rdata->pages[i];
-				lru_cache_add_file(page);
+				lru_cache_add(page);
 				unlock_page(page);
 				put_page(page);
 			}

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c

@@ -840,7 +840,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 	get_page(newpage);
 
 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-		lru_cache_add_file(newpage);
+		lru_cache_add(newpage);
 
 	err = 0;
 	spin_lock(&cs->req->waitq.lock);

diff --git a/include/linux/swap.h b/include/linux/swap.h
--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -335,8 +335,6 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 /* linux/mm/swap.c */
 extern void lru_cache_add(struct page *);
-extern void lru_cache_add_anon(struct page *page);
-extern void lru_cache_add_file(struct page *page);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			 struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c

@@ -1879,13 +1879,9 @@ xa_unlocked:
 
 		SetPageUptodate(new_page);
 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
-
-		if (is_shmem) {
+		if (is_shmem)
 			set_page_dirty(new_page);
-			lru_cache_add_anon(new_page);
-		} else {
-			lru_cache_add_file(new_page);
-		}
+		lru_cache_add(new_page);
 
 		/*
 		 * Remove pte page tables, so we can re-fault the page as huge.

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c

@@ -3139,7 +3139,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				if (err)
 					goto out_page;
 
-				lru_cache_add_anon(page);
+				lru_cache_add(page);
 				swap_readpage(page, true);
 			}
 		} else {

diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -1609,7 +1609,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 		 */
 		oldpage = newpage;
 	} else {
-		lru_cache_add_anon(newpage);
+		lru_cache_add(newpage);
 		*pagep = newpage;
 	}
 
@@ -1860,7 +1860,7 @@ alloc_nohuge:
 					charge_mm);
 	if (error)
 		goto unacct;
-	lru_cache_add_anon(page);
+	lru_cache_add(page);
 
 	spin_lock_irq(&info->lock);
 	info->alloced += compound_nr(page);
@@ -2376,7 +2376,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (!pte_none(*dst_pte))
 		goto out_release_unlock;
 
-	lru_cache_add_anon(page);
+	lru_cache_add(page);
 
 	spin_lock_irq(&info->lock);
 	info->alloced++;

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c

@@ -424,37 +424,6 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-static void __lru_cache_add(struct page *page)
-{
-	struct pagevec *pvec;
-
-	local_lock(&lru_pvecs.lock);
-	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
-	get_page(page);
-	if (!pagevec_add(pvec, page) || PageCompound(page))
-		__pagevec_lru_add(pvec);
-	local_unlock(&lru_pvecs.lock);
-}
-
-/**
- * lru_cache_add_anon - add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add_anon(struct page *page)
-{
-	if (PageActive(page))
-		ClearPageActive(page);
-	__lru_cache_add(page);
-}
-
-void lru_cache_add_file(struct page *page)
-{
-	if (PageActive(page))
-		ClearPageActive(page);
-	__lru_cache_add(page);
-}
-EXPORT_SYMBOL(lru_cache_add_file);
-
 /**
  * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
@@ -466,10 +435,19 @@ EXPORT_SYMBOL(lru_cache_add_file);
  */
 void lru_cache_add(struct page *page)
 {
+	struct pagevec *pvec;
+
 	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
-	__lru_cache_add(page);
+
+	get_page(page);
+	local_lock(&lru_pvecs.lock);
+	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+	if (!pagevec_add(pvec, page) || PageCompound(page))
+		__pagevec_lru_add(pvec);
+	local_unlock(&lru_pvecs.lock);
 }
+EXPORT_SYMBOL(lru_cache_add);
 
 /**
  * lru_cache_add_active_or_unevictable

diff --git a/mm/swap_state.c b/mm/swap_state.c
--- a/mm/swap_state.c
+++ b/mm/swap_state.c

@@ -442,7 +442,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
 	/* Caller will initiate read into locked page */
 	SetPageWorkingset(page);
-	lru_cache_add_anon(page);
+	lru_cache_add(page);
 	*new_page_allocated = true;
 
 	return page;