Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 04:18:39 +08:00)
mm/filemap: add helper for finding pages
There is a lot of common code in find_get_entries(), find_get_pages_range() and find_get_pages_range_tag(). Factor out find_get_entry() which simplifies all three functions.

[willy@infradead.org: remove VM_BUG_ON_PAGE()]
Link: https://lkml.kernel.org/r/20201124041507.28996-2-willy@infradead.org
Link: https://lkml.kernel.org/r/20201112212641.27837-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc5a301120
commit c7bad633e6
mm/filemap.c | 97
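For illustration, here is a minimal caller-side sketch (not part of the patch) of the shape the new helper gives page-cache walkers. The function name collect_pages is hypothetical; since find_get_entry() is static to mm/filemap.c, a caller of this form would have to live in that file. The point is that xas_retry(), the speculative page reference and the xas_reload() recheck all live inside the helper, so the caller is reduced to skipping value entries and collecting pages, exactly as the simplified find_get_pages_range() below shows.

/*
 * Illustrative sketch only -- collect_pages() is hypothetical and not part
 * of this commit.  find_get_entry() already handles retries, the speculative
 * reference and the reload recheck, so the caller is a plain loop.
 */
static unsigned int collect_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned int nr_pages,
		struct page **pages)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
		/* Value entries (shadow/swap/DAX) carry no reference; skip them. */
		if (xa_is_value(page))
			continue;

		pages[ret] = find_subpage(page, xas.xa_index);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();

	return ret;
}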
@@ -1825,6 +1825,42 @@ no_page:
 }
 EXPORT_SYMBOL(pagecache_get_page);
 
+static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
+               xa_mark_t mark)
+{
+       struct page *page;
+
+retry:
+       if (mark == XA_PRESENT)
+               page = xas_find(xas, max);
+       else
+               page = xas_find_marked(xas, max, mark);
+
+       if (xas_retry(xas, page))
+               goto retry;
+       /*
+        * A shadow entry of a recently evicted page, a swap
+        * entry from shmem/tmpfs or a DAX entry. Return it
+        * without attempting to raise page count.
+        */
+       if (!page || xa_is_value(page))
+               return page;
+
+       if (!page_cache_get_speculative(page))
+               goto reset;
+
+       /* Has the page moved or been split? */
+       if (unlikely(page != xas_reload(xas))) {
+               put_page(page);
+               goto reset;
+       }
+
+       return page;
+reset:
+       xas_reset(xas);
+       goto retry;
+}
+
 /**
  * find_get_entries - gang pagecache lookup
  * @mapping:	The address_space to search
@@ -1864,42 +1900,21 @@ unsigned find_get_entries(struct address_space *mapping,
 		return 0;
 
 	rcu_read_lock();
-	xas_for_each(&xas, page, ULONG_MAX) {
-		if (xas_retry(&xas, page))
-			continue;
-		/*
-		 * A shadow entry of a recently evicted page, a swap
-		 * entry from shmem/tmpfs or a DAX entry. Return it
-		 * without attempting to raise page count.
-		 */
-		if (xa_is_value(page))
-			goto export;
-
-		if (!page_cache_get_speculative(page))
-			goto retry;
-
-		/* Has the page moved or been split? */
-		if (unlikely(page != xas_reload(&xas)))
-			goto put_page;
-
+	while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
 		/*
 		 * Terminate early on finding a THP, to allow the caller to
 		 * handle it all at once; but continue if this is hugetlbfs.
 		 */
-		if (PageTransHuge(page) && !PageHuge(page)) {
+		if (!xa_is_value(page) && PageTransHuge(page) &&
+				!PageHuge(page)) {
 			page = find_subpage(page, xas.xa_index);
 			nr_entries = ret + 1;
 		}
-export:
+
 		indices[ret] = xas.xa_index;
 		entries[ret] = page;
 		if (++ret == nr_entries)
 			break;
-		continue;
-put_page:
-		put_page(page);
-retry:
-		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
@@ -1938,30 +1953,16 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 		return 0;
 
 	rcu_read_lock();
-	xas_for_each(&xas, page, end) {
-		if (xas_retry(&xas, page))
-			continue;
+	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
 		/* Skip over shadow, swap and DAX entries */
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
-			goto retry;
-
-		/* Has the page moved or been split? */
-		if (unlikely(page != xas_reload(&xas)))
-			goto put_page;
-
 		pages[ret] = find_subpage(page, xas.xa_index);
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
-		continue;
-put_page:
-		put_page(page);
-retry:
-		xas_reset(&xas);
 	}
 
 	/*
@@ -2061,9 +2062,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 		return 0;
 
 	rcu_read_lock();
-	xas_for_each_marked(&xas, page, end, tag) {
-		if (xas_retry(&xas, page))
-			continue;
+	while ((page = find_get_entry(&xas, end, tag))) {
 		/*
 		 * Shadow entries should never be tagged, but this iteration
 		 * is lockless so there is a window for page reclaim to evict
@@ -2072,23 +2071,11 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
-			goto retry;
-
-		/* Has the page moved or been split? */
-		if (unlikely(page != xas_reload(&xas)))
-			goto put_page;
-
 		pages[ret] = page;
 		if (++ret == nr_pages) {
 			*index = page->index + thp_nr_pages(page);
 			goto out;
 		}
-		continue;
-put_page:
-		put_page(page);
-retry:
-		xas_reset(&xas);
 	}
 
 	/*