filemap: Remove find_get_pages_range() and associated functions

All callers of find_get_pages_range(), pagevec_lookup_range() and
pagevec_lookup() have now been removed.
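
Users of these interfaces should iterate with filemap_get_folios() and a
folio_batch instead.  As a rough illustration only (not code from this
commit; mapping, start and end stand for the caller's own variables), a
former pagevec_lookup_range() loop converts along these lines:

	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* per-folio work that used to see pvec->pages */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

filemap_get_folios() advances the start index past the last folio it
returns, so the loop terminates once the range is exhausted.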

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Matthew Wilcox (Oracle) 2022-06-04 17:46:02 -04:00
parent 105c988f5d
commit bb4b42ba92
4 changed files with 0 additions and 109 deletions

include/linux/pagemap.h

@@ -720,9 +720,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
 		pgoff_t end, struct folio_batch *fbatch);
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
-			pgoff_t end, unsigned int nr_pages,
-			struct page **pages);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,

include/linux/pagevec.h

@@ -27,16 +27,6 @@ struct pagevec {
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
-		struct address_space *mapping,
-		pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
-		struct address_space *mapping,
-		pgoff_t *start)
-{
-	return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
 		struct address_space *mapping, pgoff_t *index, pgoff_t end,
 		xa_mark_t tag);

mm/filemap.c

@@ -2196,73 +2196,6 @@ bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
 	return index < folio->index + folio_nr_pages(folio) - 1;
 }
 
-/**
- * find_get_pages_range - gang pagecache lookup
- * @mapping: The address_space to search
- * @start: The starting page index
- * @end: The final page index (inclusive)
- * @nr_pages: The maximum number of pages
- * @pages: Where the resulting pages are placed
- *
- * find_get_pages_range() will search for and return a group of up to @nr_pages
- * pages in the mapping starting at index @start and up to index @end
- * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
- * a reference against the returned pages.
- *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes.  There may be holes in the indices due to not-present pages.
- * We also update @start to index the next page for the traversal.
- *
- * Return: the number of pages which were found. If this number is
- * smaller than @nr_pages, the end of specified range has been
- * reached.
- */
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
-			pgoff_t end, unsigned int nr_pages,
-			struct page **pages)
-{
-	XA_STATE(xas, &mapping->i_pages, *start);
-	struct folio *folio;
-	unsigned ret = 0;
-
-	if (unlikely(!nr_pages))
-		return 0;
-
-	rcu_read_lock();
-	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
-		/* Skip over shadow, swap and DAX entries */
-		if (xa_is_value(folio))
-			continue;
-
-again:
-		pages[ret] = folio_file_page(folio, xas.xa_index);
-		if (++ret == nr_pages) {
-			*start = xas.xa_index + 1;
-			goto out;
-		}
-		if (folio_more_pages(folio, xas.xa_index, end)) {
-			xas.xa_index++;
-			folio_ref_inc(folio);
-			goto again;
-		}
-	}
-
-	/*
-	 * We come here when there is no page beyond @end. We take care to not
-	 * overflow the index @start as it confuses some of the callers. This
-	 * breaks the iteration when there is a page at index -1 but that is
-	 * already broken anyway.
-	 */
-	if (end == (pgoff_t)-1)
-		*start = (pgoff_t)-1;
-	else
-		*start = end + 1;
-out:
-	rcu_read_unlock();
-	return ret;
-}
-
 /**
  * find_get_pages_contig - gang contiguous pagecache lookup
  * @mapping: The address_space to search

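Note that the removed implementation above handed back each page of a
large folio individually (the "again:" loop), while filemap_get_folios()
returns each folio exactly once.  A minimal sketch, assuming a converted
caller still wants that per-page view (process_folio_pages() is a
hypothetical helper, not something this commit adds):

	static void process_folio_pages(struct folio *folio)
	{
		long i, nr = folio_nr_pages(folio);

		for (i = 0; i < nr; i++) {
			struct page *page = folio_page(folio, i);

			/* per-page work on page, as pages[] used to provide */
		}
	}
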
mm/swap.c

@@ -1086,35 +1086,6 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
 	fbatch->nr = j;
 }
 
-/**
- * pagevec_lookup_range - gang pagecache lookup
- * @pvec: Where the resulting pages are placed
- * @mapping: The address_space to search
- * @start: The starting page index
- * @end: The final page index
- *
- * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
- * pages in the mapping starting from index @start and upto index @end
- * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
- * reference against the pages in @pvec.
- *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes.  There may be holes in the indices due to not-present pages. We
- * also update @start to index the next page for the traversal.
- *
- * pagevec_lookup_range() returns the number of pages which were found. If this
- * number is smaller than PAGEVEC_SIZE, the end of specified range has been
- * reached.
- */
-unsigned pagevec_lookup_range(struct pagevec *pvec,
-		struct address_space *mapping, pgoff_t *start, pgoff_t end)
-{
-	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
-					pvec->pages);
-	return pagevec_count(pvec);
-}
-EXPORT_SYMBOL(pagevec_lookup_range);
-
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
 		struct address_space *mapping, pgoff_t *index, pgoff_t end,
 		xa_mark_t tag)
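
As the mm/swap.c hunk shows, pagevec_lookup_range() was only a thin
wrapper around find_get_pages_range().  Its folio-based equivalent is
correspondingly small; a sketch under the assumption that the caller
already works in folios (folio_batch_lookup_range() is hypothetical and
not part of the tree):

	/* Hypothetical counterpart of the removed wrapper. */
	static unsigned folio_batch_lookup_range(struct folio_batch *fbatch,
			struct address_space *mapping, pgoff_t *start,
			pgoff_t end)
	{
		return filemap_get_folios(mapping, start, end, fbatch);
	}

In practice the converted callers call filemap_get_folios() directly
rather than keeping such a wrapper around.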