mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()

Add back page_lock_anon_vma_read() as a wrapper.  This saves a few calls
to compound_head().  If any callers were passing a tail page before,
this would have failed to lock the anon VMA as page->mapping is not
valid for tail pages.
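
For illustration, a minimal sketch (not part of this patch) of the pattern
the wrapper relies on: page->mapping cannot be trusted on a tail page, but
page_folio() always resolves to the folio, whose ->mapping is valid.  The
helper name is hypothetical.

	/* Hypothetical helper, illustrative only. */
	static struct anon_vma *lock_anon_vma_of(struct page *page)
	{
		/*
		 * page_folio() maps any page, head or tail, to its folio,
		 * so ->mapping is read from the head page where it is valid.
		 */
		return folio_lock_anon_vma_read(page_folio(page));
	}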

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2022-02-01 23:33:08 -05:00
Commit: 9595d76942 (parent: c842318607)

4 changed files with 16 additions and 7 deletions

diff --git a/include/linux/rmap.h b/include/linux/rmap.h

@@ -267,6 +267,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 /*
  * Called by memory-failure.c to kill processes.
  */
 struct anon_vma *page_lock_anon_vma_read(struct page *page);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

diff --git a/mm/folio-compat.c b/mm/folio-compat.c

@@ -164,3 +164,10 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
+
+#ifdef CONFIG_MMU
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
+{
+	return folio_lock_anon_vma_read(page_folio(page));
+}
+#endif
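
A usage sketch for the compat path (hypothetical caller, illustrative only);
the unlock side is unchanged by this patch:

	static void walk_anon_mappings(struct page *page)
	{
		struct anon_vma *av = page_lock_anon_vma_read(page);

		if (!av)
			return;	/* no longer mapped anonymously */
		/* ... walk the anon_vma interval tree under the read lock ... */
		page_unlock_anon_vma_read(av);
	}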

diff --git a/mm/memory-failure.c b/mm/memory-failure.c

@@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				int force_early)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
 
-	av = page_lock_anon_vma_read(page);
+	av = folio_lock_anon_vma_read(folio);
 	if (av == NULL) /* Not actually mapped anymore */
 		return;

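The conversion idiom above, sketched on its own (hypothetical function,
illustrative only): resolve the folio once at the top and use it from then
on, so the hidden compound_head() calls of the page-based API are not paid
repeatedly.

	static struct anon_vma *lock_av_of(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* one head lookup */

		/* the folio version needs no compound_head() internally */
		return folio_lock_anon_vma_read(folio);
	}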
diff --git a/mm/rmap.c b/mm/rmap.c

@@ -526,28 +526,28 @@ out:
  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  * reference like with page_get_anon_vma() and then block on the mutex.
  */
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
 {
 	struct anon_vma *anon_vma = NULL;
 	struct anon_vma *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
-	if (!page_mapped(page))
+	if (!folio_mapped(folio))
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = READ_ONCE(anon_vma->root);
 	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
-		 * If the page is still mapped, then this anon_vma is still
+		 * If the folio is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
-		if (!page_mapped(page)) {
+		if (!folio_mapped(folio)) {
 			up_read(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
@@ -560,7 +560,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 		goto out;
 	}
 
-	if (!page_mapped(page)) {
+	if (!folio_mapped(folio)) {
 		rcu_read_unlock();
 		put_anon_vma(anon_vma);
 		return NULL;
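
For reference, a condensed outline of the locking protocol above
(illustrative comments only; same logic as the function, nothing new):

	/*
	 * rcu_read_lock();
	 * if ->mapping does not carry PAGE_MAPPING_ANON, or the folio is
	 *     no longer mapped, bail out with NULL;
	 * down_read_trylock() on the root anon_vma rwsem:
	 *     success -> fast path: recheck folio_mapped() under the lock;
	 *     failure -> slow path: take a reference on the anon_vma, drop
	 *         the RCU read lock, down_read() (may sleep), then recheck
	 *         folio_mapped() and give the reference back if the folio
	 *         was unmapped in the meantime;
	 * rcu_read_unlock();
	 */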