mm/memory: page_add_anon_rmap() -> folio_add_anon_rmap_pte()

Let's convert restore_exclusive_pte() and do_swap_page().  While at it,
perform some folio conversion in restore_exclusive_pte().
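
In short: look up the folio once via page_folio() and pass both the folio
and the precise page to the new API.  A minimal sketch of the pattern,
using only identifiers that appear in the hunks below
(folio_add_anon_rmap_pte() is declared in include/linux/rmap.h):

	struct folio *folio = page_folio(page);

	/* old per-page call */
	if (PageAnon(page))
		page_add_anon_rmap(page, vma, address, RMAP_NONE);

	/* new folio call; @page identifies the PTE-mapped subpage */
	if (folio_test_anon(folio))
		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);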

Link: https://lkml.kernel.org/r/20231220224504.646757-21-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
@@ -710,6 +710,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 				  struct page *page, unsigned long address,
 				  pte_t *ptep)
 {
+	struct folio *folio = page_folio(page);
 	pte_t orig_pte;
 	pte_t pte;
 	swp_entry_t entry;
@@ -725,14 +726,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 	else if (is_writable_device_exclusive_entry(entry))
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 
-	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+	VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
+					    PageAnonExclusive(page)), folio);
 
 	/*
 	 * No need to take a page reference as one was already
 	 * created when the swap entry was made.
 	 */
-	if (PageAnon(page))
-		page_add_anon_rmap(page, vma, address, RMAP_NONE);
+	if (folio_test_anon(folio))
+		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
 	else
 		/*
 		 * Currently device exclusive access only supports anonymous
@@ -4076,7 +4078,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		folio_add_new_anon_rmap(folio, vma, vmf->address);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+		folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+					rmap_flags);
 	}
 
 	VM_BUG_ON(!folio_test_anon(folio) ||