mm/rmap: pass folio to hugepage_add_anon_rmap()
Let's pass a folio; we are always mapping the entire thing.

Link: https://lkml.kernel.org/r/20230913125113.313322-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 132b180f06
commit 09c550508a
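As a caller-side illustration (not part of this commit), the change boils down to handing the folio itself to hugepage_add_anon_rmap() instead of its head page; the wrapper below is hypothetical and only sketches the before/after calling convention:

#include <linux/rmap.h>

/*
 * Hypothetical helper, for illustration only: hugetlb mappings always cover
 * the whole folio, so the folio can be passed directly.
 */
static void example_restore_hugetlb_anon_rmap(struct folio *folio,
					       struct vm_area_struct *vma,
					       unsigned long address,
					       rmap_t rmap_flags)
{
	/* pre-patch convention:
	 * hugepage_add_anon_rmap(&folio->page, vma, address, rmap_flags);
	 */
	hugepage_add_anon_rmap(folio, vma, address, rmap_flags);
}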
@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
 void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio,
 
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 			if (folio_test_anon(folio))
-				hugepage_add_anon_rmap(new, vma, pvmw.address,
+				hugepage_add_anon_rmap(folio, vma, pvmw.address,
 						       rmap_flags);
 			else
 				page_dup_file_rmap(new, true);
@@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
  *
  * RMAP_COMPOUND is ignored.
  */
-void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags)
 {
-	struct folio *folio = page_folio(page);
-
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
 	if (flags & RMAP_EXCLUSIVE)
-		SetPageAnonExclusive(page);
+		SetPageAnonExclusive(&folio->page);
 	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
-			 PageAnonExclusive(page), folio);
+			 PageAnonExclusive(&folio->page), folio);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,
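For context, a hedged sketch (not from this series) of how the two hugetlb anon rmap helpers pair up after this change, both taking a folio; the helper name and the freshly_allocated flag are illustrative assumptions:

#include <linux/rmap.h>
#include <linux/mm.h>

/*
 * Illustration only (hugetlb-specific paths): a brand-new anon folio goes
 * through hugepage_add_new_anon_rmap(), while an existing one (e.g. when a
 * migration entry is removed) goes through hugepage_add_anon_rmap().
 */
static void example_map_hugetlb_anon_folio(struct folio *folio,
					    struct vm_area_struct *vma,
					    unsigned long address,
					    bool freshly_allocated,
					    rmap_t rmap_flags)
{
	if (freshly_allocated)
		hugepage_add_new_anon_rmap(folio, vma, address);
	else
		hugepage_add_anon_rmap(folio, vma, address, rmap_flags);
}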