mm: huge_memory: use a folio in change_huge_pmd()
Use a folio in change_huge_pmd(), which helps to remove the last
xchg_page_access_time() caller.

Link: https://lkml.kernel.org/r/20231018140806.2783514-11-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent ec1778807a
commit d986ba2b19
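As a quick orientation before the diff, here is a minimal before/after sketch of the substitution in the migration-entry path, assembled from the first two hunks below. It is not standalone code: `entry` comes from pmd_to_swp_entry(*pmd) inside change_huge_pmd(), exactly as shown in the hunks.

/* Before: resolve the head page behind the migration entry. */
struct page *page = pfn_swap_entry_to_page(entry);
if (PageAnon(page))
	entry = make_readable_exclusive_migration_entry(swp_offset(entry));
else
	entry = make_readable_migration_entry(swp_offset(entry));

/* After: resolve the folio once and test it directly. */
struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
if (folio_test_anon(folio))
	entry = make_readable_exclusive_migration_entry(swp_offset(entry));
else
	entry = make_readable_migration_entry(swp_offset(entry));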
@@ -1856,7 +1856,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	if (is_swap_pmd(*pmd)) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
-		struct page *page = pfn_swap_entry_to_page(entry);
+		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 		pmd_t newpmd;
 
 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
@@ -1865,7 +1865,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * A protection check is difficult so
 			 * just be safe and disable write
 			 */
-			if (PageAnon(page))
+			if (folio_test_anon(folio))
 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
 			else
 				entry = make_readable_migration_entry(swp_offset(entry));
@@ -1887,7 +1887,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #endif
 
 	if (prot_numa) {
-		struct page *page;
+		struct folio *folio;
 		bool toptier;
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
@@ -1900,8 +1900,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		page = pmd_page(*pmd);
-		toptier = node_is_toptier(page_to_nid(page));
+		folio = page_folio(pmd_page(*pmd));
+		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
 		 * balancing is disabled
@@ -1912,7 +1912,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
+			folio_xchg_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
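For readability, here is a sketch of the prot_numa block as it reads after this patch, assembled only from the hunks above. The context lines that fall between hunks (for example the zero-page check referenced by the "Avoid trapping faults" comment and the top-tier skip referenced by the "Skip scanning top tier node" comment) are left elided rather than reconstructed.

	if (prot_numa) {
		struct folio *folio;
		bool toptier;

		/* ... zero-page check elided (see the hunk at line 1887) ... */

		if (pmd_protnone(*pmd))
			goto unlock;

		/* Resolve the folio once, then use folio-based helpers. */
		folio = page_folio(pmd_page(*pmd));
		toptier = node_is_toptier(folio_nid(folio));

		/* ... top-tier skip elided (see the hunk at line 1900) ... */

		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			folio_xchg_access_time(folio,
					       jiffies_to_msecs(jiffies));
	}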