mm/ksm: convert break_ksm() from walk_page_range_vma() to folio_walk
Let's simplify by reusing folio_walk.  Keep the existing behavior by
handling migration entries and zeropages.

Link: https://lkml.kernel.org/r/20240802155524.517137-12-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e317a8d8b4
parent 7290840de6
 mm/ksm.c | 63
 1 file changed, 16 insertions(+), 47 deletions(-)
@@ -608,47 +608,6 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
-			struct mm_walk *walk)
-{
-	struct page *page = NULL;
-	spinlock_t *ptl;
-	pte_t *pte;
-	pte_t ptent;
-	int ret;
-
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte)
-		return 0;
-	ptent = ptep_get(pte);
-	if (pte_present(ptent)) {
-		page = vm_normal_page(walk->vma, addr, ptent);
-	} else if (!pte_none(ptent)) {
-		swp_entry_t entry = pte_to_swp_entry(ptent);
-
-		/*
-		 * As KSM pages remain KSM pages until freed, no need to wait
-		 * here for migration to end.
-		 */
-		if (is_migration_entry(entry))
-			page = pfn_swap_entry_to_page(entry);
-	}
-	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
-	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
-	pte_unmap_unlock(pte, ptl);
-	return ret;
-}
-
-static const struct mm_walk_ops break_ksm_ops = {
-	.pmd_entry = break_ksm_pmd_entry,
-	.walk_lock = PGWALK_RDLOCK,
-};
-
-static const struct mm_walk_ops break_ksm_lock_vma_ops = {
-	.pmd_entry = break_ksm_pmd_entry,
-	.walk_lock = PGWALK_WRLOCK,
-};
-
 /*
  * We use break_ksm to break COW on a ksm page by triggering unsharing,
  * such that the ksm page will get replaced by an exclusive anonymous page.
@@ -665,16 +624,26 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
 static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
 {
 	vm_fault_t ret = 0;
-	const struct mm_walk_ops *ops = lock_vma ?
-		&break_ksm_lock_vma_ops : &break_ksm_ops;
+
+	if (lock_vma)
+		vma_start_write(vma);
 
 	do {
-		int ksm_page;
+		bool ksm_page = false;
+		struct folio_walk fw;
+		struct folio *folio;
 
 		cond_resched();
-		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
-		if (WARN_ON_ONCE(ksm_page < 0))
-			return ksm_page;
+		folio = folio_walk_start(&fw, vma, addr,
+					 FW_MIGRATION | FW_ZEROPAGE);
+		if (folio) {
+			/* Small folio implies FW_LEVEL_PTE. */
+			if (!folio_test_large(folio) &&
+			    (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
+				ksm_page = true;
+			folio_walk_end(&fw, vma);
+		}
+
 		if (!ksm_page)
 			return 0;
 		ret = handle_mm_fault(vma, addr,
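
For reference, below is a minimal sketch (not part of this commit) of the folio_walk calling convention the new break_ksm() relies on. The flags and fields match the diff above; the helper name ksm_page_mapped_at() is made up purely for illustration, and the caller is assumed to hold the mmap lock, as break_ksm() does.

#include <linux/pagewalk.h>
#include <linux/ksm.h>

/*
 * Sketch only: folio_walk_start() walks the page tables covering @addr,
 * takes the page table lock and returns the mapped folio (or NULL).
 * FW_MIGRATION and FW_ZEROPAGE additionally report migration entries and
 * the shared zeropage, matching what break_ksm_pmd_entry() used to handle.
 * A successful start must be paired with folio_walk_end() to drop the lock.
 */
static bool ksm_page_mapped_at(struct vm_area_struct *vma, unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio;
	bool ret = false;

	folio = folio_walk_start(&fw, vma, addr, FW_MIGRATION | FW_ZEROPAGE);
	if (folio) {
		/* Small folio implies FW_LEVEL_PTE, so fw.pte is valid. */
		if (!folio_test_large(folio) &&
		    (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
			ret = true;
		folio_walk_end(&fw, vma);
	}
	return ret;
}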