mm/khugepaged: unify collapse pmd clear, flush and free
Unify the code that flushes, clears the pmd entry, and frees the PTE table level into a new function, collapse_and_free_pmd(). This cleanup is useful because in the next patch we will add another call to this function to iterate through the PTEs prior to freeing the level, for the page table check.

Link: https://lkml.kernel.org/r/20220131203249.2832273-4-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Paul Turner <pjt@google.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 64d8b9e145
commit e59a47b8a4
@@ -1416,6 +1416,19 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
 	return 0;
 }
 
+static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t *pmdp)
+{
+	spinlock_t *ptl;
+	pmd_t pmd;
+
+	ptl = pmd_lock(vma->vm_mm, pmdp);
+	pmd = pmdp_collapse_flush(vma, addr, pmdp);
+	spin_unlock(ptl);
+	mm_dec_nr_ptes(mm);
+	pte_free(mm, pmd_pgtable(pmd));
+}
+
 /**
  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
  * address haddr.
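The commit message notes that a follow-up patch will call into this helper again to walk the PTEs of the detached table before it is freed, for the page table check. A minimal sketch of how the helper might look after that follow-up, assuming the page_table_check_pte_clear_range() interface from the page table check series (the exact hook added by the follow-up may differ):

static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl;
	pmd_t pmd;

	/* Clear and flush the pmd entry under the pmd lock. */
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	/* Assumed follow-up hook: let page table check account for every PTE
	 * in the detached table before the level is freed. */
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

Because both call sites converted below already hold the mmap lock for write, centralising the flush, the mm_dec_nr_ptes() accounting, and the pte_free() in one place keeps that ordering in a single spot instead of duplicating it.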
@@ -1433,7 +1446,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma = find_vma(mm, haddr);
 	struct page *hpage;
 	pte_t *start_pte, *pte;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 	spinlock_t *ptl;
 	int count = 0;
 	int i;
@@ -1509,12 +1522,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	}
 
 	/* step 4: collapse pmd */
-	ptl = pmd_lock(vma->vm_mm, pmd);
-	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
-	spin_unlock(ptl);
-	mm_dec_nr_ptes(mm);
-	pte_free(mm, pmd_pgtable(_pmd));
-
+	collapse_and_free_pmd(mm, vma, haddr, pmd);
 drop_hpage:
 	unlock_page(hpage);
 	put_page(hpage);
@@ -1552,7 +1560,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long addr;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 
 	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1591,14 +1599,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 * reverse order. Trylock is a way to avoid deadlock.
 		 */
		if (mmap_write_trylock(mm)) {
-			if (!khugepaged_test_exit(mm)) {
-				spinlock_t *ptl = pmd_lock(mm, pmd);
-				/* assume page table is clear */
-				_pmd = pmdp_collapse_flush(vma, addr, pmd);
-				spin_unlock(ptl);
-				mm_dec_nr_ptes(mm);
-				pte_free(mm, pmd_pgtable(_pmd));
-			}
+			if (!khugepaged_test_exit(mm))
+				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
 			/* Try again later */