hugetlb: do not update address in huge_pmd_unshare
As an optimization for loops sequentially processing hugetlb address
ranges, huge_pmd_unshare would update a passed address if it unshared a
pmd.  Updating a loop control variable outside the loop like this is
generally a bad idea.  These loops are now using hugetlb_mask_last_page
to optimize scanning when non-present ptes are discovered.  The same can
be done when huge_pmd_unshare returns 1 indicating a pmd was unshared.

Remove address update from huge_pmd_unshare.  Change the passed argument
type and update all callers.  In loops sequentially processing addresses
use hugetlb_mask_last_page to update address if pmd is unshared.

[sfr@canb.auug.org.au: fix an unused variable warning/error]
Link: https://lkml.kernel.org/r/20220622171117.70850960@canb.auug.org.au
Link: https://lkml.kernel.org/r/20220621235620.291305-4-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Will Deacon <will@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4ddb4d91b8 (parent 1bcdb769f9)
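To illustrate the caller pattern this patch converts everything to, here is a minimal userspace sketch, not kernel code: PMD_SIZE and PUD_SIZE are assumed typical x86-64 values, and hugetlb_mask_last_page_demo() stands in for the kernel's hugetlb_mask_last_page().  Instead of huge_pmd_unshare() advancing the loop variable through a pointer, the caller ORs in the mask and lets its own loop increment step past the unmapped PUD-sized area.

#include <stdio.h>

#define PMD_SIZE (2UL << 20)    /* 2 MiB huge page, assumed x86-64 value */
#define PUD_SIZE (1UL << 30)    /* 1 GiB shared-pmd granularity, assumed */

/* Stand-in for the generic hugetlb_mask_last_page(): for PMD-sized
 * huge pages it returns the offset of the last huge page within a
 * shareable PUD-sized region.
 */
static unsigned long hugetlb_mask_last_page_demo(void)
{
        return PUD_SIZE - PMD_SIZE;
}

int main(void)
{
        unsigned long start = 0, end = 2 * PUD_SIZE;
        unsigned long last_addr_mask = hugetlb_mask_last_page_demo();
        unsigned long address;

        for (address = start; address < end; address += PMD_SIZE) {
                /* Pretend each PUD-aligned entry was a shared pmd we just
                 * unshared: the whole PUD_SIZE area is now unmapped, so
                 * jump to its last page and let the loop increment move
                 * past it -- the "address |= last_addr_mask; continue;"
                 * idiom used by the converted callers below.
                 */
                if ((address & (PUD_SIZE - 1)) == 0) {
                        printf("unshared at %#lx, next scan at %#lx\n",
                               address, (address | last_addr_mask) + PMD_SIZE);
                        address |= last_addr_mask;
                        continue;
                }
        }
        return 0;
}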
include/linux/hugetlb.h

@@ -196,7 +196,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz);
 unsigned long hugetlb_mask_last_page(struct hstate *h);
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-				unsigned long *addr, pte_t *ptep);
+				unsigned long addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -243,7 +243,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
 
 static inline int huge_pmd_unshare(struct mm_struct *mm,
 					struct vm_area_struct *vma,
-					unsigned long *addr, pte_t *ptep)
+					unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
mm/hugetlb.c (44 lines changed)
@@ -4935,7 +4935,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long old_end = old_addr + len;
 	unsigned long last_addr_mask;
-	unsigned long old_addr_copy;
 	pte_t *src_pte, *dst_pte;
 	struct mmu_notifier_range range;
 	bool shared_pmd = false;
@@ -4963,14 +4962,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		if (huge_pte_none(huge_ptep_get(src_pte)))
 			continue;
 
-		/* old_addr arg to huge_pmd_unshare() is a pointer and so the
-		 * arg may be modified. Pass a copy instead to preserve the
-		 * value in old_addr.
-		 */
-		old_addr_copy = old_addr;
-
-		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
 			shared_pmd = true;
+			old_addr |= last_addr_mask;
+			new_addr |= last_addr_mask;
 			continue;
 		}
 
@@ -5035,10 +5030,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		}
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			spin_unlock(ptl);
 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
 			force_flush = true;
+			address |= last_addr_mask;
 			continue;
 		}
 
@@ -6327,7 +6323,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		}
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			/*
 			 * When uffd-wp is enabled on the vma, unshare
 			 * shouldn't happen at all.  Warn about it if it
@@ -6337,6 +6333,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			pages++;
 			spin_unlock(ptl);
 			shared_pmd = true;
+			address |= last_addr_mask;
 			continue;
 		}
 		pte = huge_ptep_get(ptep);
@@ -6759,11 +6756,11 @@ out:
  *	0	the underlying pte page is not shared, or it is the last user
  */
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long *addr, pte_t *ptep)
+					unsigned long addr, pte_t *ptep)
 {
-	pgd_t *pgd = pgd_offset(mm, *addr);
-	p4d_t *p4d = p4d_offset(pgd, *addr);
-	pud_t *pud = pud_offset(p4d, *addr);
+	pgd_t *pgd = pgd_offset(mm, addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6773,14 +6770,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	/*
-	 * This update of passed address optimizes loops sequentially
-	 * processing addresses in increments of huge page size (PMD_SIZE
-	 * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
-	 * Update address to the 'last page' in the cleared area so that
-	 * calling loop can move to first page past this area.
-	 */
-	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 
@@ -6792,7 +6781,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-				unsigned long *addr, pte_t *ptep)
+				unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
@@ -6899,6 +6888,10 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above.  Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	if (huge_page_size(h) == PMD_SIZE)
+		return PUD_SIZE - PMD_SIZE;
+#endif
 	return 0UL;
 }
 
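To see why PUD_SIZE - PMD_SIZE works as a mask, a small self-contained arithmetic check (the 2 MiB/1 GiB sizes are assumed x86-64 defaults, and the sample address is hypothetical): OR-ing the mask into a PMD-aligned address moves it to the last PMD-sized page of its own PUD region without crossing into the next one, so the caller's "+= PMD_SIZE" lands exactly on the first page past the unmapped area.

#include <assert.h>
#include <stdio.h>

#define PMD_SIZE (2UL << 20)    /* 2 MiB, assumed x86-64 value */
#define PUD_SIZE (1UL << 30)    /* 1 GiB, assumed x86-64 value */

int main(void)
{
        unsigned long mask = PUD_SIZE - PMD_SIZE;   /* 0x3fe00000 */
        unsigned long addr = 0x40200000;            /* a PMD within the 2nd PUD */

        /* Sets every bit between the PMD and PUD boundaries; the bits at
         * and above the PUD boundary are untouched, so we stay inside
         * the same 1 GiB region.
         */
        addr |= mask;
        assert(addr == 0x7fe00000);                 /* last PMD page of the PUD */
        printf("last page: %#lx, next loop addr: %#lx\n",
               addr, addr + PMD_SIZE);              /* 0x80000000: next PUD */
        return 0;
}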
@@ -7125,14 +7118,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	mmu_notifier_invalidate_range_start(&range);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (address = start; address < end; address += PUD_SIZE) {
-		unsigned long tmp = address;
-
 		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
-		/* We don't want 'address' to be changed */
-		huge_pmd_unshare(mm, vma, &tmp, ptep);
+		huge_pmd_unshare(mm, vma, address, ptep);
 		spin_unlock(ptl);
 	}
 	flush_hugetlb_tlb_range(vma, start, end);
mm/rmap.c

@@ -1559,7 +1559,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1920,7 +1920,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);