mm/hugetlb: add size parameter to huge_pte_offset()
A poisoned or migrated hugepage is stored as a swap entry in the page
tables.  On architectures that support hugepages consisting of
contiguous page table entries (such as on arm64) this leads to ambiguity
in determining the page table entry to return in huge_pte_offset() when
a poisoned entry is encountered.

Let's remove the ambiguity by adding a size parameter to convey
additional information about the requested address.  Also fixup the
definition/usage of huge_pte_offset() throughout the tree.

Link: http://lkml.kernel.org/r/20170522133604.11392-4-punit.agrawal@arm.com
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Acked-by: Steve Capper <steve.capper@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com> (odd fixer:METAG ARCHITECTURE)
Cc: Ralf Baechle <ralf@linux-mips.org> (supporter:MIPS)
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7868a2087e
parent d63206ee32
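To see why the size hint is needed, here is a toy, userspace-only sketch
(not the kernel implementation: the shift values and the level_for()
helper are illustrative assumptions for a 4K-granule, arm64-style
layout).  Once a hugepage's entry has been replaced by a swap entry,
the entry bits no longer identify the mapping level, so the
caller-supplied size is what lets the walker decide whether to return
a PUD-, PMD-, or contiguous-PTE-level slot:

	/*
	 * Toy sketch: pick a page-table level from the requested size,
	 * since a swap entry's bits cannot tell us the level.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PUD_SHIFT	30
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PUD_SIZE	(1UL << PUD_SHIFT)

	/* Hypothetical helper, not a kernel API. */
	static const char *level_for(unsigned long sz)
	{
		if (sz >= PUD_SIZE)
			return "pud entry";
		if (sz >= PMD_SIZE)
			return "pmd entry";
		return "pte entry (contiguous range)";
	}

	int main(void)
	{
		unsigned long sizes[] = { 16 * PAGE_SIZE, PMD_SIZE, PUD_SIZE };

		for (unsigned int i = 0; i < 3; i++)
			printf("sz=%#lx -> walker returns the %s\n",
			       sizes[i], level_for(sizes[i]));
		return 0;
	}

This builds with any C compiler; the actual kernel walkers below do the
equivalent comparison against their architecture's real level sizes.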
@@ -131,7 +131,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -44,7 +44,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 }
 
 pte_t *
-huge_pte_offset (struct mm_struct *mm, unsigned long addr)
+huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
@@ -92,7 +92,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
 	if (REGION_NUMBER(addr) != RGN_HPAGE)
 		return ERR_PTR(-EINVAL);
 
-	ptep = huge_pte_offset(mm, addr);
+	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
 	if (!ptep || pte_none(*ptep))
 		return NULL;
 	page = pte_page(*ptep);
@@ -74,7 +74,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -36,7 +36,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+		       unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -69,7 +69,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -57,7 +57,7 @@ static unsigned nr_gpages;
 
 #define hugepd_none(hpd)	(hpd_val(hpd) == 0)
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	/* Only called for hugetlbfs pages, hence can ignore THP */
 	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
@@ -180,7 +180,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return (pte_t *) pmdp;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
 	p4d_t *p4dp;
@@ -42,7 +42,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -277,7 +277,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -102,7 +102,8 @@ static pte_t *get_pte(pte_t *base, int index, int level)
 	return ptep;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -33,7 +33,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	if (!vma || !is_vm_hugetlb_page(vma))
 		return ERR_PTR(-EINVAL);
 
-	pte = huge_pte_offset(mm, address);
+	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
 	/* hugetlb should be locked, and hence, prefaulted */
 	WARN_ON(!pte || pte_none(*pte));
@@ -214,6 +214,7 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
  * hugepmd ranges.
  */
 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+					      struct vm_area_struct *vma,
 					      unsigned long address,
 					      unsigned long flags,
 					      unsigned long reason)
@@ -224,7 +225,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	pte = huge_pte_offset(mm, address);
+	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 	if (!pte)
 		goto out;
 
@@ -243,6 +244,7 @@ out:
 }
 #else
 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+					      struct vm_area_struct *vma,
 					      unsigned long address,
 					      unsigned long flags,
 					      unsigned long reason)
@@ -448,7 +450,8 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
 						  reason);
 	else
-		must_wait = userfaultfd_huge_must_wait(ctx, vmf->address,
+		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
+						       vmf->address,
 						       vmf->flags, reason);
 	up_read(&mm->mmap_sem);
 
@@ -137,7 +137,8 @@ extern struct list_head huge_boot_pages;
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz);
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz);
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			      int write);
@@ -190,7 +191,7 @@ static inline void hugetlb_show_meminfo(void)
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
 				src_addr, pagep)	({ BUG(); 0; })
-#define huge_pte_offset(mm, address)	0
+#define huge_pte_offset(mm, address, sz)	0
 static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 {
 	return 0;
mm/hugetlb.c (23 lines changed)
@@ -3246,7 +3246,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
-		src_pte = huge_pte_offset(src, addr);
+		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
 		dst_pte = huge_pte_alloc(dst, addr, sz);
@@ -3330,7 +3330,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
 	for (; address < end; address += sz) {
-		ptep = huge_pte_offset(mm, address);
+		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
 			continue;
 
@@ -3548,7 +3548,8 @@ retry_avoidcopy:
 		unmap_ref_private(mm, vma, old_page, address);
 		BUG_ON(huge_pte_none(pte));
 		spin_lock(ptl);
-		ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+		ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+				       huge_page_size(h));
 		if (likely(ptep &&
 			   pte_same(huge_ptep_get(ptep), pte)))
 			goto retry_avoidcopy;
@@ -3587,7 +3588,8 @@ retry_avoidcopy:
 	 * before the page tables are altered
 	 */
 	spin_lock(ptl);
-	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+	ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+			       huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);
 
@@ -3874,7 +3876,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	address &= huge_page_mask(h);
 
-	ptep = huge_pte_offset(mm, address);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
@@ -4131,7 +4133,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 *
 		 * Note that page table lock is not held when pte is null.
 		 */
-		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
+				      huge_page_size(h));
 		if (pte)
 			ptl = huge_pte_lock(h, mm, pte);
 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
@@ -4270,7 +4273,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
-		ptep = huge_pte_offset(mm, address);
+		ptep = huge_pte_offset(mm, address, huge_page_size(h));
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
@@ -4534,7 +4537,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 
 		saddr = page_table_shareable(svma, vma, addr, idx);
 		if (saddr) {
-			spte = huge_pte_offset(svma->vm_mm, saddr);
+			spte = huge_pte_offset(svma->vm_mm, saddr,
+					       vma_mmu_pagesize(svma));
 			if (spte) {
 				get_page(virt_to_page(spte));
 				break;
@@ -4630,7 +4634,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -116,7 +116,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address);
+		pvmw->pte = huge_pte_offset(mm, pvmw->address,
+					    PAGE_SIZE << compound_order(page));
 		if (!pvmw->pte)
 			return false;
 
@@ -180,12 +180,13 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
 	struct hstate *h = hstate_vma(vma);
 	unsigned long next;
 	unsigned long hmask = huge_page_mask(h);
+	unsigned long sz = huge_page_size(h);
 	pte_t *pte;
 	int err = 0;
 
 	do {
 		next = hugetlb_entry_end(h, addr, end);
-		pte = huge_pte_offset(walk->mm, addr & hmask);
+		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
 		if (pte && walk->hugetlb_entry)
 			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
 		if (err)