powerpc/mm: update pmdp_invalidate to return old pmd value

Returning the old pmd value is required to avoid losing dirty and accessed bits.

Link: http://lkml.kernel.org/r/20171213105756.69879-7-kirill.shutemov@linux.intel.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
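
With pmdp_invalidate() returning the old entry, a caller can take the dirty and accessed bits from the very value that the invalidation atomically cleared from the page table, instead of re-reading *pmdp and racing with hardware updates. Below is a minimal, illustrative sketch of such a caller; split_huge_pmd_example() and install_split_entries() are hypothetical names used only for this example and are not part of this patch or of the kernel.

/* Hypothetical helper: rebuilds the mapping at pte level, carrying over
 * the preserved dirty/accessed state. */
extern void install_split_entries(struct vm_area_struct *vma,
				  unsigned long addr, bool dirty, bool young);

static void split_huge_pmd_example(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;

	/* Invalidate the entry and fetch its old value in one step. */
	old_pmd = pmdp_invalidate(vma, addr, pmdp);

	/*
	 * Dirty/accessed state is read from the returned value, not from
	 * a separate load of *pmdp, so updates made by hardware before
	 * the invalidation cannot be lost.
	 */
	install_split_entries(vma, addr, pmd_dirty(old_pmd),
			      pmd_young(old_pmd));
}

As the second hunk below shows, pmd_hugepage_update() already returns the previous raw entry on powerpc, so the implementation only needs to capture that value and hand it back wrapped in __pmd().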
commit 8cc931e033 (parent b6b34b2dfb)
Author: Aneesh Kumar K.V, 2018-01-31 16:18:02 -08:00; committed by Linus Torvalds
2 changed files with 7 additions and 4 deletions
arch/powerpc/include/asm/book3s/64:

@@ -1137,8 +1137,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
 }
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
 
 #define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
 static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,

arch/powerpc/mm:

@@ -90,16 +90,19 @@ void serialize_against_pte_lookup(struct mm_struct *mm)
  * We use this to invalidate a pmdp entry before switching from a
  * hugepte to regular pmd entry.
  */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
-	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+	unsigned long old_pmd;
+
+	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
 	 */
 	serialize_against_pte_lookup(vma->vm_mm);
+	return __pmd(old_pmd);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)