powerpc/mm: move setting pte specific flags to pfn_pte
powerpc used to set the pte-specific flags in set_pte_at(). This is different
from other architectures. To be consistent with other architectures, update
pfn_pte() to set _PAGE_PTE on ppc64, and drop the now-unused pte_mkpte().

We add a VM_WARN_ON() to catch callers of set_pte_at() that do not set the
_PAGE_PTE bit. We will remove it after a few releases.

With respect to huge pmd entries, pmd_mkhuge() takes care of adding the
_PAGE_PTE bit.

[akpm@linux-foundation.org: whitespace fix, per Christophe]

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lkml.kernel.org/r/20200902114222.181353-3-aneesh.kumar@linux.ibm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 392b466981
commit 379c926d63
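As context for the diff below, here is a minimal, hypothetical sketch (not part of this commit) of how a generic caller builds and installs a PTE; the helper name example_map_pfn and its arguments are illustrative assumptions. The point it shows: after this change, the value returned by pfn_pte() on ppc64 already carries _PAGE_PTE, so set_pte_at() no longer has to add it via pte_mkpte().

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative sketch only (not from this commit): a generic caller
 * constructing and installing a PTE.  The helper name is an assumption
 * made up for this example; locking and TLB handling are omitted.
 */
static void example_map_pfn(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, unsigned long pfn)
{
	/* On ppc64, pfn_pte() now sets _PAGE_PTE itself. */
	pte_t entry = pfn_pte(pfn, vma->vm_page_prot);

	/*
	 * set_pte_at() no longer needs pte_mkpte(); the transitional
	 * VM_WARN_ON() in __set_pte_at() fires if _PAGE_PTE is missing.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, entry);
}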
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -615,7 +615,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
 	VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
 
-	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot));
+	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
 }
 
 static inline unsigned long pte_pfn(pte_t pte)
@@ -651,11 +651,6 @@ static inline pte_t pte_mkexec(pte_t pte)
 	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
 }
 
-static inline pte_t pte_mkpte(pte_t pte)
-{
-	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
-}
-
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 	/*
@@ -819,6 +814,14 @@ static inline int pte_none(pte_t pte)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
+
+	VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
+	/*
+	 * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
+	 * in all the callers.
+	 */
+	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+
 	if (radix_enabled())
 		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
 	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -140,11 +140,6 @@ static inline pte_t pte_mkold(pte_t pte)
 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }
 
-static inline pte_t pte_mkpte(pte_t pte)
-{
-	return pte;
-}
-
 static inline pte_t pte_mkspecial(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_SPECIAL);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
@@ -184,9 +184,6 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 */
 	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
 
-	/* Add the pte bit when trying to set a pte */
-	pte = pte_mkpte(pte);
-
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
 	 * is called.
@@ -275,8 +272,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_
 	 */
 	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
 
-	pte = pte_mkpte(pte);
-
 	pte = set_pte_filter(pte);
 
 	val = pte_val(pte);