x86/mm: Fold p4d page table layer at runtime
Change page table helpers to fold p4d at runtime.
The logic is the same as in <asm-generic/pgtable-nop4d.h>.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214182542.69302-8-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6f9dd32971
commit 98219dda2a
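For orientation only (this is not part of the commit): a minimal standalone C sketch of the pattern the hunks below apply. Each p4d-level helper tests a runtime flag (pgtable_l5_enabled in the kernel) and, when 5-level paging is off, treats the pgd entry itself as the p4d entry, which is the same folding that <asm-generic/pgtable-nop4d.h> performs at compile time. The simplified types, the bare pointer cast, and the main() driver are assumptions made for illustration, not kernel code.

/* Standalone model of runtime p4d folding; simplified types, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

typedef struct { unsigned long pgd; } pgd_t;	/* top-level entry */
typedef struct { unsigned long p4d; } p4d_t;	/* 4th-level entry */

static bool pgtable_l5_enabled;	/* in the kernel this is fixed once at boot */

/* With 5-level paging off, the p4d "table" is the pgd entry itself. */
static p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled)
		return (p4d_t *)pgd;	/* fold: reuse the pgd slot */
	(void)address;			/* a real walk would index a p4d page here */
	return (p4d_t *)pgd;
}

/* A folded level can never be "none"; the real check moves down a level. */
static int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled)
		return 0;
	return pgd.pgd == 0;
}

int main(void)
{
	pgd_t entry = { .pgd = 0 };

	pgtable_l5_enabled = false;
	printf("4-level: pgd_none=%d, p4d slot==pgd slot: %d\n",
	       pgd_none(entry), p4d_offset(&entry, 0) == (p4d_t *)&entry);

	pgtable_l5_enabled = true;
	printf("5-level: pgd_none=%d\n", pgd_none(entry));
	return 0;
}

When 4-level mode is selected at boot, the walk above never dereferences a separate p4d page, which is why the helpers in this patch can short-circuit on pgtable_l5_enabled instead of relying on the compile-time <asm-generic/pgtable-nop4d.h> stubs.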
@@ -569,13 +569,15 @@ static inline p4dval_t p4d_val(p4d_t p4d)
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	pgdval_t val = native_pgd_val(pgd);
-
-	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
+	if (pgtable_l5_enabled)
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+	else
+		set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd });
 }

 static inline void pgd_clear(pgd_t *pgdp)
 {
-	set_pgd(pgdp, __pgd(0));
+	if (pgtable_l5_enabled)
+		set_pgd(pgdp, __pgd(0));
 }

@@ -167,6 +167,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
+	if (!pgtable_l5_enabled)
+		return;
 	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
@@ -191,6 +193,7 @@ extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long address)
 {
-	___p4d_free_tlb(tlb, p4d);
+	if (pgtable_l5_enabled)
+		___p4d_free_tlb(tlb, p4d);
 }

@@ -65,7 +65,7 @@ extern pmdval_t early_pmd_flags;

 #ifndef __PAGETABLE_P4D_FOLDED
 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)			native_pgd_clear(pgd)
+#define pgd_clear(pgd)			(pgtable_l5_enabled ? native_pgd_clear(pgd) : 0)
 #endif

 #ifndef set_p4d
@@ -859,6 +859,8 @@ static inline unsigned long p4d_index(unsigned long address)
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline int pgd_present(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 1;
 	return pgd_flags(pgd) & _PAGE_PRESENT;
 }

@@ -876,6 +878,8 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 /* to find an entry in a page-table-directory. */
 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
+	if (!pgtable_l5_enabled)
+		return (p4d_t *)pgd;
 	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 }

@@ -883,6 +887,9 @@ static inline int pgd_bad(pgd_t pgd)
 {
 	unsigned long ignore_flags = _PAGE_USER;

+	if (!pgtable_l5_enabled)
+		return 0;
+
 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 		ignore_flags |= _PAGE_NX;

@@ -891,6 +898,8 @@ static inline int pgd_bad(pgd_t pgd)

 static inline int pgd_none(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 0;
 	/*
 	 * There is no need to do a workaround for the KNL stray
 	 * A/D bit erratum here. PGDs only point to page tables