LoongArch: Set initial pte entry with PAGE_GLOBAL for kernel space
On LoongArch, one TLB entry covers two consecutive pages. For kernel space, both PTE entries of such a pair (buddies) must have the PAGE_GLOBAL bit set, otherwise the hardware treats the TLB entry as non-global. A non-global TLB entry for kernel space causes problems, e.g. local_flush_tlb_kernel_range(), which is supposed to flush only TLB entries with the global bit, fails to flush it.

Kernel address space areas include the percpu, vmalloc, vmemmap, fixmap and kasan areas. For these areas, both of the two consecutive page table entries must be enabled with the PAGE_GLOBAL bit, so set_pte() and pte_clear() used to check and update the pte buddy entry besides the entry itself. However, setting two pte entries is not an atomic operation, which leads to failures in the test_vmalloc test case.

So add the function kernel_pte_init() to initialize a pte table when it is created for kernel address space, making the default initial pte value PAGE_GLOBAL rather than zero. Then set_pte() and pte_clear() only need to update their own pte entry and can leave the buddy entry alone.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
commit d2f8671045
parent 134475a9ab
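For background on the pair invariant the old code maintained by hand: the two PTEs backing one TLB entry are adjacent, differing only in the lowest bit of their index, so each entry's buddy is found by flipping that bit (the kernel does this with a pointer XOR in ptep_buddy()). Below is a minimal userspace sketch of the old versus new scheme; buddy_demo.c, buddy_index() and the _PAGE_GLOBAL bit position are illustrative stand-ins, not kernel definitions.

/* buddy_demo.c -- sketch of the PTE buddy pair and the two schemes. */
#include <stdio.h>
#include <inttypes.h>

#define _PAGE_GLOBAL (UINT64_C(1) << 6)	/* illustrative bit position */

/* The two PTEs of a pair differ only in the lowest index bit. */
static size_t buddy_index(size_t i)
{
	return i ^ 1;
}

int main(void)
{
	uint64_t table[8] = { 0 };	/* tiny stand-in for a PTE table */

	/* Old scheme: a global set_pte() must also mark the buddy,
	 * and those are two separate, non-atomic stores.
	 */
	table[2] = UINT64_C(0x1000) | _PAGE_GLOBAL;
	table[buddy_index(2)] |= _PAGE_GLOBAL;

	/* New scheme: pre-initialize the whole table once, then each
	 * set_pte()/pte_clear() touches only its own slot.
	 */
	for (size_t i = 0; i < 8; i++)
		table[i] = _PAGE_GLOBAL;
	table[2] = UINT64_C(0x1000) | _PAGE_GLOBAL;	/* buddy already global */

	printf("pte=%#" PRIx64 " buddy=%#" PRIx64 "\n", table[2], table[3]);
	return 0;
}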
arch/loongarch/include/asm/pgalloc.h
@@ -10,6 +10,7 @@
 #define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
 #include <asm-generic/pgalloc.h>
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -44,6 +45,16 @@ extern void pagetable_init(void);
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	pte_t *pte = __pte_alloc_one_kernel(mm);
+
+	if (pte)
+		kernel_pte_init(pte);
+
+	return pte;
+}
+
 #define __pte_free_tlb(tlb, pte, address)	\
 do {						\
 	pagetable_pte_dtor(page_ptdesc(pte));	\
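Defining __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL suppresses the generic pte_alloc_one_kernel() so the arch override above is used instead. For context, the generic fallback in include/asm-generic/pgalloc.h is guarded approximately like this (a from-memory sketch, not verbatim kernel source):

/* include/asm-generic/pgalloc.h (approximate): */
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif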
arch/loongarch/include/asm/pgtable.h
@@ -269,6 +269,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
 extern void pgd_init(void *addr);
 extern void pud_init(void *addr);
 extern void pmd_init(void *addr);
+extern void kernel_pte_init(void *addr);
 
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
@@ -325,39 +326,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
 	WRITE_ONCE(*ptep, pteval);
-
-	if (pte_val(pteval) & _PAGE_GLOBAL) {
-		pte_t *buddy = ptep_buddy(ptep);
-		/*
-		 * Make sure the buddy is global too (if it's !none,
-		 * it better already be global)
-		 */
-		if (pte_none(ptep_get(buddy))) {
 #ifdef CONFIG_SMP
-			/*
-			 * For SMP, multiple CPUs can race, so we need
-			 * to do this atomically.
-			 */
-			__asm__ __volatile__(
-			__AMOR "$zero, %[global], %[buddy] \n"
-			: [buddy] "+ZB" (buddy->pte)
-			: [global] "r" (_PAGE_GLOBAL)
-			: "memory");
-
-			DBAR(0b11000); /* o_wrw = 0b11000 */
-#else /* !CONFIG_SMP */
-			WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
-#endif /* CONFIG_SMP */
-		}
-	}
+	if (pte_val(pteval) & _PAGE_GLOBAL)
+		DBAR(0b11000); /* o_wrw = 0b11000 */
+#endif
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	/* Preserve global status for the pair */
-	if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
-		set_pte(ptep, __pte(_PAGE_GLOBAL));
-	else
-		set_pte(ptep, __pte(0));
+	pte_t pte = ptep_get(ptep);
+
+	pte_val(pte) &= _PAGE_GLOBAL;
+	set_pte(ptep, pte);
 }
 
 #define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
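The rewritten pte_clear() no longer looks at the buddy at all: since kernel PTE tables are now born with every slot set to _PAGE_GLOBAL, masking an entry down to its global bit is enough to keep the pair invariant. A minimal userspace sketch of that mask-and-write follows; the bit position is illustrative, not LoongArch's actual encoding.

/* pte_clear_demo.c -- sketch of the new clear-to-global behavior. */
#include <stdio.h>
#include <inttypes.h>

#define _PAGE_GLOBAL UINT64_C(0x40)	/* illustrative bit position */

/* Mirrors the new pte_clear(): drop everything but the global bit. */
static void pte_clear_value(uint64_t *ptep)
{
	*ptep &= _PAGE_GLOBAL;
}

int main(void)
{
	uint64_t pte = UINT64_C(0x1000) | _PAGE_GLOBAL;	/* mapped, global */

	pte_clear_value(&pte);
	printf("cleared pte: %#" PRIx64 "\n", pte);	/* prints 0x40 */
	return 0;
}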
arch/loongarch/mm/init.c
@@ -201,7 +201,9 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 		if (!pte)
 			panic("%s: Failed to allocate memory\n", __func__);
+
 		pmd_populate_kernel(&init_mm, pmd, pte);
+		kernel_pte_init(pte);
 	}
 
 	return pte_offset_kernel(pmd, addr);
arch/loongarch/mm/pgtable.c
@@ -116,6 +116,26 @@ void pud_init(void *addr)
 EXPORT_SYMBOL_GPL(pud_init);
 #endif
 
+void kernel_pte_init(void *addr)
+{
+	unsigned long *p, *end;
+
+	p = (unsigned long *)addr;
+	end = p + PTRS_PER_PTE;
+
+	do {
+		p[0] = _PAGE_GLOBAL;
+		p[1] = _PAGE_GLOBAL;
+		p[2] = _PAGE_GLOBAL;
+		p[3] = _PAGE_GLOBAL;
+		p[4] = _PAGE_GLOBAL;
+		p += 8;
+		p[-3] = _PAGE_GLOBAL;
+		p[-2] = _PAGE_GLOBAL;
+		p[-1] = _PAGE_GLOBAL;
+	} while (p != end);
+}
+
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
 {
 	pmd_t pmd;
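kernel_pte_init() is manually unrolled to fill eight slots per iteration: it stores p[0]..p[4], advances p by 8, then stores the former p[5]..p[7] as p[-3]..p[-1]. Behaviorally it is equivalent to this plain loop, shown only for readability (a sketch; PTRS_PER_PTE and _PAGE_GLOBAL come from the arch headers):

/* Non-unrolled equivalent of kernel_pte_init() above (sketch): */
void kernel_pte_init_simple(void *addr)
{
	unsigned long *p = addr;
	unsigned long *end = p + PTRS_PER_PTE;

	while (p != end)
		*p++ = _PAGE_GLOBAL;
}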
include/linux/mm.h
@@ -3818,8 +3818,9 @@ void *sparse_buffer_alloc(unsigned long size);
 struct page * __populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
 		struct dev_pagemap *pgmap);
 void pmd_init(void *addr);
 void pud_init(void *addr);
+void kernel_pte_init(void *addr);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
mm/kasan/init.c
@@ -106,6 +106,10 @@ static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
 	}
 }
 
+void __weak __meminit kernel_pte_init(void *addr)
+{
+}
+
 static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 				unsigned long end)
 {
@@ -126,8 +130,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 
 			if (slab_is_available())
 				p = pte_alloc_one_kernel(&init_mm);
-			else
+			else {
 				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+				kernel_pte_init(p);
+			}
 			if (!p)
 				return -ENOMEM;
 
mm/sparse-vmemmap.c
@@ -184,6 +184,10 @@ static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
 	return p;
 }
 
+void __weak __meminit kernel_pte_init(void *addr)
+{
+}
+
 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
@@ -191,6 +195,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
+		kernel_pte_init(p);
 		pmd_populate_kernel(&init_mm, pmd, p);
 	}
 	return pmd;