mirror of https://github.com/edk2-porting/linux-next.git
LoongArch: Add sparse memory vmemmap support

Add sparse memory vmemmap support for LoongArch. SPARSEMEM_VMEMMAP
uses a virtually mapped memmap to optimise pfn_to_page and page_to_pfn
operations. This is the most efficient option when sufficient kernel
resources are available.

Link: https://lkml.kernel.org/r/20221027125253.3458989-3-chenhuacai@loongson.cn
Signed-off-by: Min Zhou <zhoumin@loongson.cn>
Signed-off-by: Feiyang Chen <chenfeiyang@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Will Deacon <will@kernel.org>
Cc: Xuefeng Li <lixuefeng@loongson.cn>
Cc: Xuerui Wang <kernel@xen0n.name>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 22c4e80466
commit 7b09f5af01
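The "virtually mapped memmap" the commit message refers to is the global
vmemmap array: every struct page sits at a virtual address determined
directly by its pfn, so both conversions collapse to plain pointer
arithmetic with no per-section lookup as in ordinary SPARSEMEM. For
reference, the generic CONFIG_SPARSEMEM_VMEMMAP definitions in
include/asm-generic/memory_model.h take this form:

/* memmap is virtually contiguous: pfn <-> page conversion is a single
 * addition or subtraction against the vmemmap base. */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)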
arch/loongarch/Kconfig
@@ -487,6 +487,7 @@ config ARCH_FLATMEM_ENABLE
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
 	help
 	  Say Y to support efficient handling of sparse physical memory,
 	  for architectures which are either NUMA (Non-Uniform Memory Access)
arch/loongarch/include/asm/pgtable.h
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <asm/addrspace.h>
+#include <asm/page.h>
 #include <asm/pgtable-bits.h>
 
 #if CONFIG_PGTABLE_LEVELS == 2
@@ -59,6 +60,7 @@
 #include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/sparsemem.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -86,7 +88,10 @@ extern unsigned long zero_page_mask;
 #define VMALLOC_START	MODULES_END
 #define VMALLOC_END	\
 	(vm_map_base +	\
-	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
+	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
+
+#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
+#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
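This hunk carves the vmemmap window out of the top of the vmalloc range:
VMALLOC_END retreats by VMEMMAP_SIZE, and the array base is rounded up to
the next PMD boundary so that vmemmap_populate() below can install
PMD-sized mappings. A hypothetical sanity check (illustration only, not
part of the patch; check_vmemmap_layout is an invented name) makes the
invariants explicit:

/* Hypothetical check: the base computed by
 * ((VMALLOC_END + PMD_SIZE) & PMD_MASK) is PMD-aligned, and the
 * reserved window lies wholly above VMALLOC_END. */
static void __init check_vmemmap_layout(void)
{
	BUG_ON((unsigned long)vmemmap & ~PMD_MASK);	/* PMD-aligned base */
	BUG_ON((unsigned long)vmemmap <= VMALLOC_END);	/* above vmalloc */
}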
arch/loongarch/include/asm/sparsemem.h
@@ -11,8 +11,16 @@
 #define SECTION_SIZE_BITS	29 /* 2^29 = Largest Huge Page Size */
 #define MAX_PHYSMEM_BITS	48
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_SIZE	(sizeof(struct page) * (1UL << (cpu_pabits + 1 - PAGE_SHIFT)))
+#endif
+
 #endif /* CONFIG_SPARSEMEM */
 
+#ifndef VMEMMAP_SIZE
+#define VMEMMAP_SIZE	0	/* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. */
+#endif
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 int memory_add_physaddr_to_nid(u64 addr);
 #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
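VMEMMAP_SIZE reserves one struct page for every possible page frame in
the cpu_pabits + 1 bits of physical address space. A worked example with
assumed values (16 KiB pages so PAGE_SHIFT = 14, a 48-bit physical space
so cpu_pabits + 1 = 48, and a typical sizeof(struct page) of 64 bytes):

/* Worked example; the actual numbers depend on the configuration and
 * the CPU, since cpu_pabits is read out at boot.
 *
 *   frames = 1UL << (48 - 14)  = 2^34 possible page frames
 *   size   = 2^34 * 64 bytes   = 2^40 bytes = 1 TiB of virtual space
 *
 * Only the parts of the window that back RAM which actually exists are
 * ever populated, so the reservation costs virtual space, not memory. */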
arch/loongarch/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/pfn.h>
 #include <linux/hardirq.h>
 #include <linux/gfp.h>
-#include <linux/initrd.h>
+#include <linux/hugetlb.h>
 #include <linux/mmzone.h>
 
 #include <asm/asm-offsets.h>
@@ -152,6 +152,72 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+						int node, struct vmem_altmap *altmap)
+{
+	unsigned long addr = start;
+	unsigned long next;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (addr = start; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+		p4d = vmemmap_p4d_populate(pgd, addr, node);
+		if (!p4d)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(p4d, addr, node);
+		if (!pud)
+			return -ENOMEM;
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			void *p = NULL;
+
+			p = vmemmap_alloc_block_buf(PMD_SIZE, node, NULL);
+			if (p) {
+				pmd_t entry;
+
+				entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
+				pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
+				set_pmd_at(&init_mm, addr, pmd, entry);
+
+				continue;
+			}
+		} else if (pmd_val(*pmd) & _PAGE_HUGE) {
+			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			continue;
+		}
+		if (vmemmap_populate_basepages(addr, next, node, NULL))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int __meminit vmemmap_populate(unsigned long start, unsigned long end,
+			       int node, struct vmem_altmap *altmap)
+{
+#if CONFIG_PGTABLE_LEVELS == 2
+	return vmemmap_populate_basepages(start, end, node, NULL);
+#else
+	return vmemmap_populate_hugepages(start, end, node, NULL);
+#endif
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
+{
+}
+#endif
+#endif
+
 static pte_t *fixmap_pte(unsigned long addr)
 {
 	pgd_t *pgd;
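For orientation, vmemmap_populate() is driven by the generic sparse code
when a section's memmap is created. A simplified sketch of that caller
(condensed from mm/sparse-vmemmap.c's __populate_section_memmap; the real
function also enforces subsection alignment, so treat details as
approximate):

/* Simplified sketch: the memmap addresses for a pfn range fall inside
 * the reserved vmemmap window, and the arch hook maps them. */
struct page *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);	/* vmemmap + pfn, now backed */
}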
@@ -168,7 +234,7 @@ static pte_t *fixmap_pte(unsigned long addr)
 		new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
 		pgd_populate(&init_mm, pgd, new);
 #ifndef __PAGETABLE_PUD_FOLDED
-		pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
+		pud_init(new);
 #endif
 	}
@@ -179,7 +245,7 @@ static pte_t *fixmap_pte(unsigned long addr)
 		new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
 		pud_populate(&init_mm, pud, new);
 #ifndef __PAGETABLE_PMD_FOLDED
-		pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
+		pmd_init(new);
 #endif
 	}
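The signature change is the point of these two hunks: pmd_init() and
pud_init() drop their parameters so the generic vmemmap code can call
them without knowing about the architecture's invalid-table pointers,
which become an implementation detail behind the hook. A minimal sketch
of what an arch-side strong definition looks like under that contract
(modelled on the old call sites above; the real LoongArch implementation
lives in arch/loongarch/mm/pgtable.c and may differ in detail):

/* Sketch of an architecture override: fill a freshly allocated
 * PUD-level table so every entry points at the shared invalid PMD
 * table instead of containing garbage. */
void pud_init(void *addr)
{
	unsigned long *entry = (unsigned long *)addr;
	unsigned long pagetable = (unsigned long)invalid_pmd_table;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++)
		entry[i] = pagetable;
}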
include/linux/mm.h
@@ -3360,6 +3360,8 @@ void *sparse_buffer_alloc(unsigned long size);
 struct page * __populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
 		struct dev_pagemap *pgmap);
+void pmd_init(void *addr);
+void pud_init(void *addr);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
mm/sparse-vmemmap.c
@@ -196,6 +196,10 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 	return pmd;
 }
 
+void __weak __meminit pmd_init(void *addr)
+{
+}
+
 pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
 	pud_t *pud = pud_offset(p4d, addr);
@@ -203,11 +207,16 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
+		pmd_init(p);
 		pud_populate(&init_mm, pud, p);
 	}
 	return pud;
 }
 
+void __weak __meminit pud_init(void *addr)
+{
+}
+
 p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
 	p4d_t *p4d = p4d_offset(pgd, addr);
@@ -215,6 +224,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
+		pud_init(p);
 		p4d_populate(&init_mm, p4d, p);
 	}
 	return p4d;