linux/arch/riscv/mm/init.c
Linus Torvalds 270315b823 RISC-V Patches for the 5.10 Merge Window, Part 1
This contains a handful of cleanups and new features, including:
 
 * A handful of cleanups for our page fault handling.
 * Improvements to how we fill out cacheinfo.
 * Support for EFI-based systems.
 
 ---
 
 This contains a merge from the EFI tree that was necessary as some of the EFI
 support landed over there.  It's my first time doing something like this.
 
 I haven't included the set_fs stuff because the base branch it depends on
 hasn't been merged yet.  I'll probably have another merge window PR, as
 there's more in flight (most notably the fix for new binutils I just sent out),
 but I figured there was no reason to delay this any longer.

Merge tag 'riscv-for-linus-5.10-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:
 "A handful of cleanups and new features:

   - A handful of cleanups for our page fault handling

   - Improvements to how we fill out cacheinfo

   - Support for EFI-based systems"

* tag 'riscv-for-linus-5.10-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (22 commits)
  RISC-V: Add page table dump support for uefi
  RISC-V: Add EFI runtime services
  RISC-V: Add EFI stub support.
  RISC-V: Add PE/COFF header for EFI stub
  RISC-V: Implement late mapping page table allocation functions
  RISC-V: Add early ioremap support
  RISC-V: Move DT mapping outof fixmap
  RISC-V: Fix duplicate included thread_info.h
  riscv/mm/fault: Set FAULT_FLAG_INSTRUCTION flag in do_page_fault()
  riscv/mm/fault: Fix inline placement in vmalloc_fault() declaration
  riscv: Add cache information in AUX vector
  riscv: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
  riscv: Set more data to cacheinfo
  riscv/mm/fault: Move access error check to function
  riscv/mm/fault: Move FAULT_FLAG_WRITE handling in do_page_fault()
  riscv/mm/fault: Simplify mm_fault_error()
  riscv/mm/fault: Move fault error handling to mm_fault_error()
  riscv/mm/fault: Simplify fault error handling
  riscv/mm/fault: Move vmalloc fault handling to vmalloc_fault()
  riscv/mm/fault: Move bad area handling to bad_area()
  ...
2020-10-19 18:18:30 -07:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA	PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;
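
/*
 * Page table allocation hooks for the three phases of boot:
 * "early" runs from setup_vm() with the MMU off, "fixmap" runs from
 * setup_vm_final() with the MMU on but only the early page table live,
 * and "late" runs once the swapper page table is in use and the
 * normal page allocator is available.
 */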
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};
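
/*
 * Memory below 4 GiB is placed in ZONE_DMA32 (when configured);
 * everything else goes to ZONE_NORMAL.
 */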
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);

	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */
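
/*
 * Trim usable RAM to what the linear map can cover (-PAGE_OFFSET
 * bytes), reserve the kernel image, initrd and DTB in memblock, and
 * initialise max_pfn/max_low_pfn.
 */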
void __init setup_bootmem(void)
{
	phys_addr_t mem_size = 0;
	phys_addr_t total_mem = 0;
	phys_addr_t mem_start, start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;

		if (!total_mem)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
		total_mem = total_mem + size;
	}

	/*
	 * Remove memblock from the end of usable area to the
	 * end of region
	 */
	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
	if (mem_start + mem_size < end)
		memblock_remove(mem_start + mem_size,
				end - mem_start - mem_size);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}
#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;
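
/*
 * va_pa_offset is the constant added to a physical address to obtain
 * its linear-map virtual address; pfn_base is the PFN at which the
 * kernel was loaded.  Both are set early in setup_vm().
 */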
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
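
/*
 * Install (or, when prot is empty, tear down) the PTE-level mapping
 * for one fixmap slot and flush it from the local TLB.
 */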
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
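
/*
 * Statically allocated PMD pages for the early mapping: one page per
 * PGD entry that MAX_EARLY_MAPPING_SIZE can span, handed out by
 * alloc_pmd_early() before any memory allocator is up.
 */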
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif
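
/*
 * Map va -> pa in pgdp at the requested granularity.  A PGDIR_SIZE
 * mapping is written directly into the PGD; anything smaller recurses
 * into the next level (PMD or PTE), allocating it through pt_ops on
 * first use.  For example, the early fixmap entry is installed with:
 *
 *	create_pgd_mapping(early_pg_dir, FIXADDR_START,
 *			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
 */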
void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}
/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
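
/*
 * With cmodel=medany the compiler materialises symbol addresses with
 * PC-relative auipc/addi sequences, so this code works both at the
 * link address and at the physical address it actually runs from
 * before the MMU is turned on.
 */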
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup an early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-ioremap range must not span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but page table setup is not
	 * complete yet, so the fixmap page table allocation functions
	 * must be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first available DTS */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */
#ifdef CONFIG_STRICT_KERNEL_RWX
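/*
 * Make kernel text read-only, rodata read-only and non-executable,
 * and everything from _data to the end of lowmem non-executable.
 */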
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif
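
/*
 * Register one /proc/iomem resource per memblock region: "System RAM"
 * for normal memory, "reserved" for nomap regions.
 */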
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_mem_region(region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}
void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif