linux/arch/xtensa/mm/init.c
Anshuman Khandual 2de9eae10d xtensa/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private and static protection_map[] array.  All the
__SXXX and __PXXX macros, which are no longer needed, can subsequently be
dropped.
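
For reference, the generic DECLARE_VM_GET_PAGE_PROT helper from
include/linux/pgtable.h expands to roughly the following (a sketch; the
exact macro body may differ between kernel versions):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Look up the arch-private table using only the four
		 * access bits of the VMA flags.
		 */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);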

Link: https://lkml.kernel.org/r/20220711070600.2378316-12-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Guo Ren <guoren@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-07-17 17:14:39 -07:00

/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/dma-map-ops.h>

#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>
/*
 * Initialize the bootmem system and give it all low memory we have available.
 */

void __init bootmem_init(void)
{
        /* Reserve all memory below PHYS_OFFSET, as memory
         * accounting doesn't work for pages below that address.
         *
         * If PHYS_OFFSET is zero, reserve the page at address 0:
         * successful allocations should never return NULL.
         */
        memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

        early_init_fdt_scan_reserved_mem();

        if (!memblock_phys_mem_size())
                panic("No memory found!\n");

        min_low_pfn = PFN_UP(memblock_start_of_DRAM());
        min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
        max_pfn = PFN_DOWN(memblock_end_of_DRAM());
        max_low_pfn = min(max_pfn, MAX_LOW_PFN);

        early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

        memblock_set_current_limit(PFN_PHYS(max_low_pfn));
        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

        memblock_dump_all();
}
void __init zones_init(void)
{
        /* All pages are DMA-able, so we put them all in the DMA zone. */
        unsigned long max_zone_pfn[MAX_NR_ZONES] = {
                [ZONE_NORMAL] = max_low_pfn,
#ifdef CONFIG_HIGHMEM
                [ZONE_HIGHMEM] = max_pfn,
#endif
        };
        free_area_init(max_zone_pfn);
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        phys_addr_t range_start, range_end;
        u64 i;

        /* set highmem page free */
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                &range_start, &range_end, NULL) {
                unsigned long start = PFN_UP(range_start);
                unsigned long end = PFN_DOWN(range_end);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                for (; start < end; start++)
                        free_highmem_page(pfn_to_page(start));
        }
#endif
}
/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
        free_highpages();

        max_mapnr = max_pfn - ARCH_PFN_OFFSET;
        high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

        memblock_free_all();

        pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
                "    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
                "    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
#endif
                "    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n"
                "    .text   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .rodata : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .data   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .init   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .bss    : 0x%08lx - 0x%08lx  (%5lu kB)\n",
#ifdef CONFIG_KASAN
                KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
                KASAN_SHADOW_SIZE >> 20,
#endif
#ifdef CONFIG_MMU
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
                (LAST_PKMAP * PAGE_SIZE) >> 10,
                FIXADDR_START, FIXADDR_END,
                (FIXADDR_END - FIXADDR_START) >> 10,
#endif
                PAGE_OFFSET, PAGE_OFFSET +
                (max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
                min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
                ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
                (unsigned long)_text, (unsigned long)_etext,
                (unsigned long)(_etext - _text) >> 10,
                (unsigned long)__start_rodata, (unsigned long)__end_rodata,
                (unsigned long)(__end_rodata - __start_rodata) >> 10,
                (unsigned long)_sdata, (unsigned long)_edata,
                (unsigned long)(_edata - _sdata) >> 10,
                (unsigned long)__init_begin, (unsigned long)__init_end,
                (unsigned long)(__init_end - __init_begin) >> 10,
                (unsigned long)__bss_start, (unsigned long)__bss_stop,
                (unsigned long)(__bss_stop - __bss_start) >> 10);
}
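
/*
 * Handle one "memmap=" early parameter entry:
 *   memmap=<size>@<start>  adds a RAM region via memblock_add()
 *   memmap=<size>$<start>  reserves a region via memblock_reserve()
 *   memmap=<limit>         reserves everything above <limit>, capping
 *                          the memory made available to the kernel
 */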
static void __init parse_memmap_one(char *p)
{
        char *oldp;
        unsigned long start_at, mem_size;

        if (!p)
                return;

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return;

        switch (*p) {
        case '@':
                start_at = memparse(p + 1, &p);
                memblock_add(start_at, mem_size);
                break;

        case '$':
                start_at = memparse(p + 1, &p);
                memblock_reserve(start_at, mem_size);
                break;

        case 0:
                memblock_reserve(mem_size, -mem_size);
                break;

        default:
                pr_warn("Unrecognized memmap syntax: %s\n", p);
                break;
        }
}

static int __init parse_memmap_opt(char *str)
{
        while (str) {
                char *k = strchr(str, ',');

                if (k)
                        *k++ = 0;

                parse_memmap_one(str);
                str = k;
        }

        return 0;
}
early_param("memmap", parse_memmap_opt);

#ifdef CONFIG_MMU
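/*
 * Architecture-private mapping from the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * flag combinations to page protections, consumed by the generic
 * vm_get_page_prot() that DECLARE_VM_GET_PAGE_PROT instantiates below.
 * Private writable mappings get PAGE_COPY* so they can be copied on write.
 */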
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
        [VM_WRITE | VM_READ]                            = PAGE_COPY,
        [VM_EXEC]                                       = PAGE_READONLY_EXEC,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY_EXEC,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY_EXEC,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_EXEC,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY_EXEC,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY_EXEC,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED_EXEC,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
#endif