2019-06-03 13:44:50 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-03-05 19:49:27 +08:00
|
|
|
/*
|
|
|
|
* Based on arch/arm/mm/init.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1995-2005 Russell King
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/export.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/swap.h>
|
|
|
|
#include <linux/init.h>
|
2016-08-15 14:45:46 +08:00
|
|
|
#include <linux/cache.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
#include <linux/mman.h>
|
|
|
|
#include <linux/nodemask.h>
|
|
|
|
#include <linux/initrd.h>
|
|
|
|
#include <linux/gfp.h>
|
|
|
|
#include <linux/memblock.h>
|
|
|
|
#include <linux/sort.h>
|
2017-04-03 10:24:32 +08:00
|
|
|
#include <linux/of.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
#include <linux/of_fdt.h>
|
2019-10-15 02:31:03 +08:00
|
|
|
#include <linux/dma-direct.h>
|
2020-09-11 16:56:52 +08:00
|
|
|
#include <linux/dma-map-ops.h>
|
2014-07-29 02:03:03 +08:00
|
|
|
#include <linux/efi.h>
|
2015-02-06 02:01:53 +08:00
|
|
|
#include <linux/swiotlb.h>
|
2016-09-05 19:30:22 +08:00
|
|
|
#include <linux/vmalloc.h>
|
2017-01-11 05:35:49 +08:00
|
|
|
#include <linux/mm.h>
|
2017-04-03 10:24:32 +08:00
|
|
|
#include <linux/kexec.h>
|
arm64: kdump: provide /proc/vmcore file
Arch-specific functions are added to allow for implementing a crash dump
file interface, /proc/vmcore, which can be viewed as a ELF file.
A user space tool, like kexec-tools, is responsible for allocating
a separate region for the core's ELF header within crash kdump kernel
memory and filling it in when executing kexec_load().
Then, its location will be advertised to crash dump kernel via a new
device-tree property, "linux,elfcorehdr", and crash dump kernel preserves
the region for later use with reserve_elfcorehdr() at boot time.
On crash dump kernel, /proc/vmcore will access the primary kernel's memory
with copy_oldmem_page(), which feeds the data page-by-page by ioremap'ing
it since it does not reside in linear mapping on crash dump kernel.
Meanwhile, elfcorehdr_read() is simple as the region is always mapped.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2017-04-03 10:24:38 +08:00
|
|
|
#include <linux/crash_dump.h>
|
mm: hugetlb: optionally allocate gigantic hugepages using cma
Commit 944d9fec8d7a ("hugetlb: add support for gigantic page allocation
at runtime") has added the run-time allocation of gigantic pages.
However it actually works only at early stages of the system loading,
when the majority of memory is free. After some time the memory gets
fragmented by non-movable pages, so the chances to find a contiguous 1GB
block are getting close to zero. Even dropping caches manually doesn't
help a lot.
At large scale rebooting servers in order to allocate gigantic hugepages
is quite expensive and complex. At the same time keeping some constant
percentage of memory in reserved hugepages even if the workload isn't
using it is a big waste: not all workloads can benefit from using 1 GB
pages.
The following solution can solve the problem:
1) On boot time a dedicated cma area* is reserved. The size is passed
as a kernel argument.
2) Run-time allocations of gigantic hugepages are performed using the
cma allocator and the dedicated cma area
In this case gigantic hugepages can be allocated successfully with a
high probability, however the memory isn't completely wasted if nobody
is using 1GB hugepages: it can be used for pagecache, anon memory, THPs,
etc.
* On a multi-node machine a per-node cma area is allocated on each node.
Following gigantic hugetlb allocation are using the first available
numa node if the mask isn't specified by a user.
Usage:
1) configure the kernel to allocate a cma area for hugetlb allocations:
pass hugetlb_cma=10G as a kernel argument
2) allocate hugetlb pages as usual, e.g.
echo 10 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
If the option isn't enabled or the allocation of the cma area failed,
the current behavior of the system is preserved.
x86 and arm-64 are covered by this patch, other architectures can be
trivially added later.
The patch contains clean-ups and fixes proposed and implemented by Aslan
Bakirov and Randy Dunlap. It also contains ideas and suggestions
proposed by Rik van Riel, Michal Hocko and Mike Kravetz. Thanks!
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Andreas Schaufler <andreas.schaufler@gmx.de>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Cc: Aslan Bakirov <aslan@fb.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Link: http://lkml.kernel.org/r/20200407163840.92263-3-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-04-11 05:32:45 +08:00
|
|
|
#include <linux/hugetlb.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2016-02-16 20:52:42 +08:00
|
|
|
#include <asm/boot.h>
|
2014-07-17 00:42:43 +08:00
|
|
|
#include <asm/fixmap.h>
|
2016-02-16 20:52:40 +08:00
|
|
|
#include <asm/kasan.h>
|
2016-02-16 20:52:42 +08:00
|
|
|
#include <asm/kernel-pgtable.h>
|
arm64: Fix overlapping VA allocations
PCI IO space was intended to be 16MiB, at 32MiB below MODULES_VADDR, but
commit d1e6dc91b532d3d3 ("arm64: Add architectural support for PCI")
extended this to cover the full 32MiB. The final 8KiB of this 32MiB is
also allocated for the fixmap, allowing for potential clashes between
the two.
This change was masked by assumptions in mem_init and the page table
dumping code, which assumed the I/O space to be 16MiB long through
seaparte hard-coded definitions.
This patch changes the definition of the PCI I/O space allocation to
live in asm/memory.h, along with the other VA space allocations. As the
fixmap allocation depends on the number of fixmap entries, this is moved
below the PCI I/O space allocation. Both the fixmap and PCI I/O space
are guarded with 2MB of padding. Sites assuming the I/O space was 16MiB
are moved over use new PCI_IO_{START,END} definitions, which will keep
in sync with the size of the IO space (now restored to 16MiB).
As a useful side effect, the use of the new PCI_IO_{START,END}
definitions prevents a build issue in the dumping code due to a (now
redundant) missing include of io.h for PCI_IOBASE.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
[catalin.marinas@arm.com: reorder FIXADDR and PCI_IO address_markers_idx enum]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2015-01-23 02:20:35 +08:00
|
|
|
#include <asm/memory.h>
|
2016-04-09 06:50:27 +08:00
|
|
|
#include <asm/numa.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/setup.h>
|
2019-05-15 06:46:51 +08:00
|
|
|
#include <linux/sizes.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
#include <asm/tlb.h>
|
2014-11-14 23:54:08 +08:00
|
|
|
#include <asm/alternative.h>
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2019-10-15 02:31:03 +08:00
|
|
|
/* ZONE_DMA spans the low 30 bits of physical address space (first 1 GB). */
#define ARM64_ZONE_DMA_BITS	30

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory as some devices, namely the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
 * bit addressable memory area.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2017-04-03 10:24:32 +08:00
|
|
|
#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/* Advertise the region so it shows up in /proc/iomem as "Crash kernel" */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
/* No kexec support configured: nothing to reserve. */
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
|
|
|
|
|
arm64: kdump: provide /proc/vmcore file
Arch-specific functions are added to allow for implementing a crash dump
file interface, /proc/vmcore, which can be viewed as a ELF file.
A user space tool, like kexec-tools, is responsible for allocating
a separate region for the core's ELF header within crash kdump kernel
memory and filling it in when executing kexec_load().
Then, its location will be advertised to crash dump kernel via a new
device-tree property, "linux,elfcorehdr", and crash dump kernel preserves
the region for later use with reserve_elfcorehdr() at boot time.
On crash dump kernel, /proc/vmcore will access the primary kernel's memory
with copy_oldmem_page(), which feeds the data page-by-page by ioremap'ing
it since it does not reside in linear mapping on crash dump kernel.
Meanwhile, elfcorehdr_read() is simple as the region is always mapped.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2017-04-03 10:24:38 +08:00
|
|
|
#ifdef CONFIG_CRASH_DUMP
|
|
|
|
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
|
|
|
|
const char *uname, int depth, void *data)
|
|
|
|
{
|
|
|
|
const __be32 *reg;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (depth != 1 || strcmp(uname, "chosen") != 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
|
|
|
|
if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, ®);
|
|
|
|
elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, ®);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* reserve_elfcorehdr() - reserves memory for elf core header
|
|
|
|
*
|
|
|
|
* This function reserves the memory occupied by an elf core header
|
|
|
|
* described in the device tree. This region contains all the
|
|
|
|
* information about primary kernel's core image and is used by a dump
|
|
|
|
* capture kernel to access the system memory on primary kernel.
|
|
|
|
*/
|
|
|
|
static void __init reserve_elfcorehdr(void)
|
|
|
|
{
|
|
|
|
of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
|
|
|
|
|
|
|
|
if (!elfcorehdr_size)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
|
|
|
|
pr_warn("elfcorehdr is overlapped\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
|
|
|
|
|
|
|
|
pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
|
|
|
|
elfcorehdr_size >> 10, elfcorehdr_addr);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static void __init reserve_elfcorehdr(void)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_CRASH_DUMP */
|
2019-09-12 02:25:45 +08:00
|
|
|
|
2014-07-18 18:54:37 +08:00
|
|
|
/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		/* DRAM entirely above 4 GB: no useful DMA limit, cover it all */
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		/* DRAM starts above the requested limit but below 4 GB */
		zone_mask = U32_MAX;

	/* Clamp to the end of DRAM; result is an exclusive upper bound. */
	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
|
|
|
|
|
2016-04-09 06:50:27 +08:00
|
|
|
/*
 * Populate the per-zone maximum PFN table from the DMA limits computed in
 * arm64_memblock_init() and hand it to the core MM to set up the zones.
 */
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
/*
 * Report whether @pfn refers to a page of mapped, present memory.
 *
 * The upper-bits check rejects PFNs whose shift into a physical address
 * would overflow and alias a valid lower PFN (e.g. bogus offsets fed in
 * via /proc/kpageflags).
 */
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	/* PFN had bits above PHYS range: the shift lost information. */
	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__pfn_to_section(pfn)))
		return 0;
#endif
	/* Finally, the page must be in memblock and mapped (no NOMAP). */
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
|
|
|
|
|
2018-06-15 06:28:02 +08:00
|
|
|
/* Upper bound on usable physical memory; PHYS_ADDR_MAX means "no limit". */
static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	/* Round the requested limit down to a page boundary. */
	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
|
|
|
|
|
2017-04-03 10:24:31 +08:00
|
|
|
/*
 * FDT scan callback: read the "linux,usable-memory-range" property from
 * the /chosen node into the memblock_region passed via @data. The crash
 * dump kernel uses this to restrict itself to the region set aside by
 * the primary kernel.
 *
 * Returns 1 to stop scanning once /chosen has been examined, 0 to keep
 * scanning other nodes.
 */
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
|
|
|
|
|
|
|
|
/*
 * Apply the "linux,usable-memory-range" restriction from the device tree,
 * if present, by capping memblock to that single region. A zero size
 * (property absent) leaves memory untouched.
 */
static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
/*
 * Early memblock setup: establish memstart_addr (the PA of PAGE_OFFSET),
 * trim memory that cannot be covered by the linear map, apply command-line
 * and FDT memory limits, reserve the kernel image and initrd, and compute
 * the ZONE_DMA/ZONE_DMA32 physical limits.
 *
 * NOTE(review): statement order here is load-bearing — reservations must
 * happen after the removals/caps that precede them.
 */
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		zone_dma_bits = ARM64_ZONE_DMA_BITS;
		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma32_phys_limit = PHYS_MASK + 1;

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
}
|
|
|
|
|
|
|
|
/*
 * Second-stage boot memory setup: NUMA init, CMA reservations, sparse
 * memory model init, zone sizing, and the crashkernel reservation.
 * Runs after arm64_memblock_init() has finalized the memblock layout.
 */
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();

	/*
	 * must be done after arm64_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}
|
|
|
|
|
|
|
|
#ifndef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Release the part of the mem_map array covering [start_pfn, end_pfn)
 * back to memblock. Only meaningful without SPARSEMEM_VMEMMAP, where
 * the memmap is a plain allocation rather than a virtual mapping.
 */
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}
|
|
|
|
|
|
|
|
/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	/* Free the tail of the final bank's section, if it is unused. */
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
|
|
|
|
|
|
|
|
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	/*
	 * Bring up the SWIOTLB bounce buffer if forced, or if any RAM lies
	 * beyond the tightest DMA zone limit (DMA if configured, else DMA32).
	 */
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
|
|
|
|
|
|
|
|
/*
 * Release the kernel's __init text/data region back to the page allocator
 * once boot is complete, poisoning it to catch late references.
 */
void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
|
|
|
|
|
2020-06-29 12:38:31 +08:00
|
|
|
void dump_mem_limit(void)
|
2016-02-16 20:52:42 +08:00
|
|
|
{
|
2018-06-15 06:28:02 +08:00
|
|
|
if (memory_limit != PHYS_ADDR_MAX) {
|
2016-02-16 20:52:42 +08:00
|
|
|
pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
|
|
|
|
} else {
|
|
|
|
pr_emerg("Memory Limit: none\n");
|
|
|
|
}
|
|
|
|
}
|