linux/arch/arm64/mm/init.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
 * otherwise it is empty.
 */
phys_addr_t __ro_after_init arm64_dma_phys_limit;

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT PMD_SHIFT
#endif

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region
 * has a direct correspondence, and needs to appear sufficiently aligned
 * in the virtual address space.
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
#endif
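
/*
 * Parse any "crashkernel=" options on the command line and, if a crash
 * kernel region was requested, hand the sizes to the generic reservation
 * helper so the memory is set aside before the page allocator is up.
 */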
static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}

/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
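
/*
 * Populate max_zone_pfns[] from the DMA addressing limits reported by the
 * ACPI IORT and the devicetree, then let the core mm set up the zones via
 * free_area_init().
 */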
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
}
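
/*
 * Report whether a pfn lies in memory that is covered by the kernel's
 * linear mapping, i.e. memblock memory that is not marked NOMAP.
 */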
int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);
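
/* Physical memory limit set via the "mem=" parameter; PHYS_ADDR_MAX means no limit. */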
static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;

	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
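
/*
 * Early memblock setup: pick the base of the linear map, clip memory that
 * the linear mapping cannot cover, apply any "mem=" limit and reserve the
 * kernel image and initrd before other early allocations take place.
 */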
void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);
	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if removing it left the
		 * initrd inaccessible via the linear mapping. Otherwise, this
		 * is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_add(base, size);
			memblock_clear_nomap(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
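
/*
 * Second stage of early memory setup: compute the pfn limits, initialise
 * NUMA, reserve the CMA, hypervisor and crashkernel regions, and size the
 * memory zones.
 */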
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init();

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	arch_reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
		/*
		 * If no bouncing needed for ZONE_DMA, reduce the swiotlb
		 * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
		 */
		unsigned long size =
			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
		swiotlb = true;
	}

	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
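
/*
 * Free the memory backing the kernel's .init sections once boot has
 * finished with them.
 */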
void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");

	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}
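
/* Report whether a "mem=" limit is in effect. */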
void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}