commit 944a45abfa
There are the following issues in arm64 kdump:

1. We use crashkernel=X to reserve crashkernel memory in the DMA zone,
   which fails when there is not enough low memory.
2. If the crashkernel is instead reserved above the DMA zone, the crash
   dump kernel fails to boot because no low memory is available for
   allocation.

To solve these issues, introduce crashkernel=X,[high,low]. The
"crashkernel=X,high" parameter is used to select a region above the DMA
zone, and the "crashkernel=Y,low" parameter is used to allocate the
specified amount of low memory.

Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Co-developed-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Link: https://lore.kernel.org/r/20220506114402.365-4-thunder.leizhen@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
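As an illustration (the sizes here are made up, not taken from the patch), such a reservation could be requested on the kernel command line as:

    crashkernel=2G,high crashkernel=256M,low

With ",high", the kernel is free to place the main reservation above the DMA zone; the ",low" value sets aside the low memory the crash dump kernel needs for DMA-capable allocations.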
192 lines
4.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

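/*
 * Image loaders tried in turn by kexec_file_load(); arm64 supports
 * only the plain Image format, handled by kexec_image_ops.
 */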
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

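/*
 * Free the resources that loading allocated for this image: the copied
 * device tree blob and, for crash kernels, the ELF core headers.
 */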
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}

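/*
 * Build ELF core (vmcore) headers describing all of System RAM except
 * the crashkernel reservation(s), for use by the crash dump kernel.
 */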
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 1; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

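	/*
	 * A "crashkernel=Y,low" (or fallback) reservation places a
	 * second region in low memory; exclude it from the dump
	 * ranges as well.
	 */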
	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to
 * find valid locations, this function will undo changes to the image and
 * return non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* not allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
				+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);
	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}
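For context, here is a minimal userspace sketch of how this loader is reached. It is not part of this file; the paths, command line, and error handling are illustrative. Calling kexec_file_load(2) with KEXEC_FILE_ON_CRASH takes the crash path above, which is what triggers prepare_elf_headers().

/* Illustrative only: load a crash kernel via kexec_file_load(2). */
#include <fcntl.h>
#include <linux/kexec.h>	/* KEXEC_FILE_ON_CRASH */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char *cmdline = "root=/dev/vda1 console=ttyAMA0";	/* illustrative */
	int kernel_fd = open("/boot/Image", O_RDONLY);		/* hypothetical path */
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);	/* hypothetical path */

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * KEXEC_FILE_ON_CRASH loads into the crashkernel reservation,
	 * the case in which load_other_segments() above also prepares
	 * the ELF core headers. cmdline_len includes the trailing NUL.
	 */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline,
		    (unsigned long)KEXEC_FILE_ON_CRASH)) {
		perror("kexec_file_load");
		return 1;
	}

	return 0;
}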