Merge branch for-next/module-alloc into kvmarm/next
* for-next/module-alloc:
  : Drag in module VA rework to handle conflicts w/ sw feature refactor
  arm64: module: rework module VA range selection
  arm64: module: mandate MODULE_PLTS
  arm64: module: move module randomization to module.c
  arm64: kaslr: split kaslr/module initialization
  arm64: kasan: remove !KASAN_VMALLOC remnants
  arm64: module: remove old !KASAN_VMALLOC logic

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit acfdf34c7d
@@ -33,8 +33,8 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
   0000000000000000      0000ffffffffffff         256TB          user
   ffff000000000000      ffff7fffffffffff         128TB          kernel logical memory map
  [ffff600000000000      ffff7fffffffffff]         32TB          [kasan shadow region]
-  ffff800000000000      ffff800007ffffff         128MB          modules
-  ffff800008000000      fffffbffefffffff         124TB          vmalloc
+  ffff800000000000      ffff80007fffffff           2GB          modules
+  ffff800080000000      fffffbffefffffff         124TB          vmalloc
   fffffbfff0000000      fffffbfffdffffff         224MB          fixed mappings (top down)
   fffffbfffe000000      fffffbfffe7fffff           8MB          [guard region]
   fffffbfffe800000      fffffbffff7fffff          16MB          PCI I/O space
@@ -50,8 +50,8 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
   0000000000000000      000fffffffffffff           4PB          user
   fff0000000000000      ffff7fffffffffff          ~4PB          kernel logical memory map
  [fffd800000000000      ffff7fffffffffff]        512TB          [kasan shadow region]
-  ffff800000000000      ffff800007ffffff         128MB          modules
-  ffff800008000000      fffffbffefffffff         124TB          vmalloc
+  ffff800000000000      ffff80007fffffff           2GB          modules
+  ffff800080000000      fffffbffefffffff         124TB          vmalloc
   fffffbfff0000000      fffffbfffdffffff         224MB          fixed mappings (top down)
   fffffbfffe000000      fffffbfffe7fffff           8MB          [guard region]
   fffffbfffe800000      fffffbffff7fffff          16MB          PCI I/O space
@@ -207,6 +207,7 @@ config ARM64
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KVM
+       select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NMI
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
@@ -577,7 +578,6 @@ config ARM64_ERRATUM_845719
 config ARM64_ERRATUM_843419
        bool "Cortex-A53: 843419: A load or store might access an incorrect address"
        default y
-       select ARM64_MODULE_PLTS if MODULES
        help
          This option links the kernel with '--fix-cortex-a53-843419' and
          enables PLT support to replace certain ADRP instructions, which can
@@ -2107,26 +2107,6 @@ config ARM64_SME
          register state capable of holding two dimensional matrix tiles to
          enable various matrix operations.

-config ARM64_MODULE_PLTS
-       bool "Use PLTs to allow module memory to spill over into vmalloc area"
-       depends on MODULES
-       select HAVE_MOD_ARCH_SPECIFIC
-       help
-         Allocate PLTs when loading modules so that jumps and calls whose
-         targets are too far away for their relative offsets to be encoded
-         in the instructions themselves can be bounced via veneers in the
-         module's PLT. This allows modules to be allocated in the generic
-         vmalloc area after the dedicated module memory area has been
-         exhausted.
-
-         When running with address space randomization (KASLR), the module
-         region itself may be too far away for ordinary relative jumps and
-         calls, and so in that case, module PLTs are required and cannot be
-         disabled.
-
-         Specific errata workaround(s) might also force module PLTs to be
-         enabled (ARM64_ERRATUM_843419).
-
 config ARM64_PSEUDO_NMI
        bool "Support for NMI-like interrupts"
        select ARM_GIC_V3
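Note: the help text removed above is the best summary of what a module PLT does: a branch whose target lies outside the +/-128M reach of a B/BL instruction is bounced through a small veneer that materialises the full 64-bit target. A minimal C sketch of the idea; the struct layout mirrors the kernel's plt_entry, while the range check and constants are restated here purely for illustration::

    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_128M (UINT64_C(128) << 20)

    /*
     * One module PLT entry, as on arm64: ADRP/ADD materialise the
     * target address in x16, and BR jumps through it.
     */
    struct plt_entry {
        uint32_t adrp;  /* ADRP x16, <target page>         */
        uint32_t add;   /* ADD  x16, x16, #<page offset>   */
        uint32_t br;    /* BR   x16                        */
    };

    /*
     * A B/BL (JUMP26/CALL26) instruction encodes a signed 26-bit word
     * offset, i.e. +/-128M of the branch itself; anything further away
     * must be bounced via a veneer like the one above.
     */
    static bool branch_in_range(uint64_t pc, uint64_t target)
    {
        int64_t off = (int64_t)(target - pc);

        return off >= -(int64_t)SZ_128M && off < (int64_t)SZ_128M;
    }

module_emit_plt_entry() (unchanged by this series) fills in such entries at module load time when apply_relocate_add() reports -ERANGE, as seen later in this diff.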
@@ -2167,7 +2147,6 @@ config RELOCATABLE

 config RANDOMIZE_BASE
        bool "Randomize the address of the kernel image"
-       select ARM64_MODULE_PLTS if MODULES
        select RELOCATABLE
        help
          Randomizes the virtual address at which the kernel image is
@@ -2198,9 +2177,8 @@ config RANDOMIZE_MODULE_REGION_FULL
          When this option is not set, the module region will be randomized over
          a limited range that contains the [_stext, _etext] interval of the
          core kernel, so branch relocations are almost always in range unless
-         ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
-         particular case of region exhaustion, modules might be able to fall
-         back to a larger 2GB area.
+         the region is exhausted. In this particular case of region
+         exhaustion, modules might be able to fall back to a larger 2GB area.

 config CC_HAVE_STACKPROTECTOR_SYSREG
        def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
@@ -46,7 +46,7 @@
 #define KIMAGE_VADDR           (MODULES_END)
 #define MODULES_END            (MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR          (_PAGE_END(VA_BITS_MIN))
-#define MODULES_VSIZE          (SZ_128M)
+#define MODULES_VSIZE          (SZ_2G)
 #define VMEMMAP_START          (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
 #define VMEMMAP_END            (VMEMMAP_START + VMEMMAP_SIZE)
 #define PCI_IO_END             (VMEMMAP_START - SZ_8M)
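With MODULES_VSIZE raised to SZ_2G, MODULES_END (and therefore KIMAGE_VADDR) moves up by the same amount, which is exactly the 2GB modules row in the updated memory.rst tables above. A standalone sketch of that arithmetic, assuming a 48-bit VA_BITS_MIN and restating _PAGE_END() locally::

    #include <inttypes.h>
    #include <stdio.h>

    #define VA_BITS_MIN  48
    /* _PAGE_END(va): start of the upper half of the kernel VA space. */
    #define PAGE_END(va) (-(UINT64_C(1) << ((va) - 1)))
    #define SZ_2G        (UINT64_C(2) << 30)

    int main(void)
    {
        uint64_t modules_vaddr = PAGE_END(VA_BITS_MIN);
        uint64_t modules_end = modules_vaddr + SZ_2G;

        /* Prints ffff800000000000 / ffff800080000000: the 2GB modules
         * window, with vmalloc (and the kernel image) starting at its end. */
        printf("%016" PRIx64 " %016" PRIx64 "\n", modules_vaddr, modules_end);
        return 0;
    }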
@@ -204,15 +204,17 @@ static inline unsigned long kaslr_offset(void)
        return kimage_vaddr - KIMAGE_VADDR;
 }

 #ifdef CONFIG_RANDOMIZE_BASE
+void kaslr_init(void);
+
 static inline bool kaslr_enabled(void)
 {
-       /*
-        * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
-        * placement of the image rather than from the seed, so a displacement
-        * of less than MIN_KIMG_ALIGN means that no seed was provided.
-        */
-       return kaslr_offset() >= MIN_KIMG_ALIGN;
+       extern bool __kaslr_is_enabled;
+
+       return __kaslr_is_enabled;
 }
 #else
+static inline void kaslr_init(void) { }
 static inline bool kaslr_enabled(void) { return false; }
 #endif

 /*
  * Allow all memory at the discovery stage. We will clip it later.
@@ -7,7 +7,6 @@

 #include <asm-generic/module.h>

-#ifdef CONFIG_ARM64_MODULE_PLTS
 struct mod_plt_sec {
        int plt_shndx;
        int plt_num_entries;
@@ -21,7 +20,6 @@ struct mod_arch_specific {
        /* for CONFIG_DYNAMIC_FTRACE */
        struct plt_entry *ftrace_trampolines;
 };
-#endif

 u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
                          void *loc, const Elf64_Rela *rela,
@@ -30,12 +28,6 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
 u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                                void *loc, u64 val);

-#ifdef CONFIG_RANDOMIZE_BASE
-extern u64 module_alloc_base;
-#else
-#define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
-#endif
-
 struct plt_entry {
        /*
         * A program that conforms to the AArch64 Procedure Call Standard
@@ -1,9 +1,7 @@
 SECTIONS {
-#ifdef CONFIG_ARM64_MODULE_PLTS
        .plt 0 : { BYTE(0) }
        .init.plt 0 : { BYTE(0) }
        .text.ftrace_trampoline 0 : { BYTE(0) }
-#endif

 #ifdef CONFIG_KASAN_SW_TAGS
        /*
@@ -42,8 +42,7 @@ obj-$(CONFIG_COMPAT) += sigreturn32.o
 obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS)  += compat_alignment.o
 obj-$(CONFIG_KUSER_HELPERS)            += kuser32.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o entry-ftrace.o
-obj-$(CONFIG_MODULES)                  += module.o
-obj-$(CONFIG_ARM64_MODULE_PLTS)        += module-plts.o
+obj-$(CONFIG_MODULES)                  += module.o module-plts.o
 obj-$(CONFIG_PERF_EVENTS)              += perf_regs.o perf_callchain.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)                   += sleep.o suspend.o
@@ -197,7 +197,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)

 static struct plt_entry *get_ftrace_plt(struct module *mod)
 {
-#ifdef CONFIG_ARM64_MODULE_PLTS
+#ifdef CONFIG_MODULES
        struct plt_entry *plt = mod->arch.ftrace_trampolines;

        return &plt[FTRACE_PLT_IDX];
@@ -249,7 +249,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
         * must use a PLT to reach it. We can only place PLTs for modules, and
         * only when module PLT support is built-in.
         */
-       if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+       if (!IS_ENABLED(CONFIG_MODULES))
                return false;

        /*
@@ -431,10 +431,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
         *
         * Note: 'mod' is only set at module load time.
         */
-       if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
-           IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
+       if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
                return aarch64_insn_patch_text_nosync((void *)pc, new);
-       }

        if (!ftrace_find_callable_addr(rec, mod, &addr))
                return -EINVAL;
@@ -4,90 +4,35 @@
  */

 #include <linux/cache.h>
-#include <linux/crc32.h>
 #include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/pgtable.h>
-#include <linux/random.h>
+#include <linux/printk.h>

-#include <asm/fixmap.h>
-#include <asm/kernel-pgtable.h>
 #include <asm/cpufeature.h>
 #include <asm/memory.h>
-#include <asm/mmu.h>
-#include <asm/sections.h>
-#include <asm/setup.h>

-u64 __ro_after_init module_alloc_base;
 u16 __initdata memstart_offset_seed;

-static int __init kaslr_init(void)
+bool __ro_after_init __kaslr_is_enabled = false;
+
+void __init kaslr_init(void)
 {
-       u64 module_range;
-       u32 seed;
-
-       /*
-        * Set a reasonable default for module_alloc_base in case
-        * we end up running with module randomization disabled.
-        */
-       module_alloc_base = (u64)_etext - MODULES_VSIZE;
-
        if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
                                                 arm64_sw_feature_override.mask,
                                                 ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) {
                pr_info("KASLR disabled on command line\n");
-               return 0;
+               return;
        }

-       if (!kaslr_enabled()) {
+       /*
+        * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
+        * placement of the image rather than from the seed, so a displacement
+        * of less than MIN_KIMG_ALIGN means that no seed was provided.
+        */
+       if (kaslr_offset() < MIN_KIMG_ALIGN) {
                pr_warn("KASLR disabled due to lack of seed\n");
-               return 0;
+               return;
        }

        pr_info("KASLR enabled\n");

-       /*
-        * KASAN without KASAN_VMALLOC does not expect the module region to
-        * intersect the vmalloc region, since shadow memory is allocated for
-        * each module at load time, whereas the vmalloc region will already be
-        * shadowed by KASAN zero pages.
-        */
-       BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-                     IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
-                    !IS_ENABLED(CONFIG_KASAN_VMALLOC));
-
-       seed = get_random_u32();
-
-       if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
-               /*
-                * Randomize the module region over a 2 GB window covering the
-                * kernel. This reduces the risk of modules leaking information
-                * about the address of the kernel itself, but results in
-                * branches between modules and the core kernel that are
-                * resolved via PLTs. (Branches between modules will be
-                * resolved normally.)
-                */
-               module_range = SZ_2G - (u64)(_end - _stext);
-               module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
-       } else {
-               /*
-                * Randomize the module region by setting module_alloc_base to
-                * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
-                * _stext) . This guarantees that the resulting region still
-                * covers [_stext, _etext], and that all relative branches can
-                * be resolved without veneers unless this region is exhausted
-                * and we fall back to a larger 2GB window in module_alloc()
-                * when ARM64_MODULE_PLTS is enabled.
-                */
-               module_range = MODULES_VSIZE - (u64)(_etext - _stext);
-       }
-
-       /* use the lower 21 bits to randomize the base of the module region */
-       module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
-       module_alloc_base &= PAGE_MASK;
-
-       return 0;
+       __kaslr_is_enabled = true;
 }
-subsys_initcall(kaslr_init)
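For reference, the deleted randomization step scaled the low 21 bits of the seed linearly across the slack left in the module window, then rounded down to a page boundary. A userspace restatement with made-up base/range/seed values (the real work now happens in module.c's random_bounding_box(), later in this diff)::

    #include <inttypes.h>
    #include <stdio.h>

    #define PAGE_MASK (~UINT64_C(0xfff))    /* assuming 4K pages */

    int main(void)
    {
        uint64_t base = UINT64_C(0xffff800008000000); /* hypothetical default */
        uint64_t range = UINT64_C(96) << 20;  /* slack left after kernel text */
        uint32_t seed = 0x1234abcd;           /* would come from get_random_u32() */

        /* Scale the low 21 bits of the seed linearly across the range,
         * then round down to a page boundary. */
        base += (range * (seed & ((1 << 21) - 1))) >> 21;
        base &= PAGE_MASK;

        printf("module_alloc_base = %#" PRIx64 "\n", base);
        return 0;
    }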
@@ -7,6 +7,8 @@
  * Author: Will Deacon <will.deacon@arm.com>
  */

+#define pr_fmt(fmt) "Modules: " fmt
+
 #include <linux/bitops.h>
 #include <linux/elf.h>
 #include <linux/ftrace.h>
@@ -15,52 +17,131 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
+#include <linux/random.h>
 #include <linux/scs.h>
 #include <linux/vmalloc.h>

 #include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/scs.h>
+#include <asm/sections.h>
+
+static u64 module_direct_base __ro_after_init = 0;
+static u64 module_plt_base __ro_after_init = 0;
+
+/*
+ * Choose a random page-aligned base address for a window of 'size' bytes which
+ * entirely contains the interval [start, end - 1].
+ */
+static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
+{
+       u64 max_pgoff, pgoff;
+
+       if ((end - start) >= size)
+               return 0;
+
+       max_pgoff = (size - (end - start)) / PAGE_SIZE;
+       pgoff = get_random_u32_inclusive(0, max_pgoff);
+
+       return start - pgoff * PAGE_SIZE;
+}
+
+/*
+ * Modules may directly reference data and text anywhere within the kernel
+ * image and other modules. References using PREL32 relocations have a +/-2G
+ * range, and so we need to ensure that the entire kernel image and all modules
+ * fall within a 2G window such that these are always within range.
+ *
+ * Modules may directly branch to functions and code within the kernel text,
+ * and to functions and code within other modules. These branches will use
+ * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
+ * that the entire kernel text and all module text falls within a 128M window
+ * such that these are always within range. With PLTs, we can expand this to a
+ * 2G window.
+ *
+ * We chose the 128M region to surround the entire kernel image (rather than
+ * just the text) as using the same bounds for the 128M and 2G regions ensures
+ * by construction that we never select a 128M region that is not a subset of
+ * the 2G region. For very large and unusual kernel configurations this means
+ * we may fall back to PLTs where they could have been avoided, but this keeps
+ * the logic significantly simpler.
+ */
+static int __init module_init_limits(void)
+{
+       u64 kernel_end = (u64)_end;
+       u64 kernel_start = (u64)_text;
+       u64 kernel_size = kernel_end - kernel_start;
+
+       /*
+        * The default modules region is placed immediately below the kernel
+        * image, and is large enough to use the full 2G relocation range.
+        */
+       BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
+       BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
+
+       if (!kaslr_enabled()) {
+               if (kernel_size < SZ_128M)
+                       module_direct_base = kernel_end - SZ_128M;
+               if (kernel_size < SZ_2G)
+                       module_plt_base = kernel_end - SZ_2G;
+       } else {
+               u64 min = kernel_start;
+               u64 max = kernel_end;
+
+               if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+                       pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
+               } else {
+                       module_direct_base = random_bounding_box(SZ_128M, min, max);
+                       if (module_direct_base) {
+                               min = module_direct_base;
+                               max = module_direct_base + SZ_128M;
+                       }
+               }
+
+               module_plt_base = random_bounding_box(SZ_2G, min, max);
+       }
+
+       pr_info("%llu pages in range for non-PLT usage",
+               module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
+       pr_info("%llu pages in range for PLT usage",
+               module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
+
+       return 0;
+}
+subsys_initcall(module_init_limits);

 void *module_alloc(unsigned long size)
 {
-       u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
-       gfp_t gfp_mask = GFP_KERNEL;
-       void *p;
+       void *p = NULL;

-       /* Silence the initial allocation */
-       if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
-               gfp_mask |= __GFP_NOWARN;
+       /*
+        * Where possible, prefer to allocate within direct branch range of the
+        * kernel such that no PLTs are necessary.
+        */
+       if (module_direct_base) {
+               p = __vmalloc_node_range(size, MODULE_ALIGN,
+                                        module_direct_base,
+                                        module_direct_base + SZ_128M,
+                                        GFP_KERNEL | __GFP_NOWARN,
+                                        PAGE_KERNEL, 0, NUMA_NO_NODE,
+                                        __builtin_return_address(0));
+       }

-       if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-           IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-               /* don't exceed the static module region - see below */
-               module_alloc_end = MODULES_END;
+       if (!p && module_plt_base) {
+               p = __vmalloc_node_range(size, MODULE_ALIGN,
+                                        module_plt_base,
+                                        module_plt_base + SZ_2G,
+                                        GFP_KERNEL | __GFP_NOWARN,
+                                        PAGE_KERNEL, 0, NUMA_NO_NODE,
+                                        __builtin_return_address(0));
+       }

-       p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-                                module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
-                                NUMA_NO_NODE, __builtin_return_address(0));
+       if (!p) {
+               pr_warn_ratelimited("%s: unable to allocate memory\n",
+                                   __func__);
+       }

-       if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-           (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
-            (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-             !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
-               /*
-                * KASAN without KASAN_VMALLOC can only deal with module
-                * allocations being served from the reserved module region,
-                * since the remainder of the vmalloc region is already
-                * backed by zero shadow pages, and punching holes into it
-                * is non-trivial. Since the module region is not randomized
-                * when KASAN is enabled without KASAN_VMALLOC, it is even
-                * less likely that the module region gets exhausted, so we
-                * can simply omit this fallback in that case.
-                */
-               p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-                               module_alloc_base + SZ_2G, GFP_KERNEL,
-                               PAGE_KERNEL, 0, NUMA_NO_NODE,
-                               __builtin_return_address(0));
-
-       if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
+       if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
                vfree(p);
                return NULL;
        }
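To make random_bounding_box() concrete: with a fixed page offset standing in for get_random_u32_inclusive(), the chosen window always starts at or below 'start' and still covers [start, end). A self-contained sketch with hypothetical addresses and a 64M kernel image::

    #include <inttypes.h>
    #include <stdio.h>

    #define PAGE_SIZE UINT64_C(0x1000)

    /* Choose a page-aligned base for a 'size'-byte window containing
     * [start, end); here pgoff is a parameter instead of being random. */
    static uint64_t bounding_box(uint64_t size, uint64_t start, uint64_t end,
                                 uint64_t pgoff)
    {
        uint64_t max_pgoff;

        if ((end - start) >= size)
            return 0;   /* interval too big: no window fits */

        max_pgoff = (size - (end - start)) / PAGE_SIZE;
        if (pgoff > max_pgoff)
            pgoff = max_pgoff;

        return start - pgoff * PAGE_SIZE;
    }

    int main(void)
    {
        /* Hypothetical 64M kernel image placed at this VA. */
        uint64_t start = UINT64_C(0xffff800010000000);
        uint64_t end = start + (UINT64_C(64) << 20);
        uint64_t size = UINT64_C(128) << 20;    /* SZ_128M window */
        uint64_t base = bounding_box(size, start, end, 1024);

        /* Window [start - 4M, start + 124M) still contains the image. */
        printf("window [%#" PRIx64 ", %#" PRIx64 ")\n", base, base + size);
        return 0;
    }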
@@ -448,9 +529,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);
-
-                       if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-                           ovf == -ERANGE) {
+                       if (ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                                if (!val)
                                        return -ENOEXEC;
@@ -487,7 +566,7 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
                                  const Elf_Shdr *sechdrs,
                                  struct module *mod)
 {
-#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+#if defined(CONFIG_DYNAMIC_FTRACE)
        const Elf_Shdr *s;
        struct plt_entry *plts;

@@ -296,6 +296,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)

        *cmdline_p = boot_command_line;

+       kaslr_init();
+
        /*
         * If know now we are going to need KPTI then use non-global
         * mappings from the start, avoiding the cost of rewriting
@@ -214,7 +214,7 @@ static void __init clear_pgds(unsigned long start,
 static void __init kasan_init_shadow(void)
 {
        u64 kimg_shadow_start, kimg_shadow_end;
-       u64 mod_shadow_start, mod_shadow_end;
+       u64 mod_shadow_start;
        u64 vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;
@@ -223,7 +223,6 @@ static void __init kasan_init_shadow(void)
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
-       mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

@@ -246,17 +245,9 @@ static void __init kasan_init_shadow(void)
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                    (void *)mod_shadow_start);

-       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
-               BUILD_BUG_ON(VMALLOC_START != MODULES_END);
-               kasan_populate_early_shadow((void *)vmalloc_shadow_end,
-                                           (void *)KASAN_SHADOW_END);
-       } else {
-               kasan_populate_early_shadow((void *)kimg_shadow_end,
-                                           (void *)KASAN_SHADOW_END);
-               if (kimg_shadow_start > mod_shadow_end)
-                       kasan_populate_early_shadow((void *)mod_shadow_end,
-                                                   (void *)kimg_shadow_start);
-       }
+       BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+       kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+                                   (void *)KASAN_SHADOW_END);

        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);
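All of the shadow bounds in this hunk come from kasan_mem_to_shadow(). For generic KASAN that mapping is one shadow byte per 8 bytes of address space; a sketch of the arithmetic, with an illustrative offset (the real value is derived from the VA layout)::

    #include <stdint.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3    /* 8 bytes per shadow byte */
    #define KASAN_SHADOW_OFFSET      UINT64_C(0xdfffa00000000000) /* illustrative */

    /* Generic KASAN: shadow address covering a given memory address. */
    static inline uint64_t mem_to_shadow(uint64_t addr)
    {
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }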