ARM development updates for 5.14-rc1:

 - Make it clear __swp_entry_to_pte() uses PTE_TYPE_FAULT
 - Updates for setting the vmalloc size via the command line, resolving an
   issue where the 8MiB hole was not properly accounted for, and cleaning
   up the code.
 - ftrace support for module PLTs
 - Spelling fixes
 - kbuild updates for removing generated files and pattern rules for
   generating files
 - Clang/LLVM updates
 - Change the way the kernel is mapped, placing it in vmalloc space
   instead of in lowmem.
 - Remove arm_pm_restart from arm and aarch64.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmDi5ewACgkQ9OeQG+St
 rGRfLg//cWUq/FBgRWggSgvLGBnbqwJABKFnynVy7c+g+kPxNudDHjL9a2A8c6aR
 oTBMzaQvfRCQA2drgGK2fZ02sCHJxStX8d6Y6WyVaVEIBZPH6y09gZy1wW0/fIZS
 S8qk82WaASddk/kvNeFrWD/5qNT4tz8COndZeYbBpEsXw/5RjIqSQqyn0k5CZqUj
 0lL95y1AW9vD9AH7OYyYMB6pLwDMt0LCTSynx/o6ZmaysX56KdM8c3ziiUllWwJB
 TIR03DeSpCZMiJMjwZUiWVl2BLjTES9WE2klZYulhgfh+ljlhkHvO+i8B+qy8kDS
 JHIXHnuMi3GjSFg6MlP/s21pLHT6yuCZ8dSGaACa+HEf1s0nRnE9wl2kzUFcJtLY
 jHAE5YyvO0BLJHCMuRGiB77rKwI92ij4yxKHvchU0BRlpgaVYcBmhZfqdVGnB4VO
 Mu2pMaHLzEdrkfLteYJ7bvKn0o5cD/G3wj/9UDAzJ6ME91LINiNqzgub68pf1KTe
 /YipxKipqcpbSBeysZAkfqTbMNB5WuxNnfmgwU15ZyfZsalcXSYEDkYex5+GGgOc
 w36VddVtQXNKd0LuCfoquda3hIjLvgCNf62ZDFNDXgOHcVu8okYXwZi9vyYg6xIn
 0gfh/T/lK0DoLWul0/CuLpSnsjw+1T7WTgKlvgLYGusWIQ2mC7w=
 =dq60
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM development updates from Russell King:

 - Make it clear __swp_entry_to_pte() uses PTE_TYPE_FAULT

 - Updates for setting the vmalloc size via the command line, resolving an
   issue where the 8MiB hole was not properly accounted for, and cleaning
   up the code.

 - ftrace support for module PLTs

 - Spelling fixes

 - kbuild updates for removing generated files and pattern rules for
   generating files

 - Clang/LLVM updates

 - Change the way the kernel is mapped, placing it in vmalloc space
   instead of in lowmem.

 - Remove arm_pm_restart from arm and aarch64.

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (29 commits)
  ARM: 9098/1: ftrace: MODULE_PLT: Fix build problem without DYNAMIC_FTRACE
  ARM: 9097/1: mmu: Declare section start/end correctly
  ARM: 9096/1: Remove arm_pm_restart()
  ARM: 9095/1: ARM64: Remove arm_pm_restart()
  ARM: 9094/1: Register with kernel restart handler
  ARM: 9093/1: drivers: firmware: psci: Register with kernel restart handler
  ARM: 9092/1: xen: Register with kernel restart handler
  ARM: 9091/1: Revert "mm: qsd8x50: Fix incorrect permission faults"
  ARM: 9090/1: Map the lowmem and kernel separately
  ARM: 9089/1: Define kernel physical section start and end
  ARM: 9088/1: Split KERNEL_OFFSET from PAGE_OFFSET
  ARM: 9087/1: kprobes: test-thumb: fix for LLVM_IAS=1
  ARM: 9086/1: syscalls: use pattern rules to generate syscall headers
  ARM: 9085/1: remove unneeded abi parameter to syscallnr.sh
  ARM: 9084/1: simplify the build rule of mach-types.h
  ARM: 9083/1: uncompress: atags_to_fdt: Spelling s/REturn/Return/
  ARM: 9082/1: [v2] mark prepare_page_table as __init
  ARM: 9079/1: ftrace: Add MODULE_PLTS support
  ARM: 9078/1: Add warn suppress parameter to arm_gen_branch_link()
  ARM: 9077/1: PLT: Move struct plt_entries definition to header
  ...
Linus Torvalds 2021-07-06 11:52:58 -07:00
commit 77d34a4683
26 changed files with 323 additions and 159 deletions

View File

@@ -66,6 +66,8 @@ config UNWINDER_FRAME_POINTER
config UNWINDER_ARM
bool "ARM EABI stack unwinder"
depends on AEABI && !FUNCTION_GRAPH_TRACER
# https://github.com/ClangBuiltLinux/linux/issues/732
depends on !LD_IS_LLD || LLD_VERSION >= 110000
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel

View File

@@ -100,7 +100,7 @@ targets := vmlinux vmlinux.lds piggy_data piggy.o \
lib1funcs.o ashldi3.o bswapsdi2.o \
head.o $(OBJS)
clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
clean-files += lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING

View File

@@ -121,7 +121,7 @@ static void hex_str(char *out, uint32_t value)
/*
* Convert and fold provided ATAGs into the provided FDT.
*
* REturn values:
* Return values:
* = 0 -> pretend success
* = 1 -> bad ATAG (may retry with another possible ATAG pointer)
* < 0 -> error from libfdt

View File

@@ -15,6 +15,9 @@ extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
#ifdef CONFIG_ARM_MODULE_PLTS
struct module *mod;
#endif
};
static inline unsigned long ftrace_call_adjust(unsigned long addr)

View File

@@ -13,18 +13,18 @@ arm_gen_nop(void)
}
unsigned long
__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
static inline unsigned long
arm_gen_branch(unsigned long pc, unsigned long addr)
{
return __arm_gen_branch(pc, addr, false);
return __arm_gen_branch(pc, addr, false, true);
}
static inline unsigned long
arm_gen_branch_link(unsigned long pc, unsigned long addr)
arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
{
return __arm_gen_branch(pc, addr, true);
return __arm_gen_branch(pc, addr, true, warn);
}
#endif

View File

@@ -20,8 +20,14 @@
#endif
#include <asm/kasan_def.h>
/* PAGE_OFFSET - the virtual address of the start of the kernel image */
/*
* PAGE_OFFSET: the virtual address of the start of lowmem, memory above
* the virtual address range for userspace.
* KERNEL_OFFSET: the virtual address of the start of the kernel image.
* we may further offset this with TEXT_OFFSET in practice.
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define KERNEL_OFFSET (PAGE_OFFSET)
#ifdef CONFIG_MMU
@@ -152,6 +158,13 @@ extern unsigned long vectors_base;
#ifndef __ASSEMBLY__
/*
* Physical start and end address of the kernel sections. These addresses are
* 2MB-aligned to match the section mappings placed over the kernel.
*/
extern u32 kernel_sec_start;
extern u32 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h

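The point of the PAGE_OFFSET/KERNEL_OFFSET split above is that lowmem and the kernel image no longer have to share a start address. A minimal user-space sketch of the relationship; CONFIG_PAGE_OFFSET and TEXT_OFFSET are configuration-dependent, so the constants here are assumptions:

#include <stdio.h>

#define PAGE_OFFSET   0xC0000000UL /* assumed: start of lowmem (CONFIG_PAGE_OFFSET) */
#define KERNEL_OFFSET PAGE_OFFSET  /* identical today, but now a separate knob */
#define TEXT_OFFSET   0x00008000UL /* assumed: typical ARM text offset */

int main(void)
{
        unsigned long kernel_ram_vaddr = KERNEL_OFFSET + TEXT_OFFSET;

        printf("lowmem starts at 0x%08lx\n", PAGE_OFFSET);
        printf("kernel image at  0x%08lx\n", kernel_ram_vaddr);
        /* head.S asserts that the low 16 bits must be 0x8000 */
        return (kernel_ram_vaddr & 0xffff) == 0x8000 ? 0 : 1;
}
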
View File

@@ -19,8 +19,18 @@ enum {
};
#endif
#define PLT_ENT_STRIDE L1_CACHE_BYTES
#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
struct plt_entries {
u32 ldr[PLT_ENT_COUNT];
u32 lit[PLT_ENT_COUNT];
};
struct mod_plt_sec {
struct elf32_shdr *plt;
struct plt_entries *plt_ent;
int plt_count;
};

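The constants moved into this header encode a two-cache-line PLT layout: one line of ldr veneers followed by one line of literals. A quick sanity-check sketch of the sizing arithmetic, assuming the common 64-byte L1_CACHE_BYTES (the real value is configuration-dependent):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES 64                                  /* assumed typical value */
#define PLT_ENT_STRIDE L1_CACHE_BYTES
#define PLT_ENT_COUNT  (PLT_ENT_STRIDE / sizeof(uint32_t)) /* 16 entries per block */

struct plt_entries {
        uint32_t ldr[PLT_ENT_COUNT]; /* one cache line of ldr instructions */
        uint32_t lit[PLT_ENT_COUNT]; /* one cache line of literal targets  */
};

#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)

int main(void)
{
        /* each entry costs one ldr word plus one literal word: 8 bytes */
        assert(PLT_ENT_SIZE == 8);
        printf("%zu entries per block, %zu bytes per entry\n",
               (size_t)PLT_ENT_COUNT, (size_t)PLT_ENT_SIZE);
        return 0;
}
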
View File

@@ -306,7 +306,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
#define __swp_entry_to_pte(swp) __pte((swp).val | PTE_TYPE_FAULT)
/*
* It is an error for the kernel to have more swap files than we can

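Why OR in PTE_TYPE_FAULT? On 2-level ARM the fault type happens to be zero, so the OR is a run-time no-op; its value is documentation, making explicit that a swap PTE must read as "fault" to the hardware walker so the kernel gets to decode it. A hedged sketch with assumed encodings (the mask and type values below are illustrative, not the architecture's definitions):

#include <stdint.h>
#include <stdio.h>

#define PTE_TYPE_MASK  0x3u /* assumed: low two hardware type bits */
#define PTE_TYPE_FAULT 0x0u /* assumed: "fault on access" type */

static uint32_t swp_entry_to_pte(uint32_t swp_val)
{
        /* ORing in PTE_TYPE_FAULT documents the intent even though it is zero */
        return swp_val | PTE_TYPE_FAULT;
}

int main(void)
{
        uint32_t pte = swp_entry_to_pte(0x12345u << 2); /* hypothetical swap value */

        printf("type bits = %u -> hardware faults, kernel decodes the swap entry\n",
               (unsigned)(pte & PTE_TYPE_MASK));
        return 0;
}
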
View File

@@ -13,7 +13,6 @@
extern void cpu_init(void);
void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

View File

@@ -68,9 +68,10 @@ int ftrace_arch_code_modify_post_process(void)
return 0;
}
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
bool warn)
{
return arm_gen_branch_link(pc, addr);
return arm_gen_branch_link(pc, addr, warn);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
@@ -104,14 +105,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int ret;
pc = (unsigned long)&ftrace_call;
new = ftrace_call_replace(pc, (unsigned long)func);
new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
new = ftrace_call_replace(pc, (unsigned long)func);
new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
}
@@ -124,10 +125,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long new, old;
unsigned long ip = rec->ip;
unsigned long aaddr = adjust_address(rec, addr);
struct module *mod = NULL;
#ifdef CONFIG_ARM_MODULE_PLTS
mod = rec->arch.mod;
#endif
old = ftrace_nop_replace(rec);
new = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_call_replace(ip, aaddr, !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
if (!new && mod) {
aaddr = get_module_plt(mod, ip, aaddr);
new = ftrace_call_replace(ip, aaddr, true);
}
#endif
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -140,9 +153,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long new, old;
unsigned long ip = rec->ip;
old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);
new = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_call_replace(ip, adjust_address(rec, addr), true);
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -152,12 +165,29 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long aaddr = adjust_address(rec, addr);
unsigned long ip = rec->ip;
unsigned long old;
unsigned long new;
int ret;
old = ftrace_call_replace(ip, adjust_address(rec, addr));
#ifdef CONFIG_ARM_MODULE_PLTS
/* mod is only supplied during module loading */
if (!mod)
mod = rec->arch.mod;
else
rec->arch.mod = mod;
#endif
old = ftrace_call_replace(ip, aaddr,
!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
if (!old && mod) {
aaddr = get_module_plt(mod, ip, aaddr);
old = ftrace_call_replace(ip, aaddr, true);
}
#endif
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);

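The shape of the change in ftrace_make_call()/ftrace_make_nop() above is: try a plain bl first with the warning suppressed, and only if the branch generator fails (returns 0) fall back to the module's PLT. A stand-alone model of that flow; module_plt_for() is a hypothetical stub standing in for get_module_plt():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* model of arm_gen_branch_link(): returns 0 when the ARM bl cannot encode
 * the offset, which is measured from pc + 8 and limited to a signed 24-bit
 * word count */
static uint32_t gen_branch_link(unsigned long pc, unsigned long target, bool warn)
{
        long offset = (long)target - (long)(pc + 8);

        if (offset < -33554432 || offset > 33554428) {
                if (warn)
                        fprintf(stderr, "branch out of range\n");
                return 0;
        }
        return 0xeb000000u | (((unsigned long)offset >> 2) & 0x00ffffff);
}

/* hypothetical stand-in for get_module_plt(): pretend the module carries a
 * veneer close to the call site */
static unsigned long module_plt_for(unsigned long pc, unsigned long target)
{
        (void)target;
        return pc + 0x1000;
}

static uint32_t patch_call(bool in_module, unsigned long pc, unsigned long target)
{
        /* warn only when there is no PLT to fall back on */
        uint32_t insn = gen_branch_link(pc, target, !in_module);

        if (!insn && in_module)
                insn = gen_branch_link(pc, module_plt_for(pc, target), true);
        return insn;
}

int main(void)
{
        /* a module call site ~150MiB away from the ftrace trampoline */
        printf("patched insn: 0x%08x\n",
               patch_call(true, 0xbf000000UL, 0xc8000000UL));
        return 0;
}
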
View File

@@ -23,7 +23,6 @@
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
@@ -31,7 +30,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@@ -48,6 +47,20 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
/*
* This needs to be assigned at runtime when the linker symbols are
* resolved.
*/
.pushsection .data
.align 2
.globl kernel_sec_start
.globl kernel_sec_end
kernel_sec_start:
.long 0
kernel_sec_end:
.long 0
.popsection
.macro pgtbl, rd, phys
add \rd, \phys, #TEXT_OFFSET
sub \rd, \rd, #PG_DIR_SIZE
@@ -230,16 +243,23 @@ __create_page_tables:
blo 1b
/*
* Map our RAM from the start to the end of the kernel .bss section.
* The main matter: map in the kernel using section mappings, and
* set two variables to indicate the physical start and end of the
* kernel.
*/
add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
orr r3, r8, r7
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
str r8, [r5] @ Save physical start of kernel
orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
eor r3, r3, r7 @ Remove the MMU flags
adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
str r3, [r5] @ Save physical end of kernel
#ifdef CONFIG_XIP_KERNEL
/*

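The immediate #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) above is the byte offset of the page-table slot that maps KERNEL_OFFSET. A sketch of the arithmetic; SECTION_SHIFT and PMD_ORDER are the usual 2-level ARM values, assumed here:

#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT 20 /* assumed: 1MiB section mappings */
#define PMD_ORDER      2 /* assumed: log2 of a 4-byte pmd slot */

/* byte offset of the slot covering a virtual address:
 * (virt >> SECTION_SHIFT) entries, each 1 << PMD_ORDER bytes wide,
 * which folds into virt >> (SECTION_SHIFT - PMD_ORDER) as in head.S */
static uint32_t pgtbl_offset(uint32_t virt)
{
        return virt >> (SECTION_SHIFT - PMD_ORDER);
}

int main(void)
{
        uint32_t kernel_offset = 0xC0000000u; /* assumed KERNEL_OFFSET */

        printf("slot for 0x%08x at page-table byte offset 0x%x\n",
               kernel_offset, (unsigned)pgtbl_offset(kernel_offset));
        return 0;
}
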
View File

@@ -3,8 +3,9 @@
#include <linux/kernel.h>
#include <asm/opcodes.h>
static unsigned long
__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
unsigned long addr, bool link,
bool warn)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
@@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 4);
if (offset < -16777216 || offset > 16777214) {
WARN_ON_ONCE(1);
WARN_ON_ONCE(warn);
return 0;
}
@@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
return __opcode_thumb32_compose(first, second);
}
static unsigned long
__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
bool link, bool warn)
{
unsigned long opcode = 0xea000000;
long offset;
@@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
WARN_ON_ONCE(1);
WARN_ON_ONCE(warn);
return 0;
}
@@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
}
unsigned long
__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
{
if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
return __arm_gen_branch_thumb2(pc, addr, link);
return __arm_gen_branch_thumb2(pc, addr, link, warn);
else
return __arm_gen_branch_arm(pc, addr, link);
return __arm_gen_branch_arm(pc, addr, link, warn);
}

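The bounds tested above come straight from the instruction encodings: ARM bl carries a signed 24-bit word offset relative to pc + 8, Thumb2 bl a signed 25-bit halfword-aligned offset relative to pc + 4. A two-line derivation of the constants:

#include <stdio.h>

int main(void)
{
        long arm_min = -(1L << 23) * 4;      /* -33554432 */
        long arm_max = ((1L << 23) - 1) * 4; /*  33554428 */
        long t2_min  = -(1L << 24);          /* -16777216 */
        long t2_max  = (1L << 24) - 2;       /*  16777214 */

        printf("ARM    bl reach: %ld .. %ld\n", arm_min, arm_max);
        printf("Thumb2 bl reach: %ld .. %ld\n", t2_min, t2_max);
        return 0;
}
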
View File

@@ -4,6 +4,7 @@
*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
@@ -12,10 +13,6 @@
#include <asm/cache.h>
#include <asm/opcodes.h>
#define PLT_ENT_STRIDE L1_CACHE_BYTES
#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
(PLT_ENT_STRIDE - 4))
@@ -24,9 +21,11 @@
(PLT_ENT_STRIDE - 8))
#endif
struct plt_entries {
u32 ldr[PLT_ENT_COUNT];
u32 lit[PLT_ENT_COUNT];
static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
FTRACE_ADDR,
MCOUNT_ADDR,
#endif
};
static bool in_init(const struct module *mod, unsigned long loc)
@@ -34,14 +33,40 @@ static bool in_init(const struct module *mod, unsigned long loc)
return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}
static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
int i;
if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
return;
pltsec->plt_count = ARRAY_SIZE(fixed_plts);
for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
plt->ldr[i] = PLT_ENT_LDR;
BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
struct plt_entries *plt;
int idx;
struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
int idx = 0;
/* cache the address, ELF header is available only during module load */
if (!pltsec->plt_ent)
pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
plt = pltsec->plt_ent;
prealloc_fixed(pltsec, plt);
for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
if (plt->lit[idx] == val)
return (u32)&plt->ldr[idx];
idx = 0;
/*
* Look for an existing entry pointing to 'val'. Given that the
* relocations are sorted, this will be the last entry we allocated.
@@ -189,8 +214,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned long core_plts = 0;
unsigned long init_plts = 0;
unsigned long core_plts = ARRAY_SIZE(fixed_plts);
unsigned long init_plts = ARRAY_SIZE(fixed_plts);
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
@@ -245,6 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core.plt_count = 0;
mod->arch.core.plt_ent = NULL;
mod->arch.init.plt->sh_type = SHT_NOBITS;
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
@@ -252,6 +278,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init.plt_count = 0;
mod->arch.init.plt_ent = NULL;
pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);

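prealloc_fixed() reserves the first literal slots for FTRACE_ADDR/MCOUNT_ADDR so that ftrace patching never has to grow the PLT after the module is loaded. A shortened stand-alone model of the fixed-slot lookup; the entry count and addresses are made up for the sketch:

#include <stdint.h>
#include <stdio.h>

#define ENT 4 /* shortened entry count, just for the sketch */

struct plt_entries {
        uint32_t ldr[ENT]; /* the "ldr pc, [pc, ...]" veneers */
        uint32_t lit[ENT]; /* literal pool holding branch targets */
};

/* hypothetical stand-ins for FTRACE_ADDR and MCOUNT_ADDR */
static const uint32_t fixed_plts[] = { 0xc0123450u, 0xc0123460u };

static void *get_plt(struct plt_entries *plt, uint32_t val)
{
        /* check the preallocated fixed slots before the dynamic ones */
        for (unsigned int i = 0; i < sizeof(fixed_plts) / sizeof(fixed_plts[0]); i++)
                if (plt->lit[i] == val)
                        return &plt->ldr[i];
        return NULL; /* the real code falls through to allocation here */
}

int main(void)
{
        struct plt_entries plt = { .lit = { fixed_plts[0], fixed_plts[1] } };

        printf("veneer for 0x%08x at %p\n", fixed_plts[0],
               get_plt(&plt, fixed_plts[0]));
        return 0;
}
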
View File

@@ -18,7 +18,6 @@ typedef void (*phys_reset_t)(unsigned long, bool);
/*
* Function pointers to optional machine specific functions
*/
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -138,10 +137,7 @@ void machine_restart(char *cmd)
local_irq_disable();
smp_send_stop();
if (arm_pm_restart)
arm_pm_restart(reboot_mode, cmd);
else
do_kernel_restart(cmd);
do_kernel_restart(cmd);
/* Give a grace period for failure to restart of 1s */
mdelay(1000);

View File

@@ -1083,6 +1083,20 @@ void __init hyp_mode_check(void)
#endif
}
static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
static int arm_restart(struct notifier_block *nb, unsigned long action,
void *data)
{
__arm_pm_restart(action, data);
return NOTIFY_DONE;
}
static struct notifier_block arm_restart_nb = {
.notifier_call = arm_restart,
.priority = 128,
};
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc = NULL;
@@ -1151,8 +1165,10 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
request_standard_resources(mdesc);
if (mdesc->restart)
arm_pm_restart = mdesc->restart;
if (mdesc->restart) {
__arm_pm_restart = mdesc->restart;
register_restart_handler(&arm_restart_nb);
}
unflatten_device_tree();

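The pattern replacing arm_pm_restart, here and in the xen and psci hunks, is registration on the kernel's restart notifier chain, with machine_restart() reduced to do_kernel_restart(). A stand-alone model of a priority-ordered chain, not kernel code; the kernel's own implementation lives in kernel/reboot.c:

#include <stdio.h>

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long action,
                             void *data);
        int priority; /* higher runs first: xen 192 > psci 129 > board 128 */
        struct notifier_block *next;
};

static struct notifier_block *restart_chain;

static void register_restart_handler(struct notifier_block *nb)
{
        struct notifier_block **p = &restart_chain;

        while (*p && (*p)->priority >= nb->priority) /* keep descending order */
                p = &(*p)->next;
        nb->next = *p;
        *p = nb;
}

static void do_kernel_restart(void *cmd)
{
        for (struct notifier_block *nb = restart_chain; nb; nb = nb->next)
                nb->notifier_call(nb, 0, cmd);
}

static int board_restart(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        printf("board restart handler (priority %d)\n", nb->priority);
        return 0;
}

int main(void)
{
        static struct notifier_block nb = {
                .notifier_call = board_restart,
                .priority = 128,
        };

        register_restart_handler(&nb);
        do_kernel_restart(NULL);
        return 0;
}
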
View File

@@ -47,7 +47,7 @@ SECTIONS
#endif
}
. = PAGE_OFFSET + TEXT_OFFSET;
. = KERNEL_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT

View File

@@ -601,8 +601,6 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
config VERIFY_PERMISSION_FAULT
bool
endif
config CPU_HAS_ASID

View File

@@ -17,31 +17,5 @@ ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
* New designs should not need to patch up faults.
*/
#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
/*
* Detect erroneous permission failures and fix
*/
ldr r3, =0x40d @ On permission fault
and r3, r1, r3
cmp r3, #0x0d
bne do_DataAbort
mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
isb
mrc p15, 0, ip, c7, c4, 0 @ Read the PAR
and r3, ip, #0x7b @ On translation fault
cmp r3, #0x0b
bne do_DataAbort
bic r1, r1, #0xf @ Fix up FSR FS[5:0]
and ip, ip, #0x7e
orr r1, r1, ip, LSR #1
#endif
b do_DataAbort
ENDPROC(v7_early_abort)

View File

@@ -1121,31 +1121,32 @@ void __init debug_ll_io_init(void)
}
#endif
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
static unsigned long __initdata vmalloc_size = 240 * SZ_1M;
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 240m.
* area - the default is 240MiB.
*/
static int __init early_vmalloc(char *arg)
{
unsigned long vmalloc_reserve = memparse(arg, NULL);
unsigned long vmalloc_max;
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
pr_warn("vmalloc area too small, limiting to %luMB\n",
pr_warn("vmalloc area is too small, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
pr_warn("vmalloc area is too big, limiting to %luMB\n",
vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
if (vmalloc_reserve > vmalloc_max) {
vmalloc_reserve = vmalloc_max;
pr_warn("vmalloc area is too big, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
vmalloc_size = vmalloc_reserve;
return 0;
}
early_param("vmalloc", early_vmalloc);
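
The fix is the VMALLOC_OFFSET term in vmalloc_max: the 8MiB hole between the end of lowmem and the start of the vmalloc area now counts against the requested size. A sketch of the clamp with illustrative constants (VMALLOC_END and PAGE_OFFSET are platform-configuration values, assumed below):

#include <stdio.h>

#define SZ_16M         0x01000000UL
#define SZ_32M         0x02000000UL
#define VMALLOC_OFFSET 0x00800000UL /* assumed: the 8MiB guard hole */
#define VMALLOC_END    0xFF800000UL /* assumed typical value */
#define PAGE_OFFSET    0xC0000000UL /* assumed: CONFIG_PAGE_OFFSET */

static unsigned long clamp_vmalloc(unsigned long req)
{
        unsigned long max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);

        if (req < SZ_16M)
                req = SZ_16M; /* too small: keep at least 16MiB of vmalloc */
        if (req > max)
                req = max;    /* too big: leave 32MiB of lowmem plus the hole */
        return req;
}

int main(void)
{
        printf("vmalloc=2G clamps to %luMiB\n",
               clamp_vmalloc(0x80000000UL) >> 20);
        return 0;
}
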
@@ -1165,7 +1166,8 @@ void __init adjust_lowmem_bounds(void)
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
PAGE_OFFSET + PHYS_OFFSET;
/*
* The first usable region must be PMD aligned. Mark its start
@@ -1246,7 +1248,7 @@ void __init adjust_lowmem_bounds(void)
memblock_set_current_limit(memblock_limit);
}
static inline void prepare_page_table(void)
static __init void prepare_page_table(void)
{
unsigned long addr;
phys_addr_t end;
@@ -1457,8 +1459,6 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t start, end;
u64 i;
@@ -1466,55 +1466,126 @@ static void __init map_lowmem(void)
for_each_mem_range(i, &start, &end) {
struct map_desc map;
pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
(long long)start, (long long)end);
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
break;
if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RWX;
/*
* If our kernel image is in the VMALLOC area we need to remove
* the kernel physical memory from lowmem since the kernel will
* be mapped separately.
*
* The kernel will typically be at the very start of lowmem,
* but any placement relative to memory ranges is possible.
*
* If the memblock contains the kernel, we have to chisel out
* the kernel memory from it and map each part separately. We
* get 6 different theoretical cases:
*
* +--------+ +--------+
* +-- start --+ +--------+ | Kernel | | Kernel |
* | | | Kernel | | case 2 | | case 5 |
* | | | case 1 | +--------+ | | +--------+
* | Memory | +--------+ | | | Kernel |
* | range | +--------+ | | | case 6 |
* | | | Kernel | +--------+ | | +--------+
* | | | case 3 | | Kernel | | |
* +-- end ----+ +--------+ | case 4 | | |
* +--------+ +--------+
*/
create_mapping(&map);
} else if (start >= kernel_x_end) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RW;
/* Case 5: kernel covers range, don't map anything, should be rare */
if ((start > kernel_sec_start) && (end < kernel_sec_end))
break;
create_mapping(&map);
} else {
/* This better cover the entire kernel */
if (start < kernel_x_start) {
/* Cases where the kernel is starting inside the range */
if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
/* Case 6: kernel is embedded in the range, we need two mappings */
if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
/* Map memory below the kernel */
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = kernel_x_start - start;
map.length = kernel_sec_start - start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
}
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
if (kernel_x_end < end) {
map.pfn = __phys_to_pfn(kernel_x_end);
map.virtual = __phys_to_virt(kernel_x_end);
map.length = end - kernel_x_end;
/* Map memory above the kernel */
map.pfn = __phys_to_pfn(kernel_sec_end);
map.virtual = __phys_to_virt(kernel_sec_end);
map.length = end - kernel_sec_end;
map.type = MT_MEMORY_RW;
create_mapping(&map);
break;
}
/* Case 1: kernel and range start at the same address, should be common */
if (kernel_sec_start == start)
start = kernel_sec_end;
/* Case 3: kernel and range end at the same address, should be rare */
if (kernel_sec_end == end)
end = kernel_sec_start;
} else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
/* Case 2: kernel ends inside range, starts below it */
start = kernel_sec_end;
} else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
/* Case 4: kernel starts inside range, ends above it */
end = kernel_sec_start;
}
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
}
}
static void __init map_kernel(void)
{
/*
* We use the well known kernel section start and end and split the area in the
* middle like this:
* . .
* | RW memory |
* +----------------+ kernel_x_start
* | Executable |
* | kernel memory |
* +----------------+ kernel_x_end / kernel_nx_start
* | Non-executable |
* | kernel memory |
* +----------------+ kernel_nx_end
* | RW memory |
* . .
*
* Notice that we are dealing with section sized mappings here so all of this
* will be bumped to the closest section boundary. This means that some of the
* non-executable part of the kernel memory is actually mapped as executable.
* This will only persist until we turn on proper memory management later on
* and we remap the whole kernel with page granularity.
*/
phys_addr_t kernel_x_start = kernel_sec_start;
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_nx_start = kernel_x_end;
phys_addr_t kernel_nx_end = kernel_sec_end;
struct map_desc map;
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
/* If the nx part is small it may end up covered by the tail of the RWX section */
if (kernel_x_end == kernel_nx_end)
return;
map.pfn = __phys_to_pfn(kernel_nx_start);
map.virtual = __phys_to_virt(kernel_nx_start);
map.length = kernel_nx_end - kernel_nx_start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
}
#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
@@ -1645,9 +1716,18 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
kernel_sec_start, kernel_sec_end);
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
/*
* After this point early_alloc(), i.e. the memblock allocator, can
* be used
*/
map_kernel();
dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc);

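map_kernel() splits the image at __init_end rounded up to a section boundary: everything below stays RWX until the later page-granularity remap, everything above is mapped RW. A small sketch of the rounding with made-up physical addresses:

#include <stdio.h>

#define SECTION_SIZE 0x00100000UL /* assumed: 1MiB sections */

static unsigned long round_up_section(unsigned long x)
{
        return (x + SECTION_SIZE - 1) & ~(SECTION_SIZE - 1);
}

int main(void)
{
        /* made-up physical layout, for illustration only */
        unsigned long kernel_sec_start = 0x40000000UL;
        unsigned long pa_init_end      = 0x40b23000UL; /* hypothetical __pa(__init_end) */
        unsigned long kernel_sec_end   = 0x41200000UL;
        unsigned long kernel_x_end     = round_up_section(pa_init_end);

        printf("RWX: 0x%08lx-0x%08lx\n", kernel_sec_start, kernel_x_end);
        printf("RW : 0x%08lx-0x%08lx\n", kernel_x_end, kernel_sec_end);
        return 0;
}
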
View File

@@ -441,21 +441,21 @@ void kprobe_thumb32_test_cases(void)
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]",
TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]",
TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RRX("tbh [r",1,9f, ", r",14,1,"]",
TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
@@ -468,10 +468,10 @@ void kprobe_thumb32_test_cases(void)
TEST_UNSUPPORTED("strexb r0, r1, [r2]")
TEST_UNSUPPORTED("strexh r0, r1, [r2]")
TEST_UNSUPPORTED("strexd r0, r1, [r2]")
TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]")
TEST_UNSUPPORTED("ldrexb r0, [r1]")
TEST_UNSUPPORTED("ldrexh r0, [r1]")
TEST_UNSUPPORTED("ldrexd r0, [r1]")
TEST_UNSUPPORTED("ldrexd r0, r1, [r1]")
TEST_GROUP("Data-processing (shifted register) and (modified immediate)")

View File

@@ -33,39 +33,26 @@ _dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
$(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
quiet_cmd_gen_mach = GEN $@
cmd_gen_mach = mkdir -p $(dir $@) && \
$(AWK) -f $(filter-out $(PHONY),$^) > $@
cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@
$(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
quiet_cmd_syshdr = SYSHDR $@
cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \
cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis common,$* \
--offset __NR_SYSCALL_BASE $< $@
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(syshdr_abi_$(basetarget))'
cmd_sysnr = $(CONFIG_SHELL) $(sysnr) $< $@
$(uapi)/unistd-oabi.h: abis := common,oabi
$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE
$(uapi)/unistd-%.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
$(uapi)/unistd-eabi.h: abis := common,eabi
$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
sysnr_abi_unistd-nr := common,oabi,eabi,compat
$(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE
$(call if_changed,sysnr)
$(gen)/calls-oabi.S: abis := common,oabi
$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
$(gen)/calls-eabi.S: abis := common,eabi
$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE
$(gen)/calls-%.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)

View File

@@ -2,14 +2,13 @@
# SPDX-License-Identifier: GPL-2.0
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
align=1
fileguard=_ASM_ARM_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | (
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+" "$in" | sort -n | tail -n1 | (
echo "#ifndef ${fileguard}
#define ${fileguard} 1

View File

@@ -29,6 +29,7 @@
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/pvclock_gtod.h>
#include <linux/reboot.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/timekeeper_internal.h>
@@ -181,11 +182,18 @@ void xen_reboot(int reason)
BUG_ON(rc);
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
static int xen_restart(struct notifier_block *nb, unsigned long action,
void *data)
{
xen_reboot(SHUTDOWN_reboot);
return NOTIFY_DONE;
}
static struct notifier_block xen_restart_nb = {
.notifier_call = xen_restart,
.priority = 192,
};
static void xen_power_off(void)
{
@@ -404,7 +412,7 @@ static int __init xen_pm_init(void)
return -ENODEV;
pm_power_off = xen_power_off;
arm_pm_restart = xen_restart;
register_restart_handler(&xen_restart_nb);
if (!xen_initial_domain()) {
struct timespec64 ts;
xen_read_wallclock(&ts);

View File

@@ -32,8 +32,6 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct mm_struct;
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SYSTEM_MISC_H */

View File

@@ -70,8 +70,6 @@ EXPORT_SYMBOL(__stack_chk_guard);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -142,10 +140,7 @@ void machine_restart(char *cmd)
efi_reboot(reboot_mode, NULL);
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
arm_pm_restart(reboot_mode, cmd);
else
do_kernel_restart(cmd);
do_kernel_restart(cmd);
/*
* Whoops - the architecture was unable to reboot.

View File

@@ -296,7 +296,8 @@ static int get_set_conduit_method(struct device_node *np)
return 0;
}
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
static int psci_sys_reset(struct notifier_block *nb, unsigned long action,
void *data)
{
if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
psci_system_reset2_supported) {
@@ -309,8 +310,15 @@ static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
} else {
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
return NOTIFY_DONE;
}
static struct notifier_block psci_sys_reset_nb = {
.notifier_call = psci_sys_reset,
.priority = 129,
};
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
@@ -477,7 +485,7 @@ static void __init psci_0_2_set_functions(void)
.migrate_info_type = psci_migrate_info_type,
};
arm_pm_restart = psci_sys_reset;
register_restart_handler(&psci_sys_reset_nb);
pm_power_off = psci_sys_poweroff;
}