arm64 fixes:

 - Fix ACPI boot due to recent broken NUMA changes
 - Fix remote enabling of CPU features requiring PSTATE bit manipulation
 - Add address range check when emulating user cache maintenance
 - Fix LL/SC loops that allow compiler to introduce memory accesses
 - Fix recently added write_sysreg_s macro
 - Ensure MDCR_EL2 is initialised on qemu targets without a PMU
 - Avoid kaslr breakage due to MODVERSIONs and DYNAMIC_FTRACE
 - Correctly drive recent ld when building relocatable Image
 - Remove junk IS_ERR check from xgene PMU driver added during merge window
 - pr_cont fixes after core changes in the merge window

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJYCNgDAAoJELescNyEwWM0BV8IAKZLVlfKk2YTo3T/tx/2FGIW
5VKjSY13VLLC5cKQLB7Yvm7G1kzvLiN4Zb5fqvL0CK1ut8scPVbR1AAhSDngB4vU
UNzUqwp1R0Tl+GhLT+IfOElWjEcB9kwic3CZV5v4FxvZg4HvwstL3zLvMkjTaDYK
GjaS9iQ2zQsgsYHtluzia7q1k2fXfqdLOd5V0XF05CykJKO3j7zpqTv8PKF7PUFU
utsjRdyyGmBYaamG/cO5phDbAD5VMvdWcfDeJ25JdSwHaoxjZ8tpM721R4b5GRN7
5rPn52v5Hycp++FmhuO45laVQc60LYMz17mQwSTnIX2pGuFRqjRWJztJpyQqzWo=
=MXN1
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Most of these are CC'd for stable, but there are a few fixing issues
  introduced during the recent merge window too. There's also a fix for
  the xgene PMU driver, but it seemed daft to send as a separate pull
  request, so I've included it here with the rest of the fixes.

  - Fix ACPI boot due to recent broken NUMA changes
  - Fix remote enabling of CPU features requiring PSTATE bit manipulation
  - Add address range check when emulating user cache maintenance
  - Fix LL/SC loops that allow compiler to introduce memory accesses
  - Fix recently added write_sysreg_s macro
  - Ensure MDCR_EL2 is initialised on qemu targets without a PMU
  - Avoid kaslr breakage due to MODVERSIONs and DYNAMIC_FTRACE
  - Correctly drive recent ld when building relocatable Image
  - Remove junk IS_ERR check from xgene PMU driver added during merge window
  - pr_cont fixes after core changes in the merge window"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: remove pr_cont abuse from mem_init
  arm64: fix show_regs fallout from KERN_CONT changes
  arm64: kernel: force ET_DYN ELF type for CONFIG_RELOCATABLE=y
  arm64: suspend: Reconfigure PSTATE after resume from idle
  arm64: mm: Set PSTATE.PAN from the cpu_enable_pan() call
  arm64: cpufeature: Schedule enable() calls instead of calling them via IPI
  arm64: Cortex-A53 errata workaround: check for kernel addresses
  arm64: percpu: rewrite ll/sc loops in assembly
  arm64: swp emulation: bound LL/SC retries before rescheduling
  arm64: sysreg: Fix use of XZR in write_sysreg_s
  arm64: kaslr: keep modules close to the kernel when DYNAMIC_FTRACE=y
  arm64: kernel: Init MDCR_EL2 even in the absence of a PMU
  perf: xgene: Remove bogus IS_ERR() check
  arm64: kernel: numa: fix ACPI boot cpu numa node mapping
  arm64: kaslr: fix breakage with CONFIG_MODVERSIONS=y
commit f4814e6183
arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
 
 config RANDOMIZE_MODULE_REGION_FULL
 	bool "Randomize the module region independently from the core kernel"
-	depends on RANDOMIZE_BASE
+	depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
 	default y
 	help
 	  Randomizes the location of the module region without considering the
arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie -Bsymbolic
+LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	void (*enable)(void *);		/* Called on all active CPUs */
+	int (*enable)(void *);		/* Called on all active CPUs */
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif	/* __ASM_EXEC_H */
arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start		(kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
 	\
 	switch (size) { \
 	case 1: \
-		do { \
-			asm ("//__per_cpu_" #op "_1\n" \
-			"ldxrb	  %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_1\n" \
+		"1:	ldxrb	  %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxrb	  %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u8 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxrb	  %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz	  %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u8 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 2: \
-		do { \
-			asm ("//__per_cpu_" #op "_2\n" \
-			"ldxrh	  %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_2\n" \
+		"1:	ldxrh	  %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxrh	  %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u16 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxrh	  %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz	  %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u16 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 4: \
-		do { \
-			asm ("//__per_cpu_" #op "_4\n" \
-			"ldxr	  %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_4\n" \
+		"1:	ldxr	  %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxr	  %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u32 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxr	  %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz	  %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u32 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 8: \
-		do { \
-			asm ("//__per_cpu_" #op "_8\n" \
-			"ldxr	  %[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_8\n" \
+		"1:	ldxr	  %[ret], %[ptr]\n" \
 			#asm_op " %[ret], %[ret], %[val]\n" \
-			"stxr	  %w[loop], %[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u64 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxr	  %w[loop], %[ret], %[ptr]\n" \
+		"	cbnz	  %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u64 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	default: \
 		BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 
 	switch (size) {
 	case 1:
-		do {
-			asm ("//__percpu_xchg_1\n"
-			"ldxrb %w[ret], %[ptr]\n"
-			"stxrb %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u8 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_1\n"
+		"1:	ldxrb	%w[ret], %[ptr]\n"
+		"	stxrb	%w[loop], %w[val], %[ptr]\n"
+		"	cbnz	%w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u8 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 2:
-		do {
-			asm ("//__percpu_xchg_2\n"
-			"ldxrh %w[ret], %[ptr]\n"
-			"stxrh %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u16 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_2\n"
+		"1:	ldxrh	%w[ret], %[ptr]\n"
+		"	stxrh	%w[loop], %w[val], %[ptr]\n"
+		"	cbnz	%w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u16 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 4:
-		do {
-			asm ("//__percpu_xchg_4\n"
-			"ldxr %w[ret], %[ptr]\n"
-			"stxr %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u32 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_4\n"
+		"1:	ldxr	%w[ret], %[ptr]\n"
+		"	stxr	%w[loop], %w[val], %[ptr]\n"
+		"	cbnz	%w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u32 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 8:
-		do {
-			asm ("//__percpu_xchg_8\n"
-			"ldxr %[ret], %[ptr]\n"
-			"stxr %w[loop], %[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u64 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_8\n"
+		"1:	ldxr	%[ret], %[ptr]\n"
+		"	stxr	%w[loop], %[val], %[ptr]\n"
+		"	cbnz	%w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u64 *)ptr)
+		: [val] "r" (val));
 		break;
 	default:
 		BUILD_BUG();
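The point of moving the whole retry loop into a single asm block in the two percpu.h hunks above, rather than wrapping a one-shot asm in do { ... } while (loop): between the ldxr and the stxr the compiler was free to emit its own loads and stores (register spills, for example), and any intervening memory access can clear the exclusive monitor, so the stxr could fail indefinitely. With the loop label and cbnz inside the asm, no compiler-generated access can land in the critical sequence. A self-contained userspace sketch of the fixed pattern (assumes an AArch64 compiler; not kernel source):

#include <stdint.h>

static inline uint64_t ll_sc_add_return(uint64_t *ptr, uint64_t val)
{
        unsigned long loop;
        uint64_t ret;

        /* The entire load-exclusive/store-exclusive retry loop lives in
         * one asm block, so no compiler-generated memory access can fall
         * between ldxr and stxr and knock down the exclusive monitor. */
        asm volatile("1:        ldxr    %[ret], %[ptr]\n"
                     "  add     %[ret], %[ret], %[val]\n"
                     "  stxr    %w[loop], %[ret], %[ptr]\n"
                     "  cbnz    %w[loop], 1b"
                     : [loop] "=&r" (loop), [ret] "=&r" (ret),
                       [ptr] "+Q" (*ptr)
                     : [val] "Ir" (val));

        return ret;
}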
arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
 
 #define write_sysreg_s(v, r) do {					\
 	u64 __val = (u64)v;						\
-	asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val));	\
+	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
 } while (0)
 
 static inline void config_sctlr_el1(u32 clear, u32 set)
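Why %x0 matters here: write_sysreg_s feeds the register name into the msr_s assembler macro as text. With the "rZ" constraint, a constant zero may be satisfied by the zero register, and the bare %0 template is not guaranteed to print its 64-bit name; the %x0 operand modifier forces the x-form (x<n> or xzr), which is what the msr_s encoding macro expects. A userspace sketch of the same operand-modifier behaviour, where a mov stands in for the generated system-register write (illustrative, not kernel source):

#include <stdint.h>

static inline void write_x9(uint64_t v)
{
        uint64_t val = v;

        /* %x0 always prints the 64-bit register name (x<n> or xzr),
         * even when the compiler satisfies "rZ" with the zero register. */
        asm volatile("mov x9, %x0" : : "rZ" (val) : "x9");
}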
arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
 	flag;								\
 })
 
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)		sign_extend64(addr, 55)
+
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
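The new untagged_addr() is what makes the traps.c range check later in this diff possible: AArch64's top-byte-ignore (TBI) means a user pointer can carry a non-zero tag in bits 63:56, so comparing the raw pointer against the user address limit would misclassify tagged user addresses. Sign-extending from bit 55 strips the tag while leaving kernel addresses (whose top bits are already set) alone. A runnable userspace sketch, with sign_extend64 re-implemented to mirror the include/linux/bitops.h helper:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's sign_extend64(): sign-extend from bit index. */
static inline int64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;

        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        uint64_t tagged = 0x5a00001234567890ULL;  /* TBI tag 0x5a in the top byte */

        /* Prints 0x1234567890: tag cleared, low 56 bits preserved. */
        printf("%#llx\n", (unsigned long long)sign_extend64(tagged, 55));
        return 0;
}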
arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 /*
  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
  */
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS	4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
 	__asm__ __volatile__( \
+	"	mov		%w3, %w7\n" \
 	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
 		    CONFIG_ARM64_PAN) \
-	"0:	ldxr"B"		%w2, [%3]\n" \
-	"1:	stxr"B"		%w0, %w1, [%3]\n" \
+	"0:	ldxr"B"		%w2, [%4]\n" \
+	"1:	stxr"B"		%w0, %w1, [%4]\n" \
 	"	cbz		%w0, 2f\n" \
-	"	mov		%w0, %w4\n" \
+	"	sub		%w3, %w3, #1\n" \
+	"	cbnz		%w3, 0b\n" \
+	"	mov		%w0, %w5\n" \
 	"	b		3f\n" \
 	"2:\n" \
 	"	mov		%w1, %w2\n" \
 	"3:\n" \
 	"	.pushsection	 .fixup,\"ax\"\n" \
 	"	.align		2\n" \
-	"4:	mov		%w0, %w5\n" \
+	"4:	mov		%w0, %w6\n" \
 	"	b		3b\n" \
 	"	.popsection" \
 	_ASM_EXTABLE(0b, 4b) \
 	_ASM_EXTABLE(1b, 4b) \
 	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
 		    CONFIG_ARM64_PAN) \
-	: "=&r" (res), "+r" (data), "=&r" (temp) \
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+	  "i" (__SWP_LL_SC_LOOPS) \
 	: "memory")
 
-#define __user_swp_asm(data, addr, res, temp) \
-	__user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
-	__user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+	__user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+	__user_swpX_asm(data, addr, res, temp, temp2, "b")
 
 /*
  * Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}
 
 	while (1) {
-		unsigned long temp;
+		unsigned long temp, temp2;
 
 		if (type == TYPE_SWPB)
-			__user_swpb_asm(*data, address, res, temp);
+			__user_swpb_asm(*data, address, res, temp, temp2);
 		else
-			__user_swp_asm(*data, address, res, temp);
+			__user_swp_asm(*data, address, res, temp, temp2);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
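The shape of the SWP change, in C terms: the asm sequence now decrements a counter seeded with __SWP_LL_SC_LOOPS and gives up with -EAGAIN once it hits zero, so the outer while (1) loop above can notice pending signals and reschedule rather than spinning in kernel space on a contended cache line. A sketch of that control flow, where swp_step is a hypothetical stand-in for one ldxr/stxr attempt (not kernel source):

#define SWP_LL_SC_LOOPS 4       /* arbitrary forward-progress bound */

/* Hypothetical: one exclusive load/store attempt; returns 0 on success,
 * 1 if exclusivity was lost, -EFAULT on a bad user address. */
static int swp_step(unsigned int *data, unsigned int *uaddr);

static int swp_bounded_attempt(unsigned int *data, unsigned int *uaddr)
{
        int attempts = SWP_LL_SC_LOOPS;
        int res;

        do {
                res = swp_step(data, uaddr);
        } while (res == 1 && --attempts);

        return res == 1 ? -EAGAIN : res;
}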
arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
 {
 	/* Clear SCTLR_EL1.UCT */
 	config_sctlr_el1(SCTLR_EL1_UCT, 0);
+	return 0;
 }
 
 #define MIDR_RANGE(model, min, max) \
arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/sort.h>
+#include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++)
 		if (caps->enable && cpus_have_cap(caps->capability))
-			on_each_cpu(caps->enable, NULL, true);
+			/*
+			 * Use stop_machine() as it schedules the work allowing
+			 * us to modify PSTATE, instead of on_each_cpu() which
+			 * uses an IPI, giving us a PSTATE that disappears when
+			 * we return.
+			 */
+			stop_machine(caps->enable, NULL, cpu_online_mask);
 }
 
 /*
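This hunk is also why the enable() hook's prototype changed from void (*)(void *) to int (*)(void *) throughout the series: stop_machine() takes a cpu_stop_fn_t, which returns int. A kernel-context sketch of the pattern (illustrative, not a standalone program):

#include <linux/cpumask.h>
#include <linux/stop_machine.h>

static int my_feature_enable(void *__unused)
{
        /*
         * Runs on every CPU in the mask from the stopper thread, i.e. a
         * schedulable context rather than an IPI handler, so PSTATE bits
         * set here still hold after the function returns.
         */
        return 0;
}

static void enable_on_all_cpus(void)
{
        stop_machine(my_feature_enable, NULL, cpu_online_mask);
}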
arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
-	msr	mdcr_el2, x0			// all PMU counters from EL1
 4:
+	csel	x0, xzr, x0, lt			// all PMU counters from EL1
+	msr	mdcr_el2, x0			// (if they exist)
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
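In C terms, the fixed head.S sequence computes roughly the following, so that MDCR_EL2 is written even when ID_AA64DFR0_EL1 reports no standard PMU (previously the msr was skipped and the register kept its UNKNOWN reset value). This transcription is illustrative, not kernel source:

static unsigned long mdcr_el2_init_value(unsigned long id_aa64dfr0,
                                         unsigned long pmcr)
{
        /* ID_AA64DFR0_EL1.PMUVer (bits 11:8) is a signed field; 0xf
         * (implementation defined) must not count as a standard PMU. */
        long pmuver = ((long)(id_aa64dfr0 << 52)) >> 60;

        /* PMCR_EL0.N (bits 15:11) is the number of event counters; the
         * csel substitutes 0 when no PMU is present, matching
         * "csel x0, xzr, x0, lt". */
        return pmuver >= 1 ? (pmcr >> 11) & 0x1f : 0;
}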
arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
 #include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
+#include <asm/exec.h>
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
 	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
 	       regs->pc, lr, regs->pstate);
 	printk("sp : %016llx\n", sp);
-	for (i = top_reg; i >= 0; i--) {
+
+	i = top_reg;
+
+	while (i >= 0) {
 		printk("x%-2d: %016llx ", i, regs->regs[i]);
-		if (i % 2 == 0)
-			printk("\n");
-	}
+		i--;
+
+		if (i % 2 == 0) {
+			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+			i--;
+		}
+
+		pr_cont("\n");
+	}
 	printk("\n");
 }
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
 }
 
 /* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
 {
 	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
 		if (task_thread_info(next)->addr_limit == KERNEL_DS)
arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
 
arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -49,6 +52,14 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	set_my_cpu_offset(per_cpu_offset(cpu));
 
+	/*
+	 * PSTATE was not saved over suspend/resume, re-enable any detected
+	 * features that might not have been set correctly.
+	 */
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+			CONFIG_ARM64_PAN));
+	uao_thread_switch(current);
+
 	/*
 	 * Restore HW breakpoint registers to sane values
 	 * before debug exceptions are possibly reenabled
arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_UCI, 0);
+	return 0;
 }
 
 #define __user_cache_maint(insn, address, res) \
-	asm volatile ( \
-	"1:	" insn ", %1\n" \
-	"	mov	%w0, #0\n" \
-	"2:\n" \
-	"	.pushsection .fixup,\"ax\"\n" \
-	"	.align	2\n" \
-	"3:	mov	%w0, %w2\n" \
-	"	b	2b\n" \
-	"	.popsection\n" \
-	_ASM_EXTABLE(1b, 3b) \
-	: "=r" (res) \
-	: "r" (address), "i" (-EFAULT) )
+	if (untagged_addr(address) >= user_addr_max()) \
+		res = -EFAULT; \
+	else \
+		asm volatile ( \
+		"1:	" insn ", %1\n" \
+		"	mov	%w0, #0\n" \
+		"2:\n" \
+		"	.pushsection .fixup,\"ax\"\n" \
+		"	.align	2\n" \
+		"3:	mov	%w0, %w2\n" \
+		"	b	2b\n" \
+		"	.popsection\n" \
+		_ASM_EXTABLE(1b, 3b) \
+		: "=r" (res) \
+		: "r" (address), "i" (-EFAULT) )
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/preempt.h>
 
+#include <asm/bug.h>
 #include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
 {
+	/*
+	 * We modify PSTATE. This won't work from irq context as the PSTATE
+	 * is discarded once we return from the exception.
+	 */
+	WARN_ON_ONCE(in_interrupt());
+
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+	asm(SET_PSTATE_PAN(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
 
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
  * We need to enable the feature at runtime (instead of adding it to
  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
  */
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
 {
 	asm(SET_PSTATE_UAO(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_UAO */
arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
 
 	pr_notice("Virtual kernel memory layout:\n");
 #ifdef CONFIG_KASAN
-	pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+	pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
 #endif
-	pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(MODULES_VADDR, MODULES_END));
-	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+	pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(VMALLOC_START, VMALLOC_END));
-	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(_text, _etext));
-	pr_cont("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__start_rodata, __init_begin));
-	pr_cont("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__init_begin, __init_end));
-	pr_cont("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(_sdata, _edata));
-	pr_cont("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__bss_start, __bss_stop));
-	pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
+	pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
 		MLK(FIXADDR_START, FIXADDR_TOP));
-	pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(PCI_IO_START, PCI_IO_END));
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
+	pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
 		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
-	pr_cont("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
+	pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
 		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
 		    (unsigned long)virt_to_page(high_memory)));
 #endif
-	pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(__phys_to_virt(memblock_start_of_DRAM()),
 		    (unsigned long)high_memory));
 
drivers/perf/xgene_pmu.c
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	rc = acpi_dev_get_resources(adev, &resource_list,
 				    acpi_pmu_dev_add_resource, &res);
 	acpi_dev_free_resource_list(&resource_list);
-	if (rc < 0 || IS_ERR(&res)) {
+	if (rc < 0) {
 		dev_err(dev, "PMU type %d: No resource address found\n", type);
 		goto err;
 	}
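For context on why the removed check was bogus: IS_ERR() only tests whether a pointer's value lies in the reserved errno range used by ERR_PTR(); the address of a local variable can never be such a value, so IS_ERR(&res) was unconditionally false and merely obscured the real rc < 0 check. A minimal kernel-context sketch of the distinction (illustrative, not driver source):

#include <linux/err.h>

static int err_ptr_demo(void)
{
        int res;                        /* a real stack object */
        void *p = ERR_PTR(-ENOMEM);     /* errno encoded in a pointer */

        if (IS_ERR(p))                  /* true: value in the errno range */
                return PTR_ERR(p);      /* recovers -ENOMEM */

        return IS_ERR(&res);            /* always 0: &res is a normal address */
}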