Merge branch 'for-next/entry' into for-next/core

The never-ending entry.S refactoring continues, putting us in a much
better place wrt compiler instrumentation whilst moving more of the code
into C.

* for-next/entry:
  arm64: idle: don't instrument idle code with KCOV
  arm64: entry: don't instrument entry code with KCOV
  arm64: entry: make NMI entry/exit functions static
  arm64: entry: split SDEI entry
  arm64: entry: split bad stack entry
  arm64: entry: fold el1_inv() into el1h_64_sync_handler()
  arm64: entry: handle all vectors with C
  arm64: entry: template the entry asm functions
  arm64: entry: improve bad_mode()
  arm64: entry: move bad_mode() to entry-common.c
  arm64: entry: consolidate EL1 exception returns
  arm64: entry: organise entry vectors consistently
  arm64: entry: organise entry handlers consistently
  arm64: entry: convert IRQ+FIQ handlers to C
  arm64: entry: add a call_on_irq_stack helper
  arm64: entry: move NMI preempt logic to C
  arm64: entry: move arm64_preempt_schedule_irq to entry-common.c
  arm64: entry: convert SError handlers to C
  arm64: entry: unmask IRQ+FIQ after EL0 handling
  arm64: remove redundant local_daif_mask() in bad_mode()
Will Deacon, 2021-06-24 14:01:55 +01:00
commit 6cf61e061e
11 changed files with 414 additions and 435 deletions

arch/arm64/include/asm/exception.h

@@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
asmlinkage void el1_sync_handler(struct pt_regs *regs);
asmlinkage void el0_sync_handler(struct pt_regs *regs);
asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
void arm64_enter_nmi(struct pt_regs *regs);
void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
@@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
void do_serror(struct pt_regs *regs, unsigned int esr);
void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */

arch/arm64/include/asm/processor.h

@@ -257,8 +257,6 @@ void set_task_sctlr_el1(u64 sctlr);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
asmlinkage void arm64_preempt_schedule_irq(void);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

arch/arm64/include/asm/sdei.h

@@ -37,6 +37,9 @@ struct sdei_registered_event;
asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
struct sdei_registered_event *arg);
unsigned long do_sdei_event(struct pt_regs *regs,
struct sdei_registered_event *arg);
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)

arch/arm64/kernel/Makefile

@@ -14,6 +14,12 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
CFLAGS_syscall.o += -fno-stack-protector
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
KCOV_INSTRUMENT_entry.o := n
KCOV_INSTRUMENT_idle.o := n
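(Aside, not part of the patch: why `noinstr` alone isn't enough. Its generic definition is roughly the following -- quoted approximately from include/linux/compiler_types.h of this era, so the exact attribute list is an assumption:)

#define noinstr							\
	noinline notrace __section(".noinstr.text")		\
	__no_kcsan __no_sanitize_address __no_sanitize_coverage

__no_sanitize_coverage expands to nothing on compilers that lack the corresponding attribute, so KCOV calls can still land in noinstr code; the per-object KCOV_INSTRUMENT_*.o := n opt-outs above close that gap.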
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-common.o entry-fpsimd.o process.o ptrace.o \
@@ -22,7 +28,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
syscall.o proton-pack.o idreg-override.o
syscall.o proton-pack.o idreg-override.o idle.o
targets += efi-entry.o

arch/arm64/kernel/entry-common.c

@@ -6,7 +6,11 @@
*/
#include <linux/context_tracking.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
@@ -15,7 +19,11 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
/*
* This is intended to match the logic in irqentry_enter(), handling the kernel
@@ -67,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
}
}
void noinstr arm64_enter_nmi(struct pt_regs *regs)
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -80,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs)
ftrace_nmi_enter();
}
void noinstr arm64_exit_nmi(struct pt_regs *regs)
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
@@ -97,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
__nmi_exit();
}
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
@@ -105,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
enter_from_kernel_mode(regs);
}
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
@@ -113,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
static void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
/*
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
* priority masking is used the GIC irqchip driver will clear DAIF.IF
* using gic_arch_enable_irqs() for normal IRQs. If anything is set in
* DAIF we must have handled an NMI, so skip preemption.
*/
if (system_uses_irq_prio_masking() && read_sysreg(daif))
return;
/*
* Preempting a task from an IRQ means we leave copies of PSTATE
* on the stack. cpufeature's enable calls may modify PSTATE, but
* resuming one of these preempted tasks would undo those changes.
*
* Only allow a task to be preempted once cpufeatures have been
* enabled.
*/
if (system_capabilities_finalized())
preempt_schedule_irq();
}
static void do_interrupt_handler(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
if (on_thread_stack())
call_on_irq_stack(regs, handler);
else
handler(regs);
}
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
unsigned int esr)
{
arm64_enter_nmi(regs);
console_verbose();
pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
vector, smp_processor_id(), esr,
esr_get_class_string(esr));
__show_regs(regs);
panic("Unhandled exception");
}
#define UNHANDLED(el, regsize, vector) \
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
{ \
const char *desc = #regsize "-bit " #el " " #vector; \
__panic_unhandled(regs, desc, read_sysreg(esr_el1)); \
}
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -162,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -193,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -245,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -275,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
el1_fpac(regs, esr);
break;
default:
el1_inv(regs, esr);
__panic_unhandled(regs, "64-bit el1h sync", esr);
}
}
static void noinstr el1_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
enter_el1_irq_or_nmi(regs);
do_interrupt_handler(regs, handler);
/*
* Note: thread_info::preempt_count includes both thread_info::count
* and thread_info::need_resched, and is not equivalent to
* preempt_count().
*/
if (IS_ENABLED(CONFIG_PREEMPTION) &&
READ_ONCE(current_thread_info()->preempt_count) == 0)
arm64_preempt_schedule_irq();
exit_el1_irq_or_nmi(regs);
}
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_irq);
}
asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_fiq);
}
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs);
do_serror(regs, esr);
arm64_exit_nmi(regs);
}
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -398,7 +501,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
enter_from_user_mode();
do_debug_exception(far, esr, regs);
local_daif_restore(DAIF_PROCCTX_NOIRQ);
local_daif_restore(DAIF_PROCCTX);
}
static void noinstr el0_svc(struct pt_regs *regs)
@@ -415,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
do_ptrauth_fault(regs, esr);
}
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -468,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
}
}
static void noinstr el0_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
enter_from_user_mode();
write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
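/*
 * Bit 55 of a virtual address selects the TTBR0/TTBR1 half of the address
 * space, so an EL0 exception whose PC has bit 55 set is suspicious and gets
 * branch-predictor hardening (this replaces do_el0_irq_bp_hardening()).
 */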
if (regs->pc & BIT(55))
arm64_apply_bp_hardening();
do_interrupt_handler(regs, handler);
}
static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_irq);
}
asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}
static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_fiq);
}
asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}
static void __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
enter_from_user_mode();
local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs);
do_serror(regs, esr);
arm64_exit_nmi(regs);
local_daif_restore(DAIF_PROCCTX);
}
asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
__el0_error_handler_common(regs);
}
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
@@ -483,7 +636,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
do_el0_svc_compat(regs);
}
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -526,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}
asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}
asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
unsigned int esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs);
panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;
/*
* We didn't take an exception to get here, so the HW hasn't
* set/cleared bits in PSTATE that we may rely on.
*
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
* whether PSTATE bits are inherited unchanged or generated from
* scratch, and the TF-A implementation always clears PAN and always
* clears UAO. There are no other known implementations.
*
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
* PSTATE is modified upon architectural exceptions, and so PAN is
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
* cleared.
*
* We must explicitly reset PAN to the expected state, including
* clearing it when the host isn't using it, in case a VM had it set.
*/
if (system_uses_hw_pan())
set_pstate_pan(1);
else if (cpu_has_pan())
set_pstate_pan(0);
arm64_enter_nmi(regs);
ret = do_sdei_event(regs, arg);
arm64_exit_nmi(regs);
return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */

arch/arm64/kernel/entry.S

@@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl enter_from_user_mode
#endif
.endm
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
@@ -51,16 +45,7 @@
.endr
.endm
/*
* Bad Abort numbers
*-----------------
*/
#define BAD_SYNC 0
#define BAD_IRQ 1
#define BAD_FIQ 2
#define BAD_ERROR 3
.macro kernel_ventry, el, label, regsize = 64
.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -87,7 +72,7 @@ alternative_else_nop_endif
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
b el\()\el\()_\label
b el\el\ht\()_\regsize\()_\label
0:
/*
@@ -119,7 +104,7 @@ alternative_else_nop_endif
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
b el\()\el\()_\label
b el\el\ht\()_\regsize\()_\label
.endm
.macro tramp_alias, dst, sym
@@ -486,63 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif
.macro irq_stack_entry
mov x19, sp // preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
mov x24, scs_sp // preserve the original shadow stack
#endif
/*
* Compare sp with the base of the task stack.
* If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
* and should switch to the irq stack.
*/
ldr x25, [tsk, TSK_STACK]
eor x25, x25, x19
and x25, x25, #~(THREAD_SIZE - 1)
cbnz x25, 9998f
ldr_this_cpu x25, irq_stack_ptr, x26
mov x26, #IRQ_STACK_SIZE
add x26, x25, x26
/* switch to the irq stack */
mov sp, x26
#ifdef CONFIG_SHADOW_CALL_STACK
/* also switch to the irq shadow stack */
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif
9998:
.endm
/*
* The callee-saved regs (x19-x29) should be preserved between
* irq_stack_entry and irq_stack_exit, but note that kernel_entry
* uses x20-x23 to store data for later use.
*/
.macro irq_stack_exit
mov sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
mov scs_sp, x24
#endif
.endm
/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
.macro irq_handler, handler:req
ldr_l x1, \handler
mov x0, sp
irq_stack_entry
blr x1
irq_stack_exit
.endm
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,45 +486,6 @@ tsk .req x28 // current thread_info
#endif
.endm
.macro el1_interrupt_handler, handler:req
enable_da
mov x0, sp
bl enter_el1_irq_or_nmi
irq_handler \handler
#ifdef CONFIG_PREEMPTION
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
/*
* DA were cleared at start of handling, and IF are cleared by
* the GIC irqchip driver using gic_arch_enable_irqs() for
* normal IRQs. If anything is set, it means we come back from
* an NMI instead of a normal IRQ, so skip preemption
*/
mrs x0, daif
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
mov x0, sp
bl exit_el1_irq_or_nmi
.endm
.macro el0_interrupt_handler, handler:req
user_exit_irqoff
enable_da
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler \handler
.endm
.text
/*
@@ -600,32 +495,25 @@ alternative_else_nop_endif
.align 11
SYM_CODE_START(vectors)
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
kernel_ventry 1, error_invalid // Error EL1t
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
kernel_ventry 1, t, 64, fiq // FIQ EL1t
kernel_ventry 1, t, 64, error // Error EL1t
kernel_ventry 1, sync // Synchronous EL1h
kernel_ventry 1, irq // IRQ EL1h
kernel_ventry 1, fiq // FIQ EL1h
kernel_ventry 1, error // Error EL1h
kernel_ventry 1, h, 64, sync // Synchronous EL1h
kernel_ventry 1, h, 64, irq // IRQ EL1h
kernel_ventry 1, h, 64, fiq // FIQ EL1h
kernel_ventry 1, h, 64, error // Error EL1h
kernel_ventry 0, sync // Synchronous 64-bit EL0
kernel_ventry 0, irq // IRQ 64-bit EL0
kernel_ventry 0, fiq // FIQ 64-bit EL0
kernel_ventry 0, error // Error 64-bit EL0
kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0
kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0
kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0
kernel_ventry 0, t, 64, error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
#else
kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0
kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0
kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0
kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
@@ -656,147 +544,46 @@ __bad_stack:
ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
/*
* Invalid mode handlers
*/
.macro inv_entry, el, reason, regsize = 64
.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
kernel_entry \el, \regsize
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
bl bad_mode
ASM_BUG()
bl el\el\ht\()_\regsize\()_\label\()_handler
.if \el == 0
b ret_to_user
.else
b ret_to_kernel
.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
.endm
SYM_CODE_START_LOCAL(el0_sync_invalid)
inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)
SYM_CODE_START_LOCAL(el0_irq_invalid)
inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)
SYM_CODE_START_LOCAL(el0_fiq_invalid)
inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)
SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)
SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)
SYM_CODE_START_LOCAL(el1_irq_invalid)
inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)
SYM_CODE_START_LOCAL(el1_fiq_invalid)
inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)
SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
/*
* EL1 mode handlers.
* Early exception handlers
*/
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
kernel_entry 1
mov x0, sp
bl el1_sync_handler
entry_handler 1, t, 64, sync
entry_handler 1, t, 64, irq
entry_handler 1, t, 64, fiq
entry_handler 1, t, 64, error
entry_handler 1, h, 64, sync
entry_handler 1, h, 64, irq
entry_handler 1, h, 64, fiq
entry_handler 1, h, 64, error
entry_handler 0, t, 64, sync
entry_handler 0, t, 64, irq
entry_handler 0, t, 64, fiq
entry_handler 0, t, 64, error
entry_handler 0, t, 32, sync
entry_handler 0, t, 32, irq
entry_handler 0, t, 32, fiq
entry_handler 0, t, 32, error
SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1
SYM_CODE_END(el1_sync)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
el1_interrupt_handler handle_arch_irq
kernel_exit 1
SYM_CODE_END(el1_irq)
SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
kernel_entry 1
el1_interrupt_handler handle_arch_fiq
kernel_exit 1
SYM_CODE_END(el1_fiq)
/*
* EL0 mode handlers.
*/
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
kernel_entry 0
mov x0, sp
bl el0_sync_handler
b ret_to_user
SYM_CODE_END(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
SYM_CODE_END(el0_sync_compat)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
b el0_irq_naked
SYM_CODE_END(el0_irq_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
kernel_entry 0, 32
b el0_fiq_naked
SYM_CODE_END(el0_fiq_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
SYM_CODE_END(el0_error_compat)
#endif
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
el0_interrupt_handler handle_arch_irq
b ret_to_user
SYM_CODE_END(el0_irq)
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
kernel_entry 0
el0_fiq_naked:
el0_interrupt_handler handle_arch_fiq
b ret_to_user
SYM_CODE_END(el0_fiq)
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
enable_dbg
mov x0, sp
bl do_serror
kernel_exit 1
SYM_CODE_END(el1_error)
SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
user_exit_irqoff
enable_dbg
mov x0, sp
mov x1, x25
bl do_serror
enable_da
b ret_to_user
SYM_CODE_END(el0_error)
SYM_CODE_END(ret_to_kernel)
/*
* "slow" syscall return path.
@@ -998,6 +785,42 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
/*
* void call_on_irq_stack(struct pt_regs *regs,
* void (*func)(struct pt_regs *));
*
* Calls func(regs) using this CPU's irq stack and shadow irq stack.
*/
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
stp scs_sp, xzr, [sp, #-16]!
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
/* Create a frame record to save our LR and SP (implicit in FP) */
stp x29, x30, [sp, #-16]!
mov x29, sp
ldr_this_cpu x16, irq_stack_ptr, x17
mov x15, #IRQ_STACK_SIZE
add x16, x16, x15
/* Move to the new stack and call the function there */
mov sp, x16
blr x1
/*
* Restore the SP from the FP, and restore the FP and LR from the frame
* record.
*/
mov sp, x29
ldp x29, x30, [sp], #16
#ifdef CONFIG_SHADOW_CALL_STACK
ldp scs_sp, xzr, [sp], #16
#endif
ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
#ifdef CONFIG_ARM_SDE_INTERFACE
#include <asm/sdei.h>

arch/arm64/kernel/idle.c (new file, 46 lines)

@@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Low-level idle sequences
*/
#include <linux/cpu.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cpuidle.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
*
* If the CPU supports priority masking we must do additional work to
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
void noinstr cpu_do_idle(void)
{
struct arm_cpuidle_irq_context context;
arm_cpuidle_save_irq_context(&context);
dsb(sy);
wfi();
arm_cpuidle_restore_irq_context(&context);
}
/*
* This is our default idle handler.
*/
void noinstr arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
* tricks
*/
cpu_do_idle();
raw_local_irq_enable();
}

arch/arm64/kernel/process.c

@@ -18,7 +18,6 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
@@ -48,7 +47,6 @@
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cpuidle.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
@@ -74,40 +72,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
*
* If the CPU supports priority masking we must do additional work to
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
void noinstr cpu_do_idle(void)
{
struct arm_cpuidle_irq_context context;
arm_cpuidle_save_irq_context(&context);
dsb(sy);
wfi();
arm_cpuidle_restore_irq_context(&context);
}
/*
* This is our default idle handler.
*/
void noinstr arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
* tricks
*/
cpu_do_idle();
raw_local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -723,22 +687,6 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
/*
* Preempting a task from an IRQ means we leave copies of PSTATE
* on the stack. cpufeature's enable calls may modify PSTATE, but
* resuming one of these preempted tasks would undo those changes.
*
* Only allow a task to be preempted once cpufeatures have been
* enabled.
*/
if (system_capabilities_finalized())
preempt_schedule_irq();
}
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
bool has_interp, bool is_interp)

arch/arm64/kernel/sdei.c

@@ -233,13 +233,13 @@ out_err:
}
/*
* __sdei_handler() returns one of:
* do_sdei_event() returns one of:
* SDEI_EV_HANDLED - success, return to the interrupted context.
* SDEI_EV_FAILED - failure, return this error code to firmware.
* virtual-address - success, return to this address.
*/
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
u32 mode;
int i, err = 0;
@@ -294,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480;
}
static void __kprobes notrace __sdei_pstate_entry(void)
{
/*
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
* whether PSTATE bits are inherited unchanged or generated from
* scratch, and the TF-A implementation always clears PAN and always
* clears UAO. There are no other known implementations.
*
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
* PSTATE is modified upon architectural exceptions, and so PAN is
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
* cleared.
*
* We must explicitly reset PAN to the expected state, including
* clearing it when the host isn't using it, in case a VM had it set.
*/
if (system_uses_hw_pan())
set_pstate_pan(1);
else if (cpu_has_pan())
set_pstate_pan(0);
}
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;
/*
* We didn't take an exception to get here, so the HW hasn't
* set/cleared bits in PSTATE that we may rely on. Initialize PAN.
*/
__sdei_pstate_entry();
arm64_enter_nmi(regs);
ret = _sdei_handler(regs, arg);
arm64_exit_nmi(regs);
return ret;
}

arch/arm64/kernel/traps.c

@@ -45,13 +45,6 @@
#include <asm/system_misc.h>
#include <asm/sysreg.h>
static const char *handler[] = {
"Synchronous Abort",
"IRQ",
"FIQ",
"Error"
};
int show_unhandled_signals = 0;
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
@@ -750,28 +743,9 @@ const char *esr_get_class_string(u32 esr)
return esr_class_str[ESR_ELx_EC(esr)];
}
/*
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
arm64_enter_nmi(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
__show_regs(regs);
local_daif_mask();
panic("bad mode");
}
/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns.
* exceptions taken from EL0.
*/
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
@@ -789,15 +763,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
unsigned int esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@@ -870,15 +840,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
void do_serror(struct pt_regs *regs, unsigned int esr)
{
arm64_enter_nmi(regs);
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
arm64_exit_nmi(regs);
}
/* GENERIC_BUG traps */

arch/arm64/mm/fault.c

@@ -836,13 +836,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
void do_el0_irq_bp_hardening(void)
{
/* PC has already been checked in entry.S */
arm64_apply_bp_hardening();
}
NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,