mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-05 21:35:04 +08:00
4d1c2ee270
In `ret_to_user` we perform some conditional work depending on the thread flags, then perform some IRQ/context tracking which is intended to balance with the IRQ/context tracking performed in the entry C code. For simplicity and consistency, it would be preferable to move this all to C. As a step towards that, this patch moves the conditional work and IRQ/context tracking into a C helper function. To aid bisectability, this is called from the `ret_to_user` assembly, and a subsequent patch will move the call to C code. As local_daif_mask() handles all necessary tracing and PMR manipulation, we no longer need to handle this explicitly. As we call exit_to_user_mode() directly, the `user_enter_irqoff` macro is no longer used, and can be removed. As enter_from_user_mode() and exit_to_user_mode() are no longer called from assembly, these can be made static, and as these are typically very small, they are marked __always_inline to avoid the overhead of a function call. For now, enablement of single-step is left in entry.S, and for this we still need to read the flags in ret_to_user(). It is safe to read this separately as TIF_SINGLESTEP is not part of _TIF_WORK_MASK. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Link: https://lore.kernel.org/r/20210802140733.52716-4-mark.rutland@arm.com [catalin.marinas@arm.com: removed unused gic_prio_kentry_setup macro] Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
80 lines
2.8 KiB
C
80 lines
2.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Based on arch/arm/include/asm/exception.h
|
|
*
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*/
|
|
#ifndef __ASM_EXCEPTION_H
|
|
#define __ASM_EXCEPTION_H
|
|
|
|
#include <asm/esr.h>
|
|
#include <asm/kprobes.h>
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
#define __exception_irq_entry __irq_entry
|
|
#else
|
|
#define __exception_irq_entry __kprobes
|
|
#endif
|
|
|
|
/*
 * Synthesize an ESR_ELx-format syndrome for a deferred SError from the
 * contents of DISR_EL1. The EC field is always SError; the ISS bits are
 * taken from DISR_EL1 directly when the implementation-defined syndrome
 * (IDS) bit is set, and from the architected ESR-compatible field
 * otherwise.
 */
static inline u32 disr_to_esr(u64 disr)
{
	unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if (disr & DISR_EL1_IDS)
		esr |= (disr & ESR_ELx_ISS_MASK);
	else
		esr |= (disr & DISR_EL1_ESR_MASK);

	return esr;
}
|
|
|
|
asmlinkage void handle_bad_stack(struct pt_regs *regs);
|
|
|
|
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
|
|
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
|
|
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
|
|
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
|
|
|
|
asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
|
|
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
|
|
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
|
|
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
|
|
|
|
asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
|
|
|
|
asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
|
|
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
|
|
|
|
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
|
|
void (*func)(struct pt_regs *));
|
|
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
|
|
|
|
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
|
|
void do_undefinstr(struct pt_regs *regs);
|
|
void do_bti(struct pt_regs *regs);
|
|
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
|
|
struct pt_regs *regs);
|
|
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
|
|
void do_sve_acc(unsigned int esr, struct pt_regs *regs);
|
|
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
|
|
void do_sysinstr(unsigned int esr, struct pt_regs *regs);
|
|
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
|
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
|
|
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
|
|
void do_el0_svc(struct pt_regs *regs);
|
|
void do_el0_svc_compat(struct pt_regs *regs);
|
|
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
|
|
void do_serror(struct pt_regs *regs, unsigned int esr);
|
|
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
|
|
|
|
void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
|
|
#endif /* __ASM_EXCEPTION_H */
|