arm64: entry: move bulk of ret_to_user to C
In `ret_to_user` we perform some conditional work depending on the thread
flags, then perform some IRQ/context tracking which is intended to balance
with the IRQ/context tracking performed in the entry C code.

For simplicity and consistency, it would be preferable to move this all to C.
As a step towards that, this patch moves the conditional work and IRQ/context
tracking into a C helper function. To aid bisectability, this is called from
the `ret_to_user` assembly, and a subsequent patch will move the call to C
code.

As local_daif_mask() handles all necessary tracing and PMR manipulation, we
no longer need to handle this explicitly.

As we call exit_to_user_mode() directly, the `user_enter_irqoff` macro is no
longer used, and can be removed. As enter_from_user_mode() and
exit_to_user_mode() are no longer called from assembly, these can be made
static, and as these are typically very small, they are marked
__always_inline to avoid the overhead of a function call.

For now, enablement of single-step is left in entry.S, and for this we still
need to read the flags in ret_to_user(). It is safe to read this separately
as TIF_SINGLESTEP is not part of _TIF_WORK_MASK.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-4-mark.rutland@arm.com
[catalin.marinas@arm.com: removed unused gic_prio_kentry_setup macro]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 4d1c2ee270 (parent bc29b71f53)
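
A quick orientation before the diff: the heart of the change is the new
asm_exit_to_user_mode() helper in entry-common.c, called from the trimmed-down
ret_to_user assembly. An annotated sketch of the resulting flow follows; the
comments are mine, not from the patch, while the code itself matches the
entry-common.c hunk below:

	/* Called from the ret_to_user assembly with x0 = sp, i.e. the pt_regs. */
	asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
	{
		/*
		 * Mask DAIF (local_daif_mask() also covers tracing and PMR),
		 * then run any pending _TIF_WORK_MASK work via
		 * do_notify_resume().
		 */
		prepare_exit_to_user_mode(regs);

		/*
		 * Balance the entry-side tracking: context tracking, irqflag
		 * tracing and the MTE TFSR check.
		 */
		exit_to_user_mode();
	}

Note that single-step enablement stays in assembly: TIF_SINGLESTEP is not part
of _TIF_WORK_MASK, so re-reading the thread flags for enable_step_tsk after
this call cannot miss work queued before the DAIF mask.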
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
 
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 				  void (*func)(struct pt_regs *));
-asmlinkage void enter_from_user_mode(void);
-asmlinkage void exit_to_user_mode(void);
+asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
+
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
 void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
 
 void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -104,7 +104,7 @@ static __always_inline void __enter_from_user_mode(void)
 	trace_hardirqs_off_finish();
 }
 
-asmlinkage void noinstr enter_from_user_mode(void)
+static __always_inline void enter_from_user_mode(void)
 {
 	__enter_from_user_mode();
 }
@@ -123,12 +123,29 @@ static __always_inline void __exit_to_user_mode(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-asmlinkage void noinstr exit_to_user_mode(void)
+static __always_inline void exit_to_user_mode(void)
 {
 	mte_check_tfsr_exit();
 	__exit_to_user_mode();
 }
 
+static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	local_daif_mask();
+
+	flags = READ_ONCE(current_thread_info()->flags);
+	if (unlikely(flags & _TIF_WORK_MASK))
+		do_notify_resume(regs, flags);
+}
+
+asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+{
+	prepare_exit_to_user_mode(regs);
+	exit_to_user_mode();
+}
+
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,16 +29,6 @@
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
-	.macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-	bl	exit_to_user_mode
-#endif
-	.endm
-
 	.macro	clear_gp_regs
 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
 	mov	x\n, xzr
@@ -474,18 +464,6 @@ SYM_CODE_END(__swpan_exit_el0)
 /* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
-/*
- * Interrupt handling.
- */
-	.macro	gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
-	msr_s	SYS_ICC_PMR_EL1, \tmp
-	alternative_else_nop_endif
-#endif
-	.endm
-
 	.text
 
 /*
@@ -585,37 +563,17 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
 	kernel_exit 1
 SYM_CODE_END(ret_to_kernel)
 
-/*
- * "slow" syscall return path.
- */
 SYM_CODE_START_LOCAL(ret_to_user)
-	disable_daif
-	gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
-	ldr	x19, [tsk, #TSK_TI_FLAGS]
-	and	x2, x19, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-finish_ret_to_user:
-	user_enter_irqoff
+	mov	x0, sp
+	bl	asm_exit_to_user_mode
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	clear_mte_async_tcf
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-	mov	x0, sp				// 'regs'
-	mov	x1, x19
-	bl	do_notify_resume
-	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
-	b	finish_ret_to_user
 SYM_CODE_END(ret_to_user)
 
 .popsection				// .entry.text
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -924,8 +924,7 @@ static bool cpu_affinity_invalid(struct pt_regs *regs)
 			system_32bit_el0_cpumask());
 }
 
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-				 unsigned long thread_flags)
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 {
 	do {
 		if (thread_flags & _TIF_NEED_RESCHED) {