arm64: convert syscall trace logic to C
Currently syscall tracing is a tricky assembly state machine, which can
be rather difficult to follow, and even harder to modify. Before we
start fiddling with it for pt_regs syscalls, let's convert it to C.

This is not intended to have any functional change.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
parent 4141c857fd
commit f37099b699
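For orientation before the diff: the assembly fast-path/slow-path state machine is replaced by a single C entry point, el0_svc_common(). Its prototype is taken verbatim from the syscall.c hunk below; the register-mapping comments are mine, based on the mov/bl sequence left in el0_svc_naked and the standard AAPCS argument registers:

	/*
	 * el0_svc_naked marshals its arguments per the AAPCS and branches to C:
	 *   x0 = sp     (struct pt_regs *, the saved user register state)
	 *   w1 = wscno  (syscall number)
	 *   w2 = wsc_nr (number of entries in the syscall table)
	 *   x3 = stbl   (the syscall table itself)
	 */
	asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
				       const syscall_fn_t syscall_table[]);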
arch/arm64/kernel/entry.S:

@@ -896,26 +896,6 @@ el0_error_naked:
 	b	ret_to_user
 ENDPROC(el0_error)
 
-/*
- * This is the fast syscall return path. We do as little as possible here,
- * and this includes saving x0 back into the kernel stack.
- */
-ret_fast_syscall:
-	disable_daif
-#ifndef CONFIG_DEBUG_RSEQ
-	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
-	and	x2, x1, #_TIF_SYSCALL_WORK
-	cbnz	x2, ret_fast_syscall_trace
-	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-	enable_step_tsk x1, x2
-	kernel_exit 0
-#endif
-ret_fast_syscall_trace:
-	enable_daif
-	b	__sys_trace_return_skipped	// we already saved x0
-
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
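The deleted fast-return path is not lost: its DAIF-mask-and-re-check logic reappears as the tail of el0_svc_common() in the syscall.c hunk further down, and the remaining _TIF_WORK_MASK/kernel_exit handling now always goes through the existing ret_to_user path. Roughly, the old assembly maps onto the new C as follows (the C is taken from the hunk below; the side-by-side comments are mine):

	local_daif_mask();                     /* was: disable_daif */
	flags = current_thread_info()->flags;  /* was: ldr x1, [tsk, #TSK_TI_FLAGS] */
	if (!has_syscall_work(flags))          /* was: and/cbnz on _TIF_SYSCALL_WORK */
		return;                        /* fast return, via ret_to_user */
	local_daif_restore(DAIF_PROCCTX);      /* was: enable_daif; then falls through to exit tracing */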
@@ -971,44 +951,13 @@ alternative_else_nop_endif
 #endif
 
 el0_svc_naked:					// compat entry point
-	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
-	enable_daif
-	ct_user_exit 1
-
-	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
-	b.ne	__sys_trace
 	mov	x0, sp
 	mov	w1, wscno
 	mov	w2, wsc_nr
 	mov	x3, stbl
-	bl	invoke_syscall
-	b	ret_fast_syscall
-ENDPROC(el0_svc)
-
-/*
- * This is the really slow path. We're going to be doing context
- * switches, and waiting for our parent to respond.
- */
-__sys_trace:
-	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
-	b.ne	1f
-	mov	x0, #-ENOSYS			// set default errno if so
-	str	x0, [sp, #S_X0]
-1:	mov	x0, sp
-	bl	syscall_trace_enter
-	cmp	w0, #NO_SYSCALL			// skip the syscall?
-	b.eq	__sys_trace_return_skipped
-
-	mov	x0, sp
-	mov	w1, wscno
-	mov	w2, wsc_nr
-	mov	x3, stbl
-	bl	invoke_syscall
-
-__sys_trace_return_skipped:
-	mov	x0, sp
-	bl	syscall_trace_exit
+	bl	el0_svc_common
 	b	ret_to_user
+ENDPROC(el0_svc)
 
 	.popsection				// .entry.text
arch/arm64/kernel/syscall.c:

@@ -1,11 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/compiler.h>
+#include <linux/context_tracking.h>
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
 #include <linux/syscalls.h>
 
+#include <asm/daifflags.h>
 #include <asm/syscall.h>
+#include <asm/thread_info.h>
 
 long compat_arm_syscall(struct pt_regs *regs);
 
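As a reading aid (not part of the patch), each new include supplies a C helper that replaces an assembly macro from entry.S:

	#include <linux/context_tracking.h>  /* user_exit(), replacing the ct_user_exit macro */
	#include <asm/daifflags.h>           /* local_daif_restore()/local_daif_mask(), replacing enable_daif/disable_daif */
	#include <asm/thread_info.h>         /* struct thread_info and the _TIF_* flags */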
@@ -29,9 +33,9 @@ static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
 			       regs->regs[3], regs->regs[4], regs->regs[5]);
 }
 
-asmlinkage void invoke_syscall(struct pt_regs *regs, unsigned int scno,
-			       unsigned int sc_nr,
-			       const syscall_fn_t syscall_table[])
+static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+			   unsigned int sc_nr,
+			   const syscall_fn_t syscall_table[])
 {
 	long ret;
 
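With the tracing state machine gone from entry.S, nothing outside this file branches to invoke_syscall() any more; its only caller is the new el0_svc_common() below, so it drops asmlinkage and becomes static (and can be inlined). Its body is untouched by this patch; for context, the dispatch introduced by the parent commit (4141c857fd) looks roughly like this (a reconstructed sketch, not quoted from this diff):

	if (scno < sc_nr) {
		syscall_fn_t syscall_fn;
		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
		ret = __invoke_syscall(regs, syscall_fn);
	} else {
		ret = do_ni_syscall(regs);
	}

	regs->regs[0] = ret;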
@@ -45,3 +49,50 @@ asmlinkage void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 
 	regs->regs[0] = ret;
 }
+
+static inline bool has_syscall_work(unsigned long flags)
+{
+	return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+			       const syscall_fn_t syscall_table[])
+{
+	unsigned long flags = current_thread_info()->flags;
+
+	regs->orig_x0 = regs->regs[0];
+	regs->syscallno = scno;
+
+	local_daif_restore(DAIF_PROCCTX);
+	user_exit();
+
+	if (has_syscall_work(flags)) {
+		/* set default errno for user-issued syscall(-1) */
+		if (scno == NO_SYSCALL)
+			regs->regs[0] = -ENOSYS;
+		scno = syscall_trace_enter(regs);
+		if (scno == NO_SYSCALL)
+			goto trace_exit;
+	}
+
+	invoke_syscall(regs, scno, sc_nr, syscall_table);
+
+	/*
+	 * The tracing status may have changed under our feet, so we have to
+	 * check again. However, if we were tracing entry, then we always trace
+	 * exit regardless, as the old entry assembly did.
+	 */
+	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+		local_daif_mask();
+		flags = current_thread_info()->flags;
+		if (!has_syscall_work(flags))
+			return;
+		local_daif_restore(DAIF_PROCCTX);
+	}
+
+trace_exit:
+	syscall_trace_exit(regs);
+}
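The subtle point in el0_svc_common() is that the entry-time snapshot of the flags decides the exit behaviour: if entry tracing ran, exit tracing always runs too, even when the tracer skipped the syscall (hence the goto), while the fast-path re-check only happens when no tracer was active at entry. A stand-alone user-space sketch of that control flow (all model_* names hypothetical, DAIF masking reduced to comments, the CONFIG_DEBUG_RSEQ guard elided) behaves the same way:

	#include <stdio.h>

	#define TIF_SYSCALL_WORK	(1UL << 0)
	#define NO_SYSCALL		(-1)

	/* Stands in for current_thread_info()->flags; a tracer would set the bit. */
	static unsigned long thread_flags;

	static int has_syscall_work(unsigned long flags)
	{
		return flags & TIF_SYSCALL_WORK;
	}

	static int model_trace_enter(int scno)
	{
		printf("  trace enter (scno=%d)\n", scno);
		return scno;	/* a tracer may rewrite this, or return NO_SYSCALL */
	}

	static void model_trace_exit(void)
	{
		printf("  trace exit\n");
	}

	static void model_invoke(int scno)
	{
		printf("  invoke syscall %d\n", scno);
	}

	static void model_el0_svc_common(int scno)
	{
		unsigned long flags = thread_flags;	/* snapshot taken at entry */

		if (has_syscall_work(flags)) {
			scno = model_trace_enter(scno);
			if (scno == NO_SYSCALL)
				goto trace_exit;	/* skipped, but exit is still traced */
		}

		model_invoke(scno);

		/* Only attempt the fast return if no tracer was active at entry. */
		if (!has_syscall_work(flags)) {
			/* kernel: local_daif_mask(), then re-read the live flags */
			if (!has_syscall_work(thread_flags))
				return;			/* fast path: no exit tracing */
			/* kernel: local_daif_restore(DAIF_PROCCTX), fall through */
		}

	trace_exit:
		model_trace_exit();
	}

	int main(void)
	{
		printf("untraced:\n");
		model_el0_svc_common(64);	/* fast path, no trace lines */

		printf("traced:\n");
		thread_flags |= TIF_SYSCALL_WORK;
		model_el0_svc_common(64);	/* enter, invoke, exit */
		return 0;
	}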