linux/arch/arm64/kernel/syscall.c
Kees Cook 70918779ae arm64: entry: Enable random_kstack_offset support
Allow for a randomized stack offset on a per-syscall basis, with roughly
5 bits of entropy. (And include the AAPCS rationale, thanks to Mark
Rutland.)

In order to avoid unconditional stack canaries on syscall entry (due to
the use of alloca()), also disable stack protector to avoid triggering
needless checks and slowing down the entry path. As there is no general
way to control stack protector coverage with a function attribute[1],
this must be disabled at the compilation unit level. This isn't a problem
here, though, since stack protector was not triggered before: examining
the resulting syscall.o, there are no changes in canary coverage (none
before, none now).

[1] a working __attribute__((no_stack_protector)) has been added to GCC
and Clang but has not been released in any version yet:
https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=346b302d09c1e6db56d9fe69048acb32fbb97845
https://reviews.llvm.org/rG4fbf84c1732fca596ad1d6e96015e19760eb8a9b
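
For context, the generic helper consumes this offset with a small
alloca() at syscall entry. A minimal sketch of the mechanism,
simplified from include/linux/randomize_kstack.h (exact details vary
by kernel version):

	u32 offset = raw_cpu_read(kstack_offset);
	u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));
	/* Keep the allocation alive so the compiler cannot elide it. */
	asm volatile("" :: "r"(ptr) : "memory");

That alloca() is also why the stack protector must stay off in this
compilation unit: the compiler would otherwise emit a canary check on
every syscall entry.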

Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210401232347.2791257-6-keescook@chromium.org
2021-04-08 14:12:19 +02:00

// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
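
/*
 * Out-of-range syscall numbers are not necessarily errors: give the
 * compat layer a chance to handle the AArch32 arch-private syscalls
 * before falling back to -ENOSYS.
 */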
static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
        long ret;

        if (is_compat_task()) {
                ret = compat_arm_syscall(regs, scno);
                if (ret != -ENOSYS)
                        return ret;
        }
#endif

        return sys_ni_syscall();
}
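
/*
 * Each syscall wrapper takes the saved user register state and extracts
 * its own arguments from it, so nothing but regs needs to be passed.
 */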
static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
{
        return syscall_fn(regs);
}

static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                           unsigned int sc_nr,
                           const syscall_fn_t syscall_table[])
{
        long ret;

        add_random_kstack_offset();

        if (scno < sc_nr) {
                syscall_fn_t syscall_fn;
                syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
                ret = __invoke_syscall(regs, syscall_fn);
        } else {
                ret = do_ni_syscall(regs, scno);
        }

        if (is_compat_task())
                ret = lower_32_bits(ret);

        regs->regs[0] = ret;

        /*
         * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
         * but not enough for arm64 stack utilization comfort. To keep
         * reasonable stack head room, reduce the maximum offset to 9 bits.
         *
         * The actual entropy will be further reduced by the compiler when
         * applying stack alignment constraints: the AAPCS mandates a
         * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
         *
         * The resulting 5 bits of entropy is seen in SP[8:4].
         */
        choose_random_kstack_offset(get_random_int() & 0x1FF);
}

static inline bool has_syscall_work(unsigned long flags)
{
        return unlikely(flags & _TIF_SYSCALL_WORK);
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                           const syscall_fn_t syscall_table[])
{
        unsigned long flags = current_thread_info()->flags;

        regs->orig_x0 = regs->regs[0];
        regs->syscallno = scno;

        /*
         * BTI note:
         * The architecture does not guarantee that SPSR.BTYPE is zero
         * on taking an SVC, so we could return to userspace with a
         * non-zero BTYPE after the syscall.
         *
         * This shouldn't matter except when userspace is explicitly
         * doing something stupid, such as setting PROT_BTI on a page
         * that lacks conforming BTI/PACIxSP instructions, falling
         * through from one executable page to another with differing
         * PROT_BTI, or messing with BTYPE via ptrace: in such cases,
         * userspace should not be surprised if a SIGILL occurs on
         * syscall return.
         *
         * So, don't touch regs->pstate & PSR_BTYPE_MASK here.
         * (Similarly for HVC and SMC elsewhere.)
         */

        local_daif_restore(DAIF_PROCCTX);

        if (flags & _TIF_MTE_ASYNC_FAULT) {
                /*
                 * Process the asynchronous tag check fault before the actual
                 * syscall. do_notify_resume() will send a signal to userspace
                 * before the syscall is restarted.
                 */
                regs->regs[0] = -ERESTARTNOINTR;
                return;
        }

        if (has_syscall_work(flags)) {
                /*
                 * The de-facto standard way to skip a system call using
                 * ptrace is to set the system call to -1 (NO_SYSCALL) and
                 * set x0 to a suitable error code for consumption by
                 * userspace. However, this cannot be distinguished from a
                 * user-issued syscall(-1) and so we must set x0 to -ENOSYS
                 * here in case the tracer doesn't issue the skip and we fall
                 * into trace_exit with x0 preserved.
                 *
                 * This is slightly odd because it also means that if a tracer
                 * sets the system call number to -1 but does not initialise
                 * x0, then x0 will be preserved for all system calls apart
                 * from a user-issued syscall(-1). However, requesting a skip
                 * and not setting the return value is unlikely to do anything
                 * sensible anyway.
                 */
                if (scno == NO_SYSCALL)
                        regs->regs[0] = -ENOSYS;
                scno = syscall_trace_enter(regs);
                if (scno == NO_SYSCALL)
                        goto trace_exit;
        }

        invoke_syscall(regs, scno, sc_nr, syscall_table);

        /*
         * The tracing status may have changed under our feet, so we have to
         * check again. However, if we were tracing entry, then we always trace
         * exit regardless, as the old entry assembly did.
         */
        if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
                local_daif_mask();
                flags = current_thread_info()->flags;
                if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
                        return;
                local_daif_restore(DAIF_PROCCTX);
        }

trace_exit:
        syscall_trace_exit(regs);
}
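
/*
 * The SVE ABI leaves the extended vector state unspecified across a
 * syscall, so it can simply be discarded on entry rather than preserved.
 */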
static inline void sve_user_discard(void)
{
        if (!system_supports_sve())
                return;

        clear_thread_flag(TIF_SVE);

        /*
         * task_fpsimd_load() won't be called to update CPACR_EL1 in
         * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
         * happens if a context switch or kernel_neon_begin() or context
         * modification (sigreturn, ptrace) intervenes.
         * So, ensure that CPACR_EL1 is already correct for the fast-path case.
         */
        sve_user_disable();
}
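
/* Native AAPCS64 tasks pass the syscall number in w8 (regs->regs[8]). */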
void do_el0_svc(struct pt_regs *regs)
{
        sve_user_discard();
        el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}

#ifdef CONFIG_COMPAT
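/* AArch32 compat tasks pass the syscall number in r7 (regs->regs[7]). */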
void do_el0_svc_compat(struct pt_regs *regs)
{
        el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
                       compat_sys_call_table);
}
#endif