// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}
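
/*
 * Report the syscall and its arguments to the audit subsystem when the
 * task has an active audit context.
 */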
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}
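
/*
 * Handle all syscall entry work flagged in syscall_work: syscall user
 * dispatch, ptrace, seccomp, tracepoints and audit. Returns the (possibly
 * rewritten) syscall number, or -1 to skip the syscall.
 */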
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = ptrace_report_syscall_entry(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}
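
/*
 * Common syscall entry work. Only invokes syscall_trace_enter() when a
 * SYSCALL_WORK_ENTER bit is set for the current task.
 */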
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}
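
/*
 * Combined entry point: establish kernel context, enable interrupts and run
 * the syscall entry work in one go. Architectures which need to do extra
 * work between these steps use the _prepare/_work variants instead.
 */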
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
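
/*
 * Process pending TIF work with interrupts enabled and loop until no work
 * bit in EXIT_TO_USER_MODE_WORK is left pending.
 */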
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space, ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			arch_do_signal_or_restart(regs);

		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}
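
/*
 * Last preparation step before returning to user space: handle pending TIF
 * work, run the architecture hooks and verify the final kernel state.
 */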
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that kernel state is sane for a return to userspace */
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
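
/*
 * Handle all syscall exit work flagged in syscall_work: audit, tracepoints
 * and ptrace reporting including single step handling.
 */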
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason that
	 * the entry side was not invoked in syscall_trace_enter(): the ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		ptrace_report_syscall_exit(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}
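
/*
 * Common syscall exit path: run the syscall specific exit work with
 * interrupts enabled, then disable interrupts and prepare the actual
 * return to user space.
 */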
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}
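
/*
 * The returned state tells irqentry_exit() whether RCU/context tracking
 * needs to be unwound (exit_rcu) on the way out.
 */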
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking ct_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}
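
/*
 * Preemption point for interrupt return to kernel mode: reschedule if
 * preemption is allowed and a reschedule is pending.
 */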
void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
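
/*
 * PREEMPT_DYNAMIC selects the preemption mode at boot time. Depending on
 * the architecture this is wired up either via a static call trampoline
 * (HAVE_PREEMPT_DYNAMIC_CALL) or via a static key folded into the function
 * itself (HAVE_PREEMPT_DYNAMIC_KEY), which avoids the out-of-line
 * trampoline.
 */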
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
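
/*
 * Counterpart to irqentry_enter(): undo the state recorded on entry and,
 * when returning to the kernel with interrupts enabled, handle preemption.
 */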
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry, this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}
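
/*
 * NMI entry/exit use their own bookkeeping as NMIs can nest into any other
 * context; the lockdep state is saved in irq_state and restored on exit.
 */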
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}
	instrumentation_end();

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}