commit 1b2d3451ee
This patch enables support for PREEMPT_DYNAMIC on arm64, allowing the
preemption model to be chosen at boot time.

Specifically, this patch selects HAVE_PREEMPT_DYNAMIC_KEY, so that each
preemption function is an out-of-line call with an early return
depending upon a static key. This leaves almost all the codegen up to
the compiler, and side-steps a number of pain points with static calls
(e.g. interaction with CFI schemes). This should have no worse overhead
than using non-inline static calls, as those use out-of-line trampolines
with early returns.

For example, the dynamic_cond_resched() wrapper looks as follows when
enabled. When disabled, the first `B` is replaced with a `NOP`,
resulting in an early return.

| <dynamic_cond_resched>:
|        bti     c
|        b       <dynamic_cond_resched+0x10>     // or `nop`
|        mov     w0, #0x0
|        ret
|        mrs     x0, sp_el0
|        ldr     x0, [x0, #8]
|        cbnz    x0, <dynamic_cond_resched+0x8>
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

... compared to the regular form of the function:

| <__cond_resched>:
|        bti     c
|        mrs     x0, sp_el0
|        ldr     x1, [x0, #8]
|        cbz     x1, <__cond_resched+0x18>
|        mov     w0, #0x0
|        ret
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

Since arm64 does not yet use the generic entry code, we must define our
own `sk_dynamic_irqentry_exit_cond_resched`, which will be
enabled/disabled by the common code in kernel/sched/core.c. All other
preemption functions and associated static keys are defined there.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-8-mark.rutland@arm.com
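The C shape that produces the enabled listing above is a plain wrapper
whose early return hangs off a static branch; the generic definitions
in kernel/sched/core.c take roughly this form (a simplified sketch, not
the verbatim source):

| static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
|
| int __sched dynamic_cond_resched(void)
| {
|         /* This branch is patched between `b` and `nop` at runtime */
|         if (!static_branch_unlikely(&sk_dynamic_cond_resched))
|                 return 0;       /* early return when disabled */
|         return __cond_resched();
| }

Flipping the key with static_branch_enable()/static_branch_disable()
rewrites the branch instruction in place, which is how the boot-time
model selection takes effect without any indirect call.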
105 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/jump_label.h>
#include <linux/thread_info.h>

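/*
 * The 64-bit preempt_count in thread_info overlays two 32-bit fields:
 * the preemption count in bits [31:0] and the need_resched flag in the
 * upper word (hence BIT(32)). need_resched has inverted polarity
 * (0 means "reschedule needed"), so "preemptible and resched pending"
 * is a single all-zeroes test on the whole 64-bit word.
 */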
#define PREEMPT_NEED_RESCHED	BIT(32)
#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
	/* Preserve existing value of PREEMPT_NEED_RESCHED */
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

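/*
 * Note the inverted polarity below: writing 0 to preempt.need_resched
 * marks a reschedule as needed, and writing 1 clears it. See the
 * layout comment above PREEMPT_NEED_RESCHED.
 */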
static inline void set_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
	return !current_thread_info()->preempt.need_resched;
}

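/*
 * Only the 32-bit count half of the word is read and written here; an
 * interrupt that flips need_resched in the other half cannot be lost,
 * since that half is never written back by these helpers.
 */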
static inline void __preempt_count_add(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc += val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc -= val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
	struct thread_info *ti = current_thread_info();
	u64 pc = READ_ONCE(ti->preempt_count);

	/* Update only the count field, leaving need_resched unchanged */
	WRITE_ONCE(ti->preempt.count, --pc);

	/*
	 * If we wrote back all zeroes, then we're preemptible and in
	 * need of a reschedule. Otherwise, we need to reload the
	 * preempt_count in case the need_resched flag was cleared by an
	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
	 * pair.
	 */
	return !pc || !READ_ONCE(ti->preempt_count);
}

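/*
 * A single 64-bit compare tests both halves at once: the count (low
 * word) must equal preempt_offset and need_resched (high word) must be
 * zero, i.e. a reschedule is pending.
 */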
static inline bool should_resched(int preempt_offset)
{
	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
	return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPTION

void preempt_schedule(void);
void preempt_schedule_notrace(void);

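/*
 * With CONFIG_PREEMPT_DYNAMIC, the preemption entry points route
 * through static-key-patched wrappers defined in kernel/sched/core.c,
 * allowing the preemption model to be selected at boot time via the
 * "preempt=" command line option.
 */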
#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
#define __preempt_schedule()		dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()

#else /* CONFIG_PREEMPT_DYNAMIC */

#define __preempt_schedule()		preempt_schedule()
#define __preempt_schedule_notrace()	preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */