Commit f1a0a376ca
As pointed out by commit de9b8f5dcb ("sched: Fix crash trying to
dequeue/enqueue the idle thread"),
init_idle() can and will be invoked more than once on the same idle
task. At boot time, it is invoked for the boot CPU thread by
sched_init(). Then smp_init() creates the threads for all the secondary
CPUs and invokes init_idle() on them.
As the hotplug machinery brings the secondaries to life, it will issue
calls to idle_thread_get(), which itself invokes init_idle() yet again.
In this case it's invoked twice more per secondary: at _cpu_up(), and at
bringup_cpu().
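
To make the duplicated-initialization paths easier to follow, the call chains
described above can be sketched roughly as follows (a simplified sketch; the
exact helper names in kernel/smp.c, kernel/smpboot.c and kernel/cpu.c may vary
between kernel versions):

/*
 * Boot CPU:
 *	sched_init()
 *	    init_idle(current, smp_processor_id())
 *
 * Secondary CPUs, at boot:
 *	smp_init()
 *	    idle_threads_init()
 *	        fork_idle(cpu) -> init_idle(tsk, cpu)
 *
 * Secondary CPUs, on (re-)hotplug -- before this change:
 *	_cpu_up(cpu)     -> idle_thread_get(cpu) -> init_idle(tsk, cpu)
 *	bringup_cpu(cpu) -> idle_thread_get(cpu) -> init_idle(tsk, cpu)
 */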
Given smp_init() already initializes the idle tasks for all *possible*
CPUs, no further initialization should be required. Now, removing
init_idle() from idle_thread_get() exposes some interesting expectations
with regards to the idle task's preempt_count: the secondary startup always
issues a preempt_disable(), requiring some reset of the preempt count to 0
between hot-unplug and hotplug, which is currently served by
idle_thread_get() -> init_idle().
Given the idle task is supposed to have preemption disabled once and never
see it re-enabled, it seems that what we actually want is to initialize its
preempt_count to PREEMPT_DISABLED and leave it there. Do that, and remove
init_idle() from idle_thread_get().
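
With init_idle() dropped from it, idle_thread_get() in kernel/smpboot.c
reduces to roughly the following (a sketch of the post-patch shape, not the
verbatim diff):

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);

	/* No init_idle() here any more: smp_init() already set the task up. */
	return tsk;
}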
Secondary startups were patched via coccinelle:
@begone@
@@
-preempt_disable();
...
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
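
Applied to a secondary startup routine, the semantic patch just removes the
now-redundant preempt_disable() in front of the idle-loop entry. An
illustrative (hypothetical, architecture-neutral) example of the resulting
shape:

static void example_secondary_start_kernel(void)	/* hypothetical name */
{
	/* ... arch-specific bring-up: MMU, interrupts, notify the boot CPU ... */

	/*
	 * Previously: preempt_disable(); followed by cpu_startup_entry().
	 * The idle task now starts life with preempt_count == PREEMPT_DISABLED,
	 * so the explicit disable is gone and we enter the idle loop directly.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}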
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512094636.2958515-1-valentin.schneider@arm.com
89 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because of load-store architectures cannot do per-cpu atomic
	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
	 * lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
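
For context, the init_idle_preempt_count() helper defined in this generic
preempt header is invoked from init_idle() in kernel/sched/core.c, both at
boot and when an idle task is reused for CPU bring-up. A trimmed sketch of
that call site (not the full function):

void init_idle(struct task_struct *idle, int cpu)
{
	/* ... scheduler-class, affinity and rq->idle setup elided ... */

	/*
	 * The idle task never sees preemption re-enabled, so its count is
	 * seeded to PREEMPT_DISABLED (from <linux/preempt.h>) and left there.
	 */
	init_idle_preempt_count(idle, cpu);
}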