sched: Fix missing preemption opportunity
If an interrupt fires in cond_resched(), between the call to __schedule()
and the PREEMPT_ACTIVE count decrement, and that interrupt sets
TIF_NEED_RESCHED, the call to preempt_schedule_irq() will be ignored due
to the PREEMPT_ACTIVE count. This kind of scenario, with irq preemption
being delayed because it's interrupting a preempt-disabled area, is
usually fixed up after preemption is re-enabled, with an explicit call to
preempt_schedule(). This is what preempt_enable() does, but a raw preempt
count decrement as performed by __preempt_count_sub(PREEMPT_ACTIVE)
doesn't perform this delayed preemption check.

Therefore, when such a race happens, the rescheduling is delayed until
the next scheduler or preemption entrypoint. This can be a problem for
scheduler-latency-sensitive workloads.

Let's fix that by consolidating cond_resched() with preempt_schedule()
internals.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Ingo Molnar <mingo@kernel.org>
Original-patch-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1421946484-9298-1-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit a18b5d0181
parent 80e3d87b2c
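To make the race window concrete, here is the pre-patch __cond_resched()
(taken from the removal hunk in the diff below), annotated with comments
that are added here for illustration and are not part of the patch:

static void __cond_resched(void)
{
	__preempt_count_add(PREEMPT_ACTIVE);
	__schedule();
	/*
	 * Window: an IRQ firing here can set TIF_NEED_RESCHED, but its
	 * preemption via preempt_schedule_irq() is skipped because the
	 * preempt count is non-zero (PREEMPT_ACTIVE).
	 */
	__preempt_count_sub(PREEMPT_ACTIVE);
	/*
	 * Raw decrement: unlike preempt_enable(), there is no re-check
	 * of TIF_NEED_RESCHED here, so the reschedule is stranded until
	 * the next scheduler or preemption entrypoint.
	 */
}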
@@ -2884,6 +2884,21 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
+static void preempt_schedule_common(void)
+{
+	do {
+		__preempt_count_add(PREEMPT_ACTIVE);
+		__schedule();
+		__preempt_count_sub(PREEMPT_ACTIVE);
+
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
+		barrier();
+	} while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
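The do/while loop in preempt_schedule_common() is what closes the window:
if TIF_NEED_RESCHED gets set while PREEMPT_ACTIVE is still in the preempt
count, need_resched() observes it on the next iteration and __schedule()
runs again. The barrier() matters because need_resched() must re-read the
flag after the count decrement rather than reuse a cached value; it is
the usual kernel compiler barrier, roughly (for gcc):

/*
 * Compiler barrier: forces the need_resched() test to re-read
 * TIF_NEED_RESCHED instead of reusing a value the compiler cached
 * before __preempt_count_sub().
 */
#define barrier() __asm__ __volatile__("" : : : "memory")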
@@ -2899,17 +2914,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 	if (likely(!preemptible()))
 		return;
 
-	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
-		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
-	} while (need_resched());
+	preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
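For reference, the preemptible() check that guards preempt_schedule()
bails out whenever preemption is not currently legal; in kernels of this
vintage it is roughly:

/*
 * Preemption is possible only with a zero preempt count and with
 * interrupts enabled.
 */
#define preemptible()	(preempt_count() == 0 && !irqs_disabled())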
@@ -4209,17 +4214,10 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-static void __cond_resched(void)
-{
-	__preempt_count_add(PREEMPT_ACTIVE);
-	__schedule();
-	__preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
 	if (should_resched()) {
-		__cond_resched();
+		preempt_schedule_common();
 		return 1;
 	}
 	return 0;
@@ -4244,7 +4242,7 @@ int __cond_resched_lock(spinlock_t *lock)
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
-			__cond_resched();
+			preempt_schedule_common();
 		else
 			cpu_relax();
 		ret = 1;
@@ -4260,7 +4258,7 @@ int __sched __cond_resched_softirq(void)
 
 	if (should_resched()) {
 		local_bh_enable();
-		__cond_resched();
+		preempt_schedule_common();
 		local_bh_disable();
 		return 1;
 	}
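After the patch, all three cond_resched() flavors funnel their
reschedule through preempt_schedule_common(), so a wakeup racing with
the voluntary preemption point is no longer lost. A typical caller looks
like the hypothetical sketch below; process_item() and nr_items are
placeholders, not part of the patch:

/* Hypothetical long-running loop in process context. */
for (i = 0; i < nr_items; i++) {
	process_item(i);	/* placeholder for real per-item work */
	cond_resched();		/*
				 * Voluntary preemption point; now ends
				 * up in preempt_schedule_common() when
				 * a reschedule is due.
				 */
}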