x86: Fix CPUIDLE_FLAG_IRQ_ENABLE leaking timer reprogram
[ Upstream commit edc8fc01f6 ]

intel_idle_irq() re-enables IRQs very early. As a result, an interrupt
may fire before mwait() is eventually called. If such an interrupt queues
a timer, it may go unnoticed until mwait() returns and the idle loop
handles the tick re-evaluation. And monitoring TIF_NEED_RESCHED doesn't
help, because a local timer enqueue doesn't set that flag.

The issue is mitigated by the fact that this idle handler is only invoked
for shallow C-states when, presumably, the next tick is supposed to be
close enough. There may still be rare cases though when the next tick is
far away and the selected C-state is shallow, resulting in a timer getting
ignored for a while.

Fix this by using sti_mwait(), whose IRQ re-enablement only triggers upon
calling mwait(), dealing with the race while keeping the interrupt latency
within acceptable bounds.

Fixes: c227233ad6 ("intel_idle: enable interrupts before C1 on Xeons")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Link: https://lkml.kernel.org/r/20231115151325.6262-3-frederic@kernel.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 08beb0d436
parent f7aac5fede
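For context, the mwait helpers involved look roughly like the sketch below (simplified from arch/x86/include/asm/mwait.h of this era; the MDS buffer-clearing call and exact asm constraints are omitted, so treat it as an illustration rather than the verbatim kernel source). The point of the fix is that __sti_mwait() issues STI immediately before MWAIT, and the STI interrupt shadow defers delivery of any pending interrupt until MWAIT has started executing; a timer queued by that interrupt therefore wakes the CPU right away and is re-evaluated by the idle loop, instead of firing in the wide window that raw_local_irq_enable() used to open before MONITOR/MWAIT.

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* mwait %eax, %ecx: IRQs stay disabled; ECX bit 0 asks the CPU to
	 * treat a masked interrupt as a break event. */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* sti; mwait %eax, %ecx: interrupts are enabled only in the STI
	 * shadow, one instruction before MWAIT, so nothing can be queued
	 * in between and then slept through. */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}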
arch/x86/include/asm/mwait.h:

@@ -115,8 +115,15 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
-			__mwait(eax, ecx);
+
+		if (!need_resched()) {
+			if (ecx & 1) {
+				__mwait(eax, ecx);
+			} else {
+				__sti_mwait(eax, ecx);
+				raw_local_irq_disable();
+			}
+		}
 	}
 	current_clr_polling();
 }
drivers/idle/intel_idle.c:

@@ -131,11 +131,12 @@ static unsigned int mwait_substates __initdata;
 #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
 
 static __always_inline int __intel_idle(struct cpuidle_device *dev,
-					struct cpuidle_driver *drv, int index)
+					struct cpuidle_driver *drv,
+					int index, bool irqoff)
 {
 	struct cpuidle_state *state = &drv->states[index];
 	unsigned long eax = flg2MWAIT(state->flags);
-	unsigned long ecx = 1; /* break on interrupt flag */
+	unsigned long ecx = 1*irqoff; /* break on interrupt flag */
 
 	mwait_idle_with_hints(eax, ecx);
 
@@ -159,19 +160,13 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
 static __cpuidle int intel_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv, int index)
 {
-	return __intel_idle(dev, drv, index);
+	return __intel_idle(dev, drv, index, true);
 }
 
 static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
 				    struct cpuidle_driver *drv, int index)
 {
-	int ret;
-
-	raw_local_irq_enable();
-	ret = __intel_idle(dev, drv, index);
-	raw_local_irq_disable();
-
-	return ret;
+	return __intel_idle(dev, drv, index, false);
 }
 
 static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
@@ -184,7 +179,7 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
 	if (smt_active)
 		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
-	ret = __intel_idle(dev, drv, index);
+	ret = __intel_idle(dev, drv, index, true);
 
 	if (smt_active)
 		native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
@@ -196,7 +191,7 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
 				       struct cpuidle_driver *drv, int index)
 {
 	fpu_idle_fpregs();
-	return __intel_idle(dev, drv, index);
+	return __intel_idle(dev, drv, index, true);
 }
 
 /**
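A note on the irqoff plumbing above: MWAIT's ECX bit 0 is the "treat interrupts as break events even if masked" hint, which is only needed while IRQs are disabled. The helper below is purely illustrative (hypothetical name; the diff simply writes ecx = 1*irqoff inline) and spells out how the boolean maps onto the hint that mwait_idle_with_hints() dispatches on.

#include <stdbool.h>

/* Illustrative only -- not kernel source. */
static inline unsigned long intel_idle_mwait_ecx(bool irqoff)
{
	/*
	 * irqoff == true:  ecx = 1, IRQs stay disabled and ECX bit 0 lets a
	 *                  masked interrupt break MWAIT (plain __mwait() path).
	 * irqoff == false: ecx = 0, so mwait_idle_with_hints() falls back to
	 *                  __sti_mwait() and re-disables IRQs after wakeup.
	 */
	return 1 * irqoff;
}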