watchdog: make sure the watchdog thread gets CPU on loaded system
If the system is loaded while hotplugging a CPU we might end up with a bogus hardlockup detection. This has been seen during an LTP pounder test executed in parallel with a hotplug test.

The main problem is that watchdog_enable (called when a CPU is brought up) registers a perf event which periodically checks a per-cpu counter (hrtimer_interrupts), updated from a hrtimer callback, but the hrtimer is started from the kernel thread.

This means that while we already check for a hard lockup, the kernel thread might be sitting on the runqueue with zillions of other tasks, so there is nobody to update the value we rely on and so we KABOOM.

Let's fix this by boosting the watchdog thread priority before we wake it up rather than when it is already running. This still doesn't handle the case where we have the same number of high-prio FIFO tasks, but that doesn't seem to be common. The current implementation doesn't handle that case anyway, so this is not worse at least.

Unfortunately, we cannot start the perf counter from the watchdog thread because we could miss a real lockup, and we also cannot start the hrtimer from watchdog_enable because there is no way (at least none I know of) to start a hrtimer from a different CPU.

[dzickus@redhat.com: fix compile issue with param]
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Mandeep Singh Baines <msb@chromium.org>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
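To make the failure mode concrete, here is a minimal C sketch of the check described above. It is a simplification, not the kernel's exact code: the real detector keeps these counters as per-cpu variables and runs the comparison from the perf NMI callback, and the helper name and the "_saved" counter below are stand-ins modeled on that description. The check only asks whether hrtimer_interrupts has moved since the previous sample, so a watchdog thread that never got to run, and therefore never started its hrtimer, looks exactly like a locked-up CPU.

#include <stdio.h>

static unsigned long hrtimer_interrupts;        /* bumped by the watchdog hrtimer callback */
static unsigned long hrtimer_interrupts_saved;  /* value seen at the previous NMI sample */

/* Simplified stand-in for the per-cpu hardlockup check run from the perf/NMI path. */
static int is_hardlockup(void)
{
	unsigned long hrint = hrtimer_interrupts;

	/* No hrtimer ticks since the last sample: the CPU looks stuck. */
	if (hrtimer_interrupts_saved == hrint)
		return 1;

	hrtimer_interrupts_saved = hrint;
	return 0;
}

int main(void)
{
	/* First sample establishes a baseline, so no lockup is reported... */
	hrtimer_interrupts = 1;
	printf("sample 1: hardlockup=%d\n", is_hardlockup());

	/* ...but if the watchdog thread never runs, the counter never moves
	 * and every later sample reports a (bogus) hard lockup. */
	printf("sample 2: hardlockup=%d\n", is_hardlockup());
	return 0;
}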
parent 397a21f24d
commit 7a05c0f7bb
@@ -319,11 +319,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
  */
 static int watchdog(void *unused)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	struct sched_param param = { .sched_priority = 0 };
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
-
 	/* initialize timestamp */
 	__touch_watchdog();
 
@@ -350,7 +348,6 @@ static int watchdog(void *unused)
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	__set_current_state(TASK_RUNNING);
-	param.sched_priority = 0;
 	sched_setscheduler(current, SCHED_NORMAL, &param);
 	return 0;
 }
@@ -439,6 +436,7 @@ static int watchdog_enable(int cpu)
 
 	/* create the watchdog thread */
 	if (!p) {
+		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
@@ -450,6 +448,7 @@ static int watchdog_enable(int cpu)
 			}
 			goto out;
 		}
+		sched_setscheduler(p, SCHED_FIFO, &param);
 		kthread_bind(p, cpu);
 		per_cpu(watchdog_touch_ts, cpu) = 0;
 		per_cpu(softlockup_watchdog, cpu) = p;
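The fix works because the priority boost now happens from the CPU doing the hotplug, before the new kthread has ever been scheduled, so the boost itself cannot be starved. A userspace analogue of that pattern, purely illustrative and assuming POSIX threads (the worker, the names, and the fallback path below are not part of the patch), looks like this:

/* Userspace analogue (illustrative only, not kernel code): the creator puts
 * the worker on SCHED_FIFO before it ever runs, mirroring how watchdog_enable()
 * now calls sched_setscheduler() on the kthread before waking it. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static void *worker(void *unused)
{
	int policy;
	struct sched_param sp;

	(void)unused;
	/* By the time this runs, the creator has already boosted us. */
	pthread_getschedparam(pthread_self(), &policy, &sp);
	printf("worker: policy=%s priority=%d\n",
	       policy == SCHED_FIFO ? "SCHED_FIFO" : "other", sp.sched_priority);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_attr_t attr;
	struct sched_param sp = { .sched_priority = sched_get_priority_max(SCHED_FIFO) };
	int err;

	pthread_attr_init(&attr);
	/* Do not inherit the creator's policy; apply the attributes below instead. */
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	pthread_attr_setschedparam(&attr, &sp);

	err = pthread_create(&t, &attr, worker, NULL);
	if (err) {
		/* SCHED_FIFO needs CAP_SYS_NICE; fall back so the demo still runs. */
		fprintf(stderr, "SCHED_FIFO create failed: %s\n", strerror(err));
		err = pthread_create(&t, NULL, worker, NULL);
		if (err)
			return 1;
	}
	pthread_join(t, NULL);
	pthread_attr_destroy(&attr);
	return 0;
}

The design point is the same in both settings: whoever creates the thread is already running, so it is the right place to grant the real-time policy; waiting for the new thread to promote itself reintroduces the very scheduling dependency the patch removes.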