mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 14:43:58 +08:00
9809b18fcf
watchdog_thresh controls how often the NMI perf event counter checks the per-cpu hrtimer_interrupts counter and blows up if the counter hasn't changed since the last check. The counter is updated by the per-cpu watchdog_hrtimer hrtimer, which is scheduled with a 2/5 watchdog_thresh period, which guarantees that the hrtimer is scheduled 2 times per main period. Both the hrtimer and the perf event are started together when the watchdog is enabled. So far so good. But... But what happens when watchdog_thresh is updated from the sysctl handler? proc_dowatchdog will set a new sampling period and the hrtimer callback (watchdog_timer_fn) will use the new value in the next round. The problem, however, is that nobody tells the perf event that the sampling period has changed, so it keeps ticking with the period configured when it was set up. This might result in an ear-ripping dissonance between the perf and hrtimer parts if watchdog_thresh is increased. And even worse, it might lead to a KABOOM if the watchdog is configured to panic on such a spurious lockup. This patch fixes the issue by updating both the NMI perf event counter and the hrtimers if the threshold value has changed. The NMI one is disabled and then reinitialized from scratch. This has an unpleasant side effect: the allocation of the new event might theoretically fail, so the hard lockup detector would be disabled for such CPUs. On the other hand, such a memory allocation failure is very unlikely because the original event is deallocated right before. It would be much nicer if we just changed the perf event period, but there doesn't seem to be any API to do that right now. It is also unfortunate that perf_event_alloc uses a GFP_KERNEL allocation unconditionally, so we cannot use on_each_cpu() and do the same thing from the per-cpu context. The update from the current CPU should be safe because perf_event_disable removes the event atomically before it clears the per-cpu watchdog_ev, so it cannot change anything under the running handler's feet. 
The hrtimer is simply restarted (thanks to Don Zickus who pointed this out) if it is queued, because we cannot rely on it firing and adapting to the new sampling period before a new NMI event triggers (when the threshold is decreased). [akpm@linux-foundation.org: the UP version of __smp_call_function_single ended up in the wrong place] Signed-off-by: Michal Hocko <mhocko@suse.cz> Acked-by: Don Zickus <dzickus@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@kernel.org> Cc: Fabio Estevam <festevam@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
200 lines
5.0 KiB
C
200 lines
5.0 KiB
C
#ifndef __LINUX_SMP_H
|
|
#define __LINUX_SMP_H
|
|
|
|
/*
|
|
* Generic SMP support
|
|
* Alan Cox. <alan@redhat.com>
|
|
*/
|
|
|
|
#include <linux/errno.h>
|
|
#include <linux/types.h>
|
|
#include <linux/list.h>
|
|
#include <linux/cpumask.h>
|
|
#include <linux/init.h>
|
|
|
|
extern void cpu_idle(void);
|
|
|
|
/* Signature of a function executed via the cross-CPU call machinery. */
typedef void (*smp_call_func_t)(void *info);

/*
 * Descriptor for one single-CPU cross call.  Callers of
 * __smp_call_function_single() fill this in; it is linked onto the
 * destination CPU's call queue via @list.
 */
struct call_single_data {
	struct list_head list;	/* entry on the target CPU's call list */
	smp_call_func_t func;	/* function to run on the target CPU */
	void *info;		/* opaque argument handed to @func */
	u16 flags;		/* CSD state bits -- semantics defined in kernel/smp.c, not visible here */
};
|
|
|
|
/* total number of cpus in this system (may exceed NR_CPUS) */
|
|
extern unsigned int total_cpus;
|
|
|
|
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
|
|
int wait);
|
|
|
|
/*
|
|
* Call a function on all processors
|
|
*/
|
|
int on_each_cpu(smp_call_func_t func, void *info, int wait);
|
|
|
|
/*
|
|
* Call a function on processors specified by mask, which might include
|
|
* the local one.
|
|
*/
|
|
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
|
|
void *info, bool wait);
|
|
|
|
/*
|
|
* Call a function on each processor for which the supplied function
|
|
* cond_func returns a positive value. This may include the local
|
|
* processor.
|
|
*/
|
|
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
|
|
smp_call_func_t func, void *info, bool wait,
|
|
gfp_t gfp_flags);
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
#include <linux/preempt.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/thread_info.h>
|
|
#include <asm/smp.h>
|
|
|
|
/*
|
|
* main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
|
|
* (defined in asm header):
|
|
*/
|
|
|
|
/*
|
|
* stops all CPUs but the current one:
|
|
*/
|
|
extern void smp_send_stop(void);
|
|
|
|
/*
|
|
* sends a 'reschedule' event to another CPU:
|
|
*/
|
|
extern void smp_send_reschedule(int cpu);
|
|
|
|
|
|
/*
|
|
* Prepare machine for booting other CPUs.
|
|
*/
|
|
extern void smp_prepare_cpus(unsigned int max_cpus);
|
|
|
|
/*
|
|
* Bring a CPU up
|
|
*/
|
|
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
|
|
|
|
/*
|
|
* Final polishing of CPUs
|
|
*/
|
|
extern void smp_cpus_done(unsigned int max_cpus);
|
|
|
|
/*
|
|
* Call a function on all other processors
|
|
*/
|
|
int smp_call_function(smp_call_func_t func, void *info, int wait);
|
|
void smp_call_function_many(const struct cpumask *mask,
|
|
smp_call_func_t func, void *info, bool wait);
|
|
|
|
void __smp_call_function_single(int cpuid, struct call_single_data *data,
|
|
int wait);
|
|
|
|
int smp_call_function_any(const struct cpumask *mask,
|
|
smp_call_func_t func, void *info, int wait);
|
|
|
|
void kick_all_cpus_sync(void);
|
|
|
|
/*
|
|
* Generic and arch helpers
|
|
*/
|
|
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
/* One-time setup of the generic cross-call queues (early boot only). */
void __init call_function_init(void);
/* IPI handler that drains the per-CPU single-call queue. */
void generic_smp_call_function_single_interrupt(void);
/*
 * The multi-target and single-target IPIs share one handler: the
 * alias below routes both to the single-interrupt drain function.
 */
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
#else
/* No generic helper queues configured: nothing to initialize. */
static inline void call_function_init(void) { }
#endif
|
|
|
|
/*
|
|
* Mark the boot cpu "online" so that it can call console drivers in
|
|
* printk() and can access its per-cpu storage.
|
|
*/
|
|
void smp_prepare_boot_cpu(void);
|
|
|
|
extern unsigned int setup_max_cpus;
|
|
extern void __init setup_nr_cpu_ids(void);
|
|
extern void __init smp_init(void);
|
|
|
|
#else /* !SMP */
|
|
|
|
/* UP stub: there are no other CPUs to stop. */
static inline void smp_send_stop(void) { }
|
|
|
|
/*
 * These macros fold the SMP functionality into a single CPU system
 */
/* On UP the one and only CPU is always number 0. */
#define raw_smp_processor_id()			0
/*
 * UP replacement for the "call on other CPUs" primitives: the set of
 * other CPUs is empty, so nothing is invoked and 0 (success) is
 * returned.  Note @func and @info are deliberately unused.
 */
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
|
|
|
|
/* UP stub: no other CPU exists to send a reschedule event to. */
static inline void smp_send_reschedule(int cpu) { }
/* The boot CPU needs no special preparation on UP. */
#define smp_prepare_boot_cpu()			do {} while (0)
/*
 * "All CPUs in @mask except self" is empty on UP: succeed without
 * invoking @func (mask and wait are ignored).
 */
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
/* No cross-call queues exist on UP: nothing to initialize. */
static inline void call_function_init(void) { }
|
|
|
|
/*
 * UP version: "any CPU in @mask" can only be CPU 0, so forward to the
 * single-CPU call, which executes @func locally.  @mask is ignored.
 */
static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}
|
|
|
|
/* UP stub: no remote CPUs to kick or synchronize with. */
static inline void kick_all_cpus_sync(void) { }
|
|
|
|
/*
 * UP version: run @data->func on the only CPU there is.  @cpuid is
 * ignored.  Routing through on_each_cpu() presumably mirrors the
 * execution context of the SMP IPI path (IRQs disabled around the
 * local call) -- confirm against the on_each_cpu() UP implementation.
 */
static inline void __smp_call_function_single(int cpuid,
				struct call_single_data *data, int wait)
{
	on_each_cpu(data->func, data->info, wait);
}
|
|
|
|
#endif /* !SMP */
|
|
|
|
/*
|
|
* smp_processor_id(): get the current CPU ID.
|
|
*
|
|
* if DEBUG_PREEMPT is enabled then we check whether it is
|
|
* used in a preemption-safe way. (smp_processor_id() is safe
|
|
* if it's used in a preemption-off critical section, or in
|
|
* a thread that is bound to the current CPU.)
|
|
*
|
|
* NOTE: raw_smp_processor_id() is for internal use only
|
|
* (smp_processor_id() is the preferred variant), but in rare
|
|
* instances it might also be used to turn off false positives
|
|
* (i.e. smp_processor_id() use that the debugging code reports but
|
|
* which use for some reason is legal). Don't use this to hack around
|
|
* the warning message, as your code might not work under PREEMPT.
|
|
*/
|
|
#ifdef CONFIG_DEBUG_PREEMPT
/* Debug build: route through a checker that warns on preemption-unsafe use. */
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
/* Non-debug build: use the raw arch-provided id directly. */
# define smp_processor_id() raw_smp_processor_id()
#endif

/*
 * get_cpu()/put_cpu(): pin the caller to its current CPU by disabling
 * preemption.  get_cpu() evaluates to the CPU id; put_cpu() re-enables
 * preemption.  Always use in matched pairs.
 */
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
|
|
|
|
/*
|
|
* Callback to arch code if there's nosmp or maxcpus=0 on the
|
|
* boot command line:
|
|
*/
|
|
extern void arch_disable_smp_support(void);
|
|
|
|
void smp_setup_processor_id(void);
|
|
|
|
#endif /* __LINUX_SMP_H */
|