Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-21 19:53:59 +08:00)
cafcd80d21
Combining the softlockup and hardlockup code causes watchdog.c to build
even without hardlockup detection support. So if an arch that has the
previous and the new NMI watchdog implementations cohabiting wants to
know whether the generic one is in use, CONFIG_LOCKUP_DETECTOR is not a
reliable check. We need to use CONFIG_HARDLOCKUP_DETECTOR instead.

Fixes:

	kernel/built-in.o: In function `touch_nmi_watchdog':
	(.text+0x449bc): multiple definition of `touch_nmi_watchdog'
	arch/sparc/kernel/built-in.o:(.text+0x11b28): first defined here

Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
LKML-Reference: <20100514151121.GR15159@redhat.com>
[ use CONFIG_HARDLOCKUP_DETECTOR instead of CONFIG_PERF_EVENTS_NMI ]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
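The check matters on the arch side of the split. A minimal sketch of the pattern the commit message describes, assuming a hypothetical arch file that still carries its own NMI watchdog (the function body and bookkeeping comment are illustrative, not taken from the tree):

#ifndef CONFIG_HARDLOCKUP_DETECTOR
/*
 * The generic hardlockup detector is not built, so the arch must supply
 * touch_nmi_watchdog() itself. Guarding this with CONFIG_LOCKUP_DETECTOR
 * instead would leave the symbol defined both here and in
 * kernel/watchdog.c, producing the duplicate-definition error above.
 */
void touch_nmi_watchdog(void)
{
	/* arch-specific "CPU is alive" bookkeeping would go here */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif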
include/linux/nmi.h · 64 lines · 1.5 KiB · C
/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#ifdef ARCH_HAS_NMI_WATCHDOG
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
extern void acpi_nmi_disable(void);
extern void acpi_nmi_enable(void);
#else
#ifndef CONFIG_HARDLOCKUP_DETECTOR
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#else
extern void touch_nmi_watchdog(void);
#endif
static inline void acpi_nmi_disable(void) { }
static inline void acpi_nmi_enable(void) { }
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();

	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(void);
extern int watchdog_enabled;
struct ctl_table;
extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
				   void __user *, size_t *, loff_t *);
#endif

#endif
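For context, a caller-side sketch of how the two helpers documented in this header are meant to be used, per the comments above: kick the NMI watchdog from code that intentionally keeps interrupts disabled for a long time, and fall back to another mechanism when no arch backtrace support is available. poll_device(), device_ready() and dump_diagnostics() are made-up names for illustration only:

#include <linux/nmi.h>
#include <linux/irqflags.h>
#include <linux/printk.h>

static bool device_ready(void);	/* made up for the sketch */

/*
 * Hypothetical: a long busy-wait with interrupts off must kick the
 * NMI watchdog so the hardlockup detector does not fire.
 */
static void poll_device(void)
{
	unsigned long flags;

	local_irq_save(flags);
	while (!device_ready()) {
		touch_nmi_watchdog();	/* restart the NMI watchdog timeout */
		cpu_relax();
	}
	local_irq_restore(flags);
}

/*
 * Hypothetical: prefer the arch-provided all-CPU backtrace, and fall
 * back to dumping only the current CPU's stack when it is absent.
 */
static void dump_diagnostics(void)
{
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}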