Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-27 14:43:58 +08:00
6f0a2fc1fe
Pull NMI backtrace update from Russell King:
 "These changes convert the x86 NMI handling to be a library
  implementation which other architectures can make use of. Thomas
  Gleixner has reviewed and tested these changes, and wishes me to send
  these rather than taking them through the tip tree.

  The final patch in the set adds an initial implementation using this
  infrastructure to ARM, even though it doesn't send the IPI at "NMI"
  level. Patches are in progress to add the ARM equivalent of NMI, but
  we still need the IRQ-level fallback for systems where the "NMI"
  isn't available due to secure firmware denying access to it"

* 'nmi' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: add basic support for on-demand backtrace of other CPUs
  nmi: x86: convert to generic nmi handler
  nmi: create generic NMI backtrace implementation
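The library interface this merge introduces (declared in the header below) is small: the architecture implements arch_trigger_all_cpu_backtrace() by handing the generic code a "raise" callback that delivers its backtrace IPI, and the IPI handler on each target CPU passes its registers to nmi_cpu_backtrace(). A minimal sketch of the arch-side glue, assuming hypothetical smp_send_ipi_mask() and IPI_CPU_BACKTRACE names for the IPI delivery:

#include <linux/nmi.h>
#include <linux/cpumask.h>

/* Sketch only: smp_send_ipi_mask() and IPI_CPU_BACKTRACE are assumed,
 * arch-specific names, not part of the generic interface. */
static void raise_backtrace_ipi(cpumask_t *mask)
{
	smp_send_ipi_mask(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	/* The generic code builds the target cpumask and serialises
	 * concurrent backtrace requests. */
	nmi_trigger_all_cpu_backtrace(include_self, raise_backtrace_ipi);
}

/* Called from the arch's IPI/NMI handler on each target CPU: */
static void do_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);	/* dumps this CPU's stack if requested */
}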
105 lines
2.6 KiB
C
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
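#if 0	/* Illustrative usage, not part of this header. */
/*
 * A code path that legitimately runs with interrupts disabled for a
 * long time pets the watchdog on each iteration so the hard-lockup
 * detector does not fire; poll_hw_ready() is a made-up helper.
 */
static void wait_for_hw_irqs_off(void)
{
	while (!poll_hw_ready()) {
		touch_nmi_watchdog();
		cpu_relax();
	}
}
#endif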

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);

	return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);
	return true;
}

/* generic implementation */
void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif
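#if 0	/* Illustrative caller, not part of this header. */
/*
 * The bool return value lets callers fall back when the architecture
 * provides no arch_trigger_all_cpu_backtrace(); here the fallback is
 * a plain dump_stack() of the current CPU only.
 */
static void report_stall(void)
{
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}
#endif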

#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif