mirror of https://github.com/edk2-porting/linux-next.git
82a1fcb902
this patch extends the soft-lockup detector to automatically
detect hung TASK_UNINTERRUPTIBLE tasks. Such hung tasks are
printed the following way:

 ------------------>
 INFO: task prctl:3042 blocked for more than 120 seconds.
 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message
 prctl         D fd5e3793     0  3042   2997
        f6050f38 00000046 00000001 fd5e3793 00000009 c06d8264 c06dae80 00000286
        f6050f40 f6050f00 f7d34d90 f7d34fc8 c1e1be80 00000001 f6050000 00000000
        f7e92d00 00000286 f6050f18 c0489d1a f6050f40 00006605 00000000 c0133a5b
 Call Trace:
  [<c04883a5>] schedule_timeout+0x6d/0x8b
  [<c04883d8>] schedule_timeout_uninterruptible+0x15/0x17
  [<c0133a76>] msleep+0x10/0x16
  [<c0138974>] sys_prctl+0x30/0x1e2
  [<c0104c52>] sysenter_past_esp+0x5f/0xa5
 =======================
 2 locks held by prctl/3042:
 #0:  (&sb->s_type->i_mutex_key#5){--..}, at: [<c0197d11>] do_fsync+0x38/0x7a
 #1:  (jbd_handle){--..}, at: [<c01ca3d2>] journal_start+0xc7/0xe9
 <------------------

the current default timeout is 120 seconds. Such messages are printed
up to 10 times per bootup. If the system has crashed already then the
messages are not printed.

if lockdep is enabled then all held locks are printed as well.

this feature is a natural extension to the softlockup-detector (kernel
locked up without scheduling) and to the NMI watchdog (kernel locked up
with IRQs disabled).

[ Gautham R Shenoy <ego@in.ibm.com>: CPU hotplug fixes. ]
[ Andrew Morton <akpm@linux-foundation.org>: build warning fix. ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
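The sysctl quoted in the message above lives in procfs, so the timeout can also
be adjusted programmatically rather than with echo. A minimal userspace sketch,
assuming root privileges; the 300-second value is purely illustrative, and
writing 0 disables the warnings, as the message itself notes:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hung_task_timeout_secs", "w");

	if (!f) {
		perror("hung_task_timeout_secs");
		return 1;
	}

	/* 0 disables the warnings; any other value becomes the new timeout. */
	fprintf(f, "%d\n", 300);
	fclose(f);
	return 0;
}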
79 lines
1.8 KiB
C
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H

struct task_struct;

extern int debug_locks;
extern int debug_locks_silent;

/*
 * Generic 'turn off all lock debugging' function:
 */
extern int debug_locks_off(void);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we dont want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define _RET_IP_	(unsigned long)__builtin_return_address(0)
#define _THIS_IP_	({ __label__ __here; __here: (unsigned long)&&__here; })

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (unlikely(c)) {						\
		if (debug_locks_off() && !debug_locks_silent)		\
			WARN_ON(1);					\
		__ret = 1;						\
	}								\
	__ret;								\
})

#ifdef CONFIG_SMP
# define SMP_DEBUG_LOCKS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
#else
# define SMP_DEBUG_LOCKS_WARN_ON(c)	do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
  extern void locking_selftest(void);
#else
# define locking_selftest()	do { } while (0)
#endif

struct task_struct;

#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void __debug_show_held_locks(struct task_struct *task);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
extern void debug_check_no_locks_held(struct task_struct *task);
#else
static inline void debug_show_all_locks(void)
{
}

static inline void __debug_show_held_locks(struct task_struct *task)
{
}

static inline void debug_show_held_locks(struct task_struct *task)
{
}

static inline void
debug_check_no_locks_freed(const void *from, unsigned long len)
{
}

static inline void
debug_check_no_locks_held(struct task_struct *task)
{
}
#endif

#endif
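The header's two building blocks are easy to exercise outside the kernel. Below
is a small userspace sketch with the kernel pieces stubbed out (WARN_ON()
becomes an fprintf(), debug_locks_off() is a simplified stand-in); it only
assumes the GNU C extensions the header itself relies on (statement
expressions, local labels, __builtin_return_address()):

#include <stdio.h>

static int debug_locks = 1;	/* mirrors the kernel's global flag */
static int debug_locks_silent;

/* First failed check returns 1 and turns further lock debugging off. */
static int debug_locks_off(void)
{
	if (debug_locks) {
		debug_locks = 0;
		return 1;
	}
	return 0;
}

#define WARN_ON(c) \
	fprintf(stderr, "WARNING at %s:%d: %s\n", __FILE__, __LINE__, #c)

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (c) {							\
		if (debug_locks_off() && !debug_locks_silent)		\
			WARN_ON(c);					\
		__ret = 1;						\
	}								\
	__ret;								\
})

#define _RET_IP_	(unsigned long)__builtin_return_address(0)
#define _THIS_IP_	({ __label__ __here; __here: (unsigned long)&&__here; })

/* A callee can name its caller without taking an extra argument. */
static void report_caller(void)
{
	printf("called from %#lx, currently at %#lx\n", _RET_IP_, _THIS_IP_);
}

int main(void)
{
	report_caller();

	DEBUG_LOCKS_WARN_ON(1 + 1 != 2);	/* passes: nothing printed    */
	DEBUG_LOCKS_WARN_ON(1 == 1);		/* first failure: one warning */
	DEBUG_LOCKS_WARN_ON(1 == 1);		/* later failures stay silent */

	return 0;
}

The warn-once behaviour is the point of routing the check through
debug_locks_off(): the first broken invariant gets reported, and everything
after it is treated as collateral noise and stays quiet.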