lockdep/selftest: Add wait context selftests
These tests are added for two purposes:

 * Test the implementation of wait context checks and related
   annotations.

 * Semi-document the rules for wait context nesting when
   PROVE_RAW_LOCK_NESTING=y.

The test cases are only available for PROVE_RAW_LOCK_NESTING=y, as wait
context checking makes more sense for that configuration.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201208103112.2838119-5-boqun.feng@gmail.com
parent e71ba9452f
commit 9271a40d2a
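For context, the kind of nesting these checks are meant to catch looks roughly like the sketch below (the lock and function names are illustrative, not part of this patch). With PROVE_RAW_LOCK_NESTING=y, lockdep flags this even on a non-PREEMPT_RT kernel, because spinlock_t becomes a sleeping lock on PREEMPT_RT and therefore must not be acquired inside a raw_spinlock_t critical section:

	/* hypothetical example, not from this patch */
	static DEFINE_RAW_SPINLOCK(demo_raw_lock);
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_bad_wait_context(void)
	{
		raw_spin_lock(&demo_raw_lock);
		spin_lock(&demo_lock);	/* invalid wait context: sleeping lock inside a raw lock */
		spin_unlock(&demo_lock);
		raw_spin_unlock(&demo_raw_lock);
	}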
@@ -64,6 +64,9 @@ static DEFINE_SPINLOCK(lock_B);
static DEFINE_SPINLOCK(lock_C);
static DEFINE_SPINLOCK(lock_D);

static DEFINE_RAW_SPINLOCK(raw_lock_A);
static DEFINE_RAW_SPINLOCK(raw_lock_B);

static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
static DEFINE_RWLOCK(rwlock_C);
@@ -1306,6 +1309,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
# define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
@@ -1315,6 +1319,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#endif
#else
# define I_SPINLOCK(x)
# define I_RAW_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
@@ -1358,9 +1363,12 @@ static void reset_locks(void)
	I1(A); I1(B); I1(C); I1(D);
	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
	I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
	lockdep_reset();
	I2(A); I2(B); I2(C); I2(D);
	init_shared_classes();
	raw_spin_lock_init(&raw_lock_A);
	raw_spin_lock_init(&raw_lock_B);

	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -2419,6 +2427,226 @@ static void fs_reclaim_tests(void)
	pr_cont("\n");
}

#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))

static void hardirq_exit(int *_)
{
	HARDIRQ_EXIT();
}

#define HARDIRQ_CONTEXT(name, ...) \
	int hardirq_guard_##name __guard(hardirq_exit); \
	HARDIRQ_ENTER();

#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
	int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
	local_irq_disable(); \
	__irq_enter(); \
	WARN_ON(!in_irq());

static void softirq_exit(int *_)
{
	SOFTIRQ_EXIT();
}

#define SOFTIRQ_CONTEXT(name, ...) \
	int softirq_guard_##name __guard(softirq_exit); \
	SOFTIRQ_ENTER();

static void rcu_exit(int *_)
{
	rcu_read_unlock();
}

#define RCU_CONTEXT(name, ...) \
	int rcu_guard_##name __guard(rcu_exit); \
	rcu_read_lock();

static void rcu_bh_exit(int *_)
{
	rcu_read_unlock_bh();
}

#define RCU_BH_CONTEXT(name, ...) \
	int rcu_bh_guard_##name __guard(rcu_bh_exit); \
	rcu_read_lock_bh();

static void rcu_sched_exit(int *_)
{
	rcu_read_unlock_sched();
}

#define RCU_SCHED_CONTEXT(name, ...) \
	int rcu_sched_guard_##name __guard(rcu_sched_exit); \
	rcu_read_lock_sched();

static void rcu_callback_exit(int *_)
{
	rcu_lock_release(&rcu_callback_map);
}

#define RCU_CALLBACK_CONTEXT(name, ...) \
	int rcu_callback_guard_##name __guard(rcu_callback_exit); \
	rcu_lock_acquire(&rcu_callback_map);

static void raw_spinlock_exit(raw_spinlock_t **lock)
{
	raw_spin_unlock(*lock);
}

#define RAW_SPINLOCK_CONTEXT(name, lock) \
	raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
	raw_spin_lock(&(lock));

static void spinlock_exit(spinlock_t **lock)
{
	spin_unlock(*lock);
}

#define SPINLOCK_CONTEXT(name, lock) \
	spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
	spin_lock(&(lock));

static void mutex_exit(struct mutex **lock)
{
	mutex_unlock(*lock);
}

#define MUTEX_CONTEXT(name, lock) \
	struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
	mutex_lock(&(lock));

#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
	\
static void __maybe_unused inner##_in_##outer(void) \
{ \
	outer##_CONTEXT(_, outer_lock); \
	{ \
		inner##_CONTEXT(_, inner_lock); \
	} \
}

/*
 * wait contexts (considering PREEMPT_RT)
 *
 * o: inner is allowed in outer
 * x: inner is disallowed in outer
 *
 * \  inner       |  RCU  | RAW_SPIN | SPIN | MUTEX
 *  outer \       |       |          |      |
 * ---------------+-------+----------+------+-------
 * HARDIRQ        |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * NOTTHREADED_IRQ|   o   |    o     |  x   |  x
 * ---------------+-------+----------+------+-------
 * SOFTIRQ        |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * RCU            |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * RCU_BH         |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * RCU_CALLBACK   |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * RCU_SCHED      |   o   |    o     |  x   |  x
 * ---------------+-------+----------+------+-------
 * RAW_SPIN       |   o   |    o     |  x   |  x
 * ---------------+-------+----------+------+-------
 * SPIN           |   o   |    o     |  o   |  x
 * ---------------+-------+----------+------+-------
 * MUTEX          |   o   |    o     |  o   |  o
 * ---------------+-------+----------+------+-------
 */

#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
	GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)

GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)

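To see what these generators actually produce, here is an approximate preprocessor expansion of one generated case, MUTEX_in_RAW_SPINLOCK (from GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, MUTEX, mutex_B)). This is only a sketch of the expansion, not code added by the patch; the guard variable names follow from the macros above, and __guard() uses __attribute__((cleanup)) so the exit helpers release the contexts when the guard variables go out of scope:

	static void __maybe_unused MUTEX_in_RAW_SPINLOCK(void)
	{
		raw_spinlock_t *raw_spinlock_guard__ __maybe_unused
			__attribute__((__cleanup__(raw_spinlock_exit))) = &(raw_lock_A);
		raw_spin_lock(&(raw_lock_A));
		{
			struct mutex *mutex_guard__ __maybe_unused
				__attribute__((__cleanup__(mutex_exit))) = &(mutex_B);
			mutex_lock(&(mutex_B));
		}	/* mutex_exit() runs here: mutex_unlock(&mutex_B) */
	}		/* raw_spinlock_exit() runs here: raw_spin_unlock(&raw_lock_A) */

Per the table above, a mutex inside a raw spinlock is an invalid wait context, so this particular case is expected to fail the check; that is what DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK) below asserts via dotest(MUTEX_in_RAW_SPINLOCK, FAILURE, LOCKTYPE_MUTEX).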
/* the outer context allows all kinds of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
	dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \

/*
 * the outer context only allows the preemption introduced by spinlock_t (which
 * is a sleepable lock for PREEMPT_RT)
 */
#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \

/* the outer context doesn't allow any kind of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
	dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \

static void wait_context_tests(void)
{
	printk(" --------------------------------------------------------------------------\n");
	printk(" | wait context tests |\n");
	printk(" --------------------------------------------------------------------------\n");
	printk(" | rcu | raw | spin |mutex |\n");
	printk(" --------------------------------------------------------------------------\n");
	print_testname("in hardirq context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
	pr_cont("\n");

	print_testname("in hardirq context (not threaded)");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
	pr_cont("\n");

	print_testname("in softirq context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
	pr_cont("\n");

	print_testname("in RCU context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
	pr_cont("\n");

	print_testname("in RCU-bh context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
	pr_cont("\n");

	print_testname("in RCU callback context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
	pr_cont("\n");

	print_testname("in RCU-sched context");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
	pr_cont("\n");

	print_testname("in RAW_SPINLOCK context");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
	pr_cont("\n");

	print_testname("in SPINLOCK context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
	pr_cont("\n");

	print_testname("in MUTEX context");
	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
	pr_cont("\n");
}

void locking_selftest(void)
{
	/*
@@ -2542,6 +2770,10 @@ void locking_selftest(void)

	fs_reclaim_tests();

	/* Wait context test cases that are specific for RAW_LOCK_NESTING */
	if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
		wait_context_tests();

	if (unexpected_testcase_failures) {
		printk("-----------------------------------------------------------------\n");
		debug_locks = 0;
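As a usage note (not part of the patch): these cases run from locking_selftest() at boot when the locking API self-tests are built in. A config sketch that should exercise them, assuming an otherwise typical debug kernel:

	CONFIG_PROVE_LOCKING=y
	CONFIG_PROVE_RAW_LOCK_NESTING=y
	CONFIG_DEBUG_LOCKING_API_SELFTESTS=y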