locking/lockdep: Change the range of class_idx in held_lock struct
held_lock->class_idx is used to point to the class of the held lock. The
index is shifted by 1 so that index 0 means "no class", which forces the
index to be shifted back and forth on every access. This is not worth
doing, because: (1) there is no "no-class" held_lock to begin with, and
(2) index 0 appears to serve as an error check, but if something did go
wrong, the index cannot be relied on to show it, since whatever went
wrong will not deliberately set class_idx to 0 to tell us so.

Therefore, change the index to start from 0. This saves a lot of
back-and-forth shifts and gives a class slot back to lock_classes.

Since index 0 is now used for a real lock class, change the initial
chain key to -1 to avoid a key collision, which follows from the fact
that __jhash_mix(0, 0, 0) = 0. Actually, the initial chain key can be
any arbitrary value other than 0.

In addition, maintain a bitmap to keep track of the lock classes in
use, and check the validity of a held lock's class index against that
bitmap.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-10-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f6ec8829ac
commit 01bb6f0af9
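Editor's note, not part of the commit: the message's claim that __jhash_mix(0, 0, 0) = 0 is why INITIAL_CHAIN_KEY moves to -1 can be checked with a minimal standalone userspace sketch. rol32() and __jhash_mix() below are simplified copies of the helpers in include/linux/jhash.h, and iterate_chain_key() mirrors the lockdep helper of the same name; with a zero seed, hashing class index 0 is a fixed point, so a chain beginning with class 0 would collide with the empty chain unless the initial key is nonzero.

#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

/* Simplified copy of __jhash_mix() from include/linux/jhash.h. */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c;  a ^= rol32(c, 4);  c += b;	\
	b -= a;  b ^= rol32(a, 6);  a += c;	\
	c -= b;  c ^= rol32(b, 8);  b += a;	\
	a -= c;  a ^= rol32(c, 16); c += a;	\
	b -= a;  b ^= rol32(a, 19); a += b;	\
	c -= b;  c ^= rol32(b, 4);  b += c;	\
}

/* Mirrors lockdep's iterate_chain_key(): fold one class index into the key. */
static uint64_t iterate_chain_key(uint64_t key, uint32_t idx)
{
	uint32_t k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1);	/* macro, modifies k0 and k1 */

	return k0 | (uint64_t)k1 << 32;
}

int main(void)
{
	uint64_t empty = 0;		/* the old INITIAL_CHAIN_KEY */

	/* Mixing class index 0 into a zero key yields 0 again: the chain
	 * {class 0} becomes indistinguishable from the empty chain. */
	printf("key({0})  = 0x%016llx\n",
	       (unsigned long long)iterate_chain_key(empty, 0));

	/* A nonzero seed (the new INITIAL_CHAIN_KEY of -1) avoids the
	 * fixed point at zero. */
	printf("key'({0}) = 0x%016llx\n",
	       (unsigned long long)iterate_chain_key((uint64_t)-1, 0));
	return 0;
}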
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -223,13 +223,8 @@ struct lock_chain {
 };
 
 #define MAX_LOCKDEP_KEYS_BITS		13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
-#define INITIAL_CHAIN_KEY		0
+#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY		-1
 
 struct held_lock {
 	/*
@@ -254,6 +249,11 @@ struct held_lock {
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
 #endif
+	/*
+	 * class_idx is zero-indexed; it points to the element in
+	 * lock_classes this held lock instance belongs to. class_idx is in
+	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+	 */
 	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -151,17 +151,28 @@ unsigned long nr_lock_classes;
 static
 #endif
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
-	if (!hlock->class_idx) {
+	unsigned int class_idx = hlock->class_idx;
+
+	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
+	barrier();
+
+	if (!test_bit(class_idx, lock_classes_in_use)) {
 		/*
 		 * Someone passed in garbage, we give up.
 		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return NULL;
 	}
-	return lock_classes + hlock->class_idx - 1;
+
+	/*
+	 * At this point, if the passed hlock->class_idx is still garbage,
+	 * we just have to live with it
+	 */
+	return lock_classes + class_idx;
 }
 
 #ifdef CONFIG_LOCK_STAT
@@ -590,19 +601,22 @@ static void print_lock(struct held_lock *hlock)
 	/*
 	 * We can be called locklessly through debug_show_all_locks() so be
 	 * extra careful, the hlock might have been released and cleared.
+	 *
+	 * If this indeed happens, lets pretend it does not hurt to continue
+	 * to print the lock unless the hlock class_idx does not point to a
+	 * registered class. The rationale here is: since we don't attempt
+	 * to distinguish whether we are in this situation, if it just
+	 * happened we can't count on class_idx to tell either.
 	 */
-	unsigned int class_idx = hlock->class_idx;
+	struct lock_class *lock = hlock_class(hlock);
 
-	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
-	barrier();
-
-	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
+	if (!lock) {
 		printk(KERN_CONT "<RELEASED>\n");
 		return;
 	}
 
 	printk(KERN_CONT "%p", hlock->instance);
-	print_lock_name(lock_classes + class_idx - 1);
+	print_lock_name(lock);
 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 }
 
@@ -861,7 +875,7 @@ static bool check_lock_chain_key(struct lock_chain *chain)
 	int i;
 
 	for (i = chain->base; i < chain->base + chain->depth; i++)
-		chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
 	/*
 	 * The 'unsigned long long' casts avoid that a compiler warning
 	 * is reported when building tools/lib/lockdep.
@@ -1136,6 +1150,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		return NULL;
 	}
 	nr_lock_classes++;
+	__set_bit(class - lock_classes, lock_classes_in_use);
 	debug_atomic_inc(nr_unused_locks);
 	class->key = key;
 	class->name = lock->name;
@@ -2550,7 +2565,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
 	printk("depth: %u\n", chain->depth);
 	for (i = 0; i < chain->depth; i++) {
 		class_id = chain_hlocks[chain->base + i];
-		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+		chain_key = print_chain_key_iteration(class_id, chain_key);
 
 		print_lock_name(lock_classes + class_id);
 		printk("\n");
@@ -2601,7 +2616,7 @@ static int check_no_collision(struct task_struct *curr,
 	}
 
 	for (j = 0; j < chain->depth - 1; j++, i++) {
-		id = curr->held_locks[i].class_idx - 1;
+		id = curr->held_locks[i].class_idx;
 
 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
 			print_collision(curr, hlock, chain);
@@ -2684,7 +2699,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = nr_chain_hlocks;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
-			int lock_id = curr->held_locks[i].class_idx - 1;
+			int lock_id = curr->held_locks[i].class_idx;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
@@ -2864,10 +2879,12 @@ static void check_chain_key(struct task_struct *curr)
 				(unsigned long long)hlock->prev_chain_key);
 			return;
 		}
+
 		/*
-		 * Whoops ran out of static storage again?
+		 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
+		 * it registered lock class index?
 		 */
-		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
+		if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
 			return;
 
 		if (prev_hlock && (prev_hlock->irq_context !=
@@ -3715,7 +3732,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
-	class_idx = class - lock_classes + 1;
+	class_idx = class - lock_classes;
 
 	if (depth) {
 		hlock = curr->held_locks + depth - 1;
@@ -3777,9 +3794,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * the hash, not class->key.
 	 */
 	/*
-	 * Whoops, we did it again.. ran straight out of our static allocation.
+	 * Whoops, we did it again.. class_idx is invalid.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
+	if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
 		return 0;
 
 	chain_key = curr->curr_chain_key;
@@ -3894,7 +3911,7 @@ static int match_held_lock(const struct held_lock *hlock,
 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
 			return 0;
 
-		if (hlock->class_idx == class - lock_classes + 1)
+		if (hlock->class_idx == class - lock_classes)
 			return 1;
 	}
 
@@ -3988,7 +4005,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 
 	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
-	hlock->class_idx = class - lock_classes + 1;
+	hlock->class_idx = class - lock_classes;
 
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
@@ -4638,7 +4655,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
 recalc:
 	chain_key = INITIAL_CHAIN_KEY;
 	for (i = chain->base; i < chain->base + chain->depth; i++)
-		chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
 	if (chain->depth && chain->chain_key == chain_key)
 		return;
 	/* Overwrite the chain key for concurrent RCU readers. */
@@ -4712,6 +4729,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 		WRITE_ONCE(class->key, NULL);
 		WRITE_ONCE(class->name, NULL);
 		nr_lock_classes--;
+		__clear_bit(class - lock_classes, lock_classes_in_use);
 	} else {
 		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
 			  class->name);
@@ -5057,6 +5075,7 @@ void __init lockdep_init(void)
 
 	printk(" memory used by lock dependency info: %zu kB\n",
 	       (sizeof(lock_classes) +
+		sizeof(lock_classes_in_use) +
 		sizeof(classhash_table) +
 		sizeof(list_entries) +
 		sizeof(list_entries_in_use) +