lglock: Update lockdep annotations to report recursive local locks
Oleg Nesterov recently noticed that the lockdep annotations in lglock.c
are not sufficient to detect some obvious deadlocks, such as
lg_local_lock(LOCK) + lg_local_lock(LOCK) or spin_lock(X) +
lg_local_lock(Y) vs lg_local_lock(Y) + spin_lock(X).

Both issues are easily fixed by indicating to lockdep that lglock's
local locks are not recursive. We shouldn't use the rwlock
acquire/release functions here, as lglock doesn't share the same
semantics. Instead we can base our lockdep annotations on the
lock_acquire_shared (for local lglock) and lock_acquire_exclusive
(for global lglock) helpers.

I am not proposing new lglock specific helpers as I don't see the
point of the existing second level of helpers :)

Noticed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20130708212352.1769031C15E@corp2gmr1-1.hot.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c4be9cb4f1
parent a51805efae
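To make the two changelog scenarios concrete, here is a minimal sketch of
the patterns the old annotations could not flag. The lock and function
names (my_lglock, my_spinlock, scenario1, scenario2_*) are hypothetical,
not part of the patch:

/* Hypothetical illustration only -- not from the patch. */
#include <linux/lglock.h>
#include <linux/spinlock.h>

DEFINE_STATIC_LGLOCK(my_lglock);	/* hypothetical names */
static DEFINE_SPINLOCK(my_spinlock);

/*
 * Scenario 1: lg_local_lock(LOCK) + lg_local_lock(LOCK).
 * The second call spins forever on this CPU's arch_spinlock_t.
 */
static void scenario1(void)
{
	lg_local_lock(&my_lglock);
	lg_local_lock(&my_lglock);	/* self-deadlock */
}

/*
 * Scenario 2: spin_lock(X) + lg_local_lock(Y) in one context vs
 * lg_local_lock(Y) + spin_lock(X) in another -- classic ABBA lock
 * ordering that lockdep should report.
 */
static void scenario2_ctx_a(void)
{
	spin_lock(&my_spinlock);
	lg_local_lock(&my_lglock);
	lg_local_unlock(&my_lglock);
	spin_unlock(&my_spinlock);
}

static void scenario2_ctx_b(void)
{
	lg_local_lock(&my_lglock);
	spin_lock(&my_spinlock);
	spin_unlock(&my_spinlock);
	lg_local_unlock(&my_lglock);
}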
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_lock(lock);
 }
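The substance of this change is the read argument that these wrappers
pass down to lock_acquire(). A sketch of the relevant helpers, modeled
on include/linux/lockdep.h around this commit (the exact check values
vary with CONFIG_PROVE_LOCKING, so treat the definitions as
illustrative):

/*
 * lock_acquire(map, subclass, trylock, read, check, nest_lock, ip)
 *   read == 0: exclusive
 *   read == 1: shared, non-recursive
 *   read == 2: shared, recursive (rwlock read side)
 */
#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)

/* The old annotation resolved to the recursive-read variant: */
#define rwlock_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)

Because the local path now passes read == 1 instead of read == 2,
lockdep treats a second lg_local_lock() of the same lglock in the same
context as an error, which is exactly what scenario 1 above requires.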
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_unlock(lock);
 	preempt_enable();
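On the unlock side the change is behaviorally a no-op: in the lockdep.h
of this era rwlock_release() is just an alias for lock_release(), so
calling lock_release() directly only keeps the release path symmetric
with the new acquire annotation. A sketch:

/* Modeled on include/linux/lockdep.h of this era -- an alias only: */
#define rwlock_release(l, n, i)	lock_release(l, n, i)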
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_lock(lock);
 }
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_unlock(lock);
 	preempt_enable();
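Note that the _cpu variants annotate exactly like the plain local
variants: every per-CPU arch_spinlock_t in an lglock hangs off a single
lockdep map, so lockdep sees one lock class regardless of which CPU's
lock is taken. A sketch of the structure, modeled on
include/linux/lglock.h of this era:

struct lglock {
	arch_spinlock_t __percpu *lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lock_key;		/* one class ... */
	struct lockdep_map lock_dep_map;	/* ... one dep map for all CPUs */
#endif
};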
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
 	int i;
 
 	preempt_disable();
-	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
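lg_global_lock() takes every CPU's spinlock, so it must conflict with
all local acquisitions; annotating it with lock_acquire_exclusive()
(read == 0) makes lockdep model it as a writer against the shared
(read == 1) local locks. A hypothetical ordering the new annotations
can flag, reusing the names from the earlier sketch:

/* Hypothetical illustration only -- not from the patch. */
static void global_then_spin(void)
{
	lg_global_lock(&my_lglock);	/* exclusive across all CPUs */
	spin_lock(&my_spinlock);
	spin_unlock(&my_spinlock);
	lg_global_unlock(&my_lglock);
}

static void spin_then_local(void)
{
	spin_lock(&my_spinlock);
	lg_local_lock(&my_lglock);	/* inverted order vs. above */
	lg_local_unlock(&my_lglock);
	spin_unlock(&my_spinlock);
}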
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
 {
 	int i;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
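With the full patch applied, scenario 1 from the changelog now produces
a lockdep report ("possible recursive locking detected") at annotation
time, before the CPU wedges on its arch_spinlock_t. A hypothetical smoke
test, not part of the patch:

/*
 * Hypothetical smoke test -- the second lg_local_lock() triggers the
 * lockdep splat; the CPU would still wedge afterwards, so this is for
 * illustration only.
 */
static int __init lglock_annotation_demo(void)
{
	lg_lock_init(&my_lglock, "my_lglock");

	lg_local_lock(&my_lglock);
	lg_local_lock(&my_lglock);	/* lockdep report expected here */
	return 0;
}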