block: fix lockdep warning on io_context release put_io_context()
11a3122f6c
"block: strip out locking optimization in put_io_context()"
removed the ioc_lock depth lockdep annotation along with the locking
optimization; however, while recursing from put_io_context() is no
longer possible, ioc_release_fn() may still end up putting the last
reference of another ioc through the elevator, which will grab that
ioc->lock and trigger a spurious (as the ioc is always a different
one) A-A deadlock warning.

As this can only happen one time from ioc_release_fn(), using a
non-zero subclass from ioc_release_fn() is enough. Use subclass 1.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d8c66c5d59
parent f6302f1bcd
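For reference, here is a minimal sketch (not part of the patch) of the lockdep subclass pattern the fix relies on: the outer ioc->lock in ioc_release_fn() is taken at subclass 1, so when the elevator path takes a different ioc's lock at the default subclass 0, lockdep sees two distinct lock keys instead of reporting a bogus A-A deadlock. The struct ioc_like and release_inner() names below are hypothetical, used only for illustration.

#include <linux/spinlock.h>

struct ioc_like {
	spinlock_t lock;
	/* icq list, refcount, etc. elided */
};

/* Hypothetical illustration of the nesting done in ioc_release_fn(). */
static void release_inner(struct ioc_like *outer, struct ioc_like *inner)
{
	unsigned long flags;

	/*
	 * Outer lock at subclass 1.  There is no spin_lock_irq_nested(),
	 * hence the irqsave variant, as the patch comment notes.
	 */
	spin_lock_irqsave_nested(&outer->lock, flags, 1);

	/*
	 * Inner lock: a different object, same lock class, default
	 * subclass 0.  Lockdep records a 1 -> 0 dependency between the
	 * subclasses rather than flagging recursion on a single key.
	 */
	spin_lock(&inner->lock);

	/* ... drop the last reference of @inner here ... */

	spin_unlock(&inner->lock);
	spin_unlock_irqrestore(&outer->lock, flags);
}

Annotating the outer acquisition (rather than the inner one) matters here because the inner lock is taken by generic put_io_context() code that the patch leaves untouched.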
@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
 	struct request_queue *last_q = NULL;
+	unsigned long flags;
 
-	spin_lock_irq(&ioc->lock);
+	/*
+	 * Exiting icq may call into put_io_context() through elevator
+	 * which will trigger lockdep warning.  The ioc's are guaranteed to
+	 * be different, use a different locking subclass here.  Use
+	 * irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
 			 */
 			if (last_q) {
 				spin_unlock(last_q->queue_lock);
-				spin_unlock_irq(&ioc->lock);
+				spin_unlock_irqrestore(&ioc->lock, flags);
 				blk_put_queue(last_q);
 			} else {
-				spin_unlock_irq(&ioc->lock);
+				spin_unlock_irqrestore(&ioc->lock, flags);
 			}
 
 			last_q = this_q;
-			spin_lock_irq(this_q->queue_lock);
-			spin_lock(&ioc->lock);
+			spin_lock_irqsave(this_q->queue_lock, flags);
+			spin_lock_nested(&ioc->lock, 1);
 			continue;
 		}
 		ioc_exit_icq(icq);
@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
 	if (last_q) {
 		spin_unlock(last_q->queue_lock);
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		blk_put_queue(last_q);
 	} else {
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
 
 	kmem_cache_free(iocontext_cachep, ioc);