dm rq: fix a race condition in rq_completed()
It is required to hold the queue lock when calling blk_run_queue_async(), to avoid triggering a race between blk_run_queue_async() and blk_cleanup_queue().

Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit d15bb3a646
parent 2e8ed71102
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
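For context, here is a minimal sketch (not part of the patch itself) of the locking pattern the hunk above applies: on the legacy, non-blk-mq path, q->queue_lock is held around blk_run_queue_async() so that the queue cannot be torn down concurrently by blk_cleanup_queue(). The helper name example_run_queue() is illustrative only and does not exist in the kernel tree.

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/*
	 * Illustrative helper (hypothetical): re-run a legacy request queue
	 * while holding its lock, mirroring the pattern in the hunk above.
	 */
	static void example_run_queue(struct request_queue *q)
	{
		unsigned long flags;

		if (q->mq_ops)
			return;	/* blk-mq queues do not take this path */

		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);	/* queue_lock held across the call */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}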