diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 86d522004a20..df8bd5ab3c0b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -434,6 +434,8 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
+		struct request_queue *slq;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -456,11 +458,25 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-		spin_lock(sdev->request_queue->queue_lock);
-		__blk_run_queue(sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-		spin_lock(shost->host_lock);
+		/*
+		 * Once we drop the host lock, a racing scsi_remove_device()
+		 * call may remove the sdev from the starved list and destroy
+		 * it and the queue. Mitigate by taking a reference to the
+		 * queue and never touching the sdev again after we drop the
+		 * host lock. Note: if __scsi_remove_device() invokes
+		 * blk_cleanup_queue() before the queue is run from this
+		 * function then blk_run_queue() will return immediately since
+		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+		 */
+		slq = sdev->request_queue;
+		if (!blk_get_queue(slq))
+			continue;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		blk_run_queue(slq);
+		blk_put_queue(slq);
+
+		spin_lock_irqsave(shost->host_lock, flags);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
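
The comment added above describes the general pattern: pin the object with a reference count while the protecting lock is still held, then drop the lock and never touch the parent object again. Below is a minimal user-space sketch of that pattern, not kernel code and not the SCSI midlayer API; all names (struct sdev_stub, starved_list_lock, stub_get/stub_put, run_queue) are hypothetical stand-ins for sdev, shost->host_lock, blk_get_queue/blk_put_queue and blk_run_queue.

/*
 * Sketch: pin an object with a refcount before dropping the lock that
 * protects it, so a concurrent remover cannot free it underneath us.
 * Hypothetical names; simplified user-space analogue of the patch above.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sdev_stub {
	int refcount;		/* protected by starved_list_lock */
	int queue_id;		/* stands in for sdev->request_queue */
};

static pthread_mutex_t starved_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 if the object is already going away; call with the lock held. */
static int stub_get(struct sdev_stub *s)
{
	if (s->refcount == 0)
		return 0;
	s->refcount++;
	return 1;
}

static void stub_put(struct sdev_stub *s)
{
	pthread_mutex_lock(&starved_list_lock);
	if (--s->refcount == 0) {
		pthread_mutex_unlock(&starved_list_lock);
		free(s);
		return;
	}
	pthread_mutex_unlock(&starved_list_lock);
}

/* Stands in for blk_run_queue(); must not be called under the list lock. */
static void run_queue(int queue_id)
{
	printf("running queue %d\n", queue_id);
}

static void process_starved_entry(struct sdev_stub *s)
{
	int queue_id;

	pthread_mutex_lock(&starved_list_lock);

	/*
	 * Pin the object while the lock is still held; if that fails it is
	 * being torn down and we skip it, analogous to the
	 * "if (!blk_get_queue(slq)) continue;" in the patch.
	 */
	if (!stub_get(s)) {
		pthread_mutex_unlock(&starved_list_lock);
		return;
	}
	queue_id = s->queue_id;

	pthread_mutex_unlock(&starved_list_lock);

	/* Safe: our reference keeps the object alive across the unlock. */
	run_queue(queue_id);
	stub_put(s);
}

int main(void)
{
	struct sdev_stub *s = malloc(sizeof(*s));

	s->refcount = 1;
	s->queue_id = 42;
	process_starved_entry(s);
	stub_put(s);		/* drop the initial reference */
	return 0;
}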