
[SCSI] Make scsi_free_queue() kill pending SCSI commands

Make sure that SCSI device removal via scsi_remove_host() finishes
all pending SCSI commands. Currently that is not the case, and hence
removal of a SCSI host during I/O can cause a deadlock. See also
"blkdev_issue_discard() hangs forever if underlying storage device is
removed" (http://bugzilla.kernel.org/show_bug.cgi?id=40472) and
http://lkml.org/lkml/2011/8/27/6.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: <stable@kernel.org>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Authored by Bart Van Assche on 2011-09-23 19:48:18 +02:00; committed by James Bottomley
parent 21208ae5a2
commit 3308511c93
2 changed files with 15 additions and 3 deletions
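
For context, the release callback patched below is reached from a low-level driver's hot-unplug path. A minimal sketch, assuming a hypothetical driver whose private structure holds the Scsi_Host pointer (my_hba and my_remove are illustrative names, not part of this commit):

#include <scsi/scsi_host.h>

/* Hypothetical driver-private state; only the host pointer matters here. */
struct my_hba {
	struct Scsi_Host *shost;
};

static void my_remove(struct my_hba *hba)
{
	/* Detach the host and its devices from the SCSI midlayer. */
	scsi_remove_host(hba->shost);
	/* Drop the driver's reference; once the last reference is gone,
	 * scsi_host_dev_release() (first hunk below) runs. */
	scsi_host_put(hba->shost);
}

If I/O such as blkdev_issue_discard() is still outstanding at this point, the submitter sleeps until its requests complete; before this patch those pending commands were never finished, which is the deadlock described above.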

drivers/scsi/hosts.c

@@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct device *parent = dev->parent;
+	struct request_queue *q;
 
 	scsi_proc_hostdir_rm(shost->hostt);
 
@@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
 		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
-	if (shost->uspace_req_q) {
-		kfree(shost->uspace_req_q->queuedata);
-		scsi_free_queue(shost->uspace_req_q);
+	q = shost->uspace_req_q;
+	if (q) {
+		kfree(q->queuedata);
+		q->queuedata = NULL;
+		scsi_free_queue(q);
 	}
 
 	scsi_destroy_command_freelist(shost);

drivers/scsi/scsi_lib.c

@@ -1698,6 +1698,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 
 void scsi_free_queue(struct request_queue *q)
 {
+	unsigned long flags;
+
+	WARN_ON(q->queuedata);
+
+	/* cause scsi_request_fn() to kill all non-finished requests */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	blk_cleanup_queue(q);
 }
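
The comment in the hunk above points at the mechanism: calling q->request_fn(q), i.e. scsi_request_fn(), on a queue whose queuedata (the struct scsi_device pointer) has already been cleared makes the midlayer treat the queue as dead and terminate every request still on it, waking up whoever is waiting for those requests. A simplified sketch of that guard as it looked in scsi_lib.c of this era (paraphrased for illustration, not part of this diff; the real function continues with the normal dispatch loop):

static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *req;

	if (!sdev) {
		/* The queue is being torn down: fail every pending request
		 * so that waiters such as blkdev_issue_discard() complete
		 * with an error instead of hanging forever. */
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}
	/* ... normal command dispatch elided ... */
}

This is also why scsi_host_dev_release() now clears q->queuedata before calling scsi_free_queue(), and why the new WARN_ON(q->queuedata) is there: the request function must see a dead queue so that pending requests are killed rather than dispatched to hardware that is going away.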