SCSI fixes on 20150814

This consists of two libfc fixes for bugs causing rare crashes, one iscsi
fix for a potential hang on shutdown, a fix for an I/O blocksize issue
which caused a regression, and a fix for a memory leak in scsi-mq.

Signed-off-by: James Bottomley <JBottomley@Odin.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABAgAGBQJVzsOiAAoJEDeqqVYsXL0M4wQIAIUe1SGv66OcAQLBcxxJUL2T
i+Ph2RE/er1iyzJepN56i77Mn2hnBgnQtmB/ibnjQfUsz/zo5PANjIfy+eFcG53G
qcb8l7a/BFCH3JHWXL7rJYN9G64sirADDL6SDLpX1JFsL21bAGdcEgbmefysvDmr
qFkiGH0Ty9YH58W+6j1pzQhh437rRgcM1KuY08sJsbKmyCVdzG5ketzBkONmBcTh
OTfPQjL32L4KR3THDUbpCiK6YAUtDvHjVB51lwoiB1ER7Ke+E+nqlCuxhXvZfMoD
lGfvWgwaQnoN1fun6c85zcqnl72kymropWWiJUhhPPjeZEj8sgn/eaaAAST9mlQ=
=Fztd
-----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This has two libfc fixes for bugs causing rare crashes, one iscsi fix
  for a potential hang on shutdown, and a fix for an I/O blocksize issue
  which caused a regression"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  sd: Fix maximum I/O size for BLOCK_PC requests
  libfc: Fix fc_fcp_cleanup_each_cmd()
  libfc: Fix fc_exch_recv_req() error path
  libiscsi: Fix host busy blocking during connection teardown
commit 1efdb5f0a9
block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
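For readers of the comment above: blk_queue_max_hw_sectors() is how a low
level driver publishes the hard limit the text describes. Below is a
minimal sketch, not part of this series; the example_hw_init() name and
the 1024-sector value are invented for illustration, only the
blk_queue_max_hw_sectors() helper itself comes from the block layer.

#include <linux/blkdev.h>

/*
 * Hypothetical queue setup for an imaginary controller. The driver sets
 * max_hw_sectors (the hard limit) from what its I/O controller can do;
 * the block layer keeps the soft limit, max_sectors, at or below this
 * value for filesystem requests, and it can be lowered further at runtime.
 */
static void example_hw_init(struct request_queue *q)
{
        /* The controller moves at most 1024 sectors (512 KiB) per request. */
        blk_queue_max_hw_sectors(q, 1024);
}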
drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and then reacquiring
+                        * again around fc_fcp_cleanup_cmd() is required,
+                        * since fc_fcp_cleanup_cmd() calls into
+                        * fc_seq_set_resp() and that func preempts cpu using
+                        * schedule. May be schedule and related code should be
+                        * removed instead of unlocking here to avoid scheduling
+                        * while atomic bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
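The sd change above caps only the soft limit (limits.max_sectors) at the
device's reported transfer length, so max_hw_sectors keeps the controller's
full capability and BLOCK_PC (SG_IO) passthrough requests are no longer
shrunk with it. A minimal sketch of the distinction, assuming only the
standard queue_max_sectors()/queue_max_hw_sectors() accessors; the
example_show_limits() helper is invented for illustration.

#include <linux/blkdev.h>
#include <linux/printk.h>

/*
 * Illustration only: print the two limits the patch distinguishes.
 * queue_max_sectors() is the soft cap applied to filesystem requests;
 * queue_max_hw_sectors() is the hard cap, which also bounds BLOCK_PC
 * passthrough requests such as those issued via SG_IO.
 */
static void example_show_limits(struct request_queue *q)
{
        pr_info("max_sectors    = %u\n", queue_max_sectors(q));
        pr_info("max_hw_sectors = %u\n", queue_max_hw_sectors(q));
}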