Commit 1efdb5f0 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This has two libfc fixes for bugs causing rare crashes, one iscsi fix
  for a potential hang on shutdown, and a fix for an I/O blocksize issue
  which caused a regression"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  sd: Fix maximum I/O size for BLOCK_PC requests
  libfc: Fix fc_fcp_cleanup_each_cmd()
  libfc: Fix fc_exch_recv_req() error path
  libiscsi: Fix host busy blocking during connection teardown
parents 45e38cff 4f258a46
...@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); ...@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description: * Description:
* Enables a low level driver to set a hard upper limit, * Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by * max_hw_sectors, on the size of requests. max_hw_sectors is set by
* the device driver based upon the combined capabilities of I/O * the device driver based upon the capabilities of the I/O
* controller and storage device. * controller.
* *
* max_sectors is a soft limit imposed by the block layer for * max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a * filesystem type requests. This value can be overridden on a
......
...@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, ...@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
if (resp) { if (resp) {
resp(sp, fp, arg); resp(sp, fp, arg);
res = true; res = true;
} else if (!IS_ERR(fp)) {
fc_frame_free(fp);
} }
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
...@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) ...@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that * If new exch resp handler is valid then call that
* first. * first.
*/ */
fc_invoke_resp(ep, sp, fp); if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
fc_exch_release(ep); fc_exch_release(ep);
return; return;
...@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) ...@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
fc_exch_hold(ep); fc_exch_hold(ep);
if (!rc) if (!rc)
fc_exch_delete(ep); fc_exch_delete(ep);
fc_invoke_resp(ep, sp, fp); if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
if (has_rec) if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov); fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep); fc_exch_release(ep);
......
...@@ -1039,11 +1039,26 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, ...@@ -1039,11 +1039,26 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
fc_fcp_pkt_hold(fsp); fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags); spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (!fc_fcp_lock_pkt(fsp)) { spin_lock_bh(&fsp->scsi_pkt_lock);
if (!(fsp->state & FC_SRB_COMPL)) {
fsp->state |= FC_SRB_COMPL;
/*
* TODO: dropping scsi_pkt_lock and then reacquiring
* again around fc_fcp_cleanup_cmd() is required,
* since fc_fcp_cleanup_cmd() calls into
* fc_seq_set_resp() and that func preempts cpu using
* schedule. May be schedule and related code should be
* removed instead of unlocking here to avoid scheduling
* while atomic bug.
*/
spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_cleanup_cmd(fsp, error); fc_fcp_cleanup_cmd(fsp, error);
spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp); fc_io_compl(fsp);
fc_fcp_unlock_pkt(fsp);
} }
spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp); fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags); spin_lock_irqsave(&si->scsi_queue_lock, flags);
......
...@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) ...@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{ {
struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
unsigned long flags;
del_timer_sync(&conn->transport_timer); del_timer_sync(&conn->transport_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock); spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) { if (session->leadconn == conn) {
...@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) ...@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
} }
spin_unlock_bh(&session->frwd_lock); spin_unlock_bh(&session->frwd_lock);
/*
* Block until all in-progress commands for this connection
* time out or fail.
*/
for (;;) {
spin_lock_irqsave(session->host->host_lock, flags);
if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
spin_unlock_irqrestore(session->host->host_lock, flags);
break;
}
spin_unlock_irqrestore(session->host->host_lock, flags);
msleep_interruptible(500);
iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
"host_busy %d host_failed %d\n",
atomic_read(&session->host->host_busy),
session->host->host_failed);
/*
* force eh_abort() to unblock
*/
wake_up(&conn->ehwait);
}
/* flush queued up work because we free the connection below */ /* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn); iscsi_suspend_tx(conn);
...@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) ...@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
if (session->leadconn == conn) if (session->leadconn == conn)
session->leadconn = NULL; session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock); spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn); iscsi_destroy_conn(cls_conn);
} }
......
...@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk) ...@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
max_xfer = sdkp->max_xfer_blocks; max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9; max_xfer <<= ilog2(sdp->sector_size) - 9;
max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), sdkp->disk->queue->limits.max_sectors =
max_xfer); min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
set_capacity(disk, sdkp->capacity); set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp); sd_config_write_same(sdkp);
kfree(buffer); kfree(buffer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment