Commit 6764c317 authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Six small fixes. Five are obvious and in drivers. The last one is a
  core fix to remove the host lock acquisition and release, caused by a
  dynamic check of host_busy, in the error handling loop which has been
  reported to cause lockups"
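
To make the core change concrete, here is a minimal, self-contained C sketch of its shape. The names (toy_host, toy_host_busy(), toy_eh_wakeup()) are invented for the example and are not kernel APIs: the point is that the wakeup helper now receives the busy count from its caller instead of computing it internally on every invocation, so only a cheap comparison happens while the lock is held.

/*
 * Toy model, illustration only: the wakeup helper takes a precomputed
 * busy count rather than walking every tag itself.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_host {
	pthread_mutex_t host_lock;   /* stands in for shost->host_lock */
	unsigned int host_failed;    /* commands parked for error handling */
	unsigned int in_flight;      /* what a real busy count would report */
};

/* Stand-in for the busy-count helper; in the kernel this iterates every tag. */
static unsigned int toy_host_busy(struct toy_host *h)
{
	return h->in_flight;
}

/* New shape: the busy count arrives as an argument, so the helper is only
 * a comparison while the lock is held. */
static void toy_eh_wakeup(struct toy_host *h, unsigned int busy)
{
	if (busy == h->host_failed)
		printf("all %u busy commands have failed, wake the EH thread\n", busy);
}

int main(void)
{
	struct toy_host h = {
		.host_lock = PTHREAD_MUTEX_INITIALIZER,
		.host_failed = 1,
		.in_flight = 2,
	};

	pthread_mutex_lock(&h.host_lock);
	h.host_failed++;                       /* another command fails */
	toy_eh_wakeup(&h, toy_host_busy(&h));  /* caller supplies the count */
	pthread_mutex_unlock(&h.host_lock);
	return 0;
}

The actual change is in the scsi_error.c, scsi_lib.c and scsi_priv.h hunks of the diff below.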

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: storvsc: Fix ring buffer size calculation
  scsi: core: Move scsi_host_busy() out of host lock for waking up EH handler
  scsi: MAINTAINERS: Update ibmvscsi_tgt maintainer
  scsi: initio: Remove redundant variable 'rb'
  scsi: virtio_scsi: Remove duplicate check if queue is broken
  scsi: isci: Fix an error code problem in isci_io_request_build()
parents 1bbb19b6 f4469f38
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10283,7 +10283,7 @@ F:	drivers/scsi/ibmvscsi/ibmvscsi*
 F:	include/scsi/viosrp.h
 
 IBM Power Virtual SCSI Device Target Driver
-M:	Michael Cyr <mikecyr@linux.ibm.com>
+M:	Tyrel Datwyler <tyreld@linux.ibm.com>
 L:	linux-scsi@vger.kernel.org
 L:	target-devel@vger.kernel.org
 S:	Supported
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
  */
 static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
 {
-	u8 rb;
 	u8 instr;
 	int i;
 
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
 		udelay(30);
 		outb(SE2CS, base + TUL_NVRAM);	/* -CLK */
 		udelay(30);
-		if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+		if (inb(base + TUL_NVRAM) & SE2DI)
 			break;	/* write complete */
 	}
 	outb(0, base + TUL_NVRAM);	/* -CS */
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
 		return SCI_FAILURE;
 	}
 
-	return SCI_SUCCESS;
+	return status;
 }
 
 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
 static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
 						   struct scsi_cmnd *);
 
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
 {
 	lockdep_assert_held(shost->host_lock);
 
-	if (scsi_host_busy(shost) == shost->host_failed) {
+	if (busy == shost->host_failed) {
 		trace_scsi_eh_wakeup(shost);
 		wake_up_process(shost->ehandler);
 		SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
 	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
 	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
 		shost->host_eh_scheduled++;
-		scsi_eh_wakeup(shost);
+		scsi_eh_wakeup(shost, scsi_host_busy(shost));
 	}
 
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -286,7 +286,7 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_failed++;
-	scsi_eh_wakeup(shost);
+	scsi_eh_wakeup(shost, scsi_host_busy(shost));
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -280,7 +280,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 	if (unlikely(scsi_host_in_recovery(shost))) {
 		spin_lock_irqsave(shost->host_lock, flags);
 		if (shost->host_failed || shost->host_eh_scheduled)
-			scsi_eh_wakeup(shost);
+			scsi_eh_wakeup(shost, scsi_host_busy(shost));
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	}
 	rcu_read_unlock();
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
 extern enum blk_eh_timer_return scsi_timeout(struct request *req);
 extern int scsi_error_handler(void *host);
 extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
 extern void scsi_eh_scmd_add(struct scsi_cmnd *);
 void scsi_eh_ready_devs(struct Scsi_Host *shost,
 			struct list_head *work_q,
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -330,6 +330,7 @@ enum storvsc_request_type {
  */
 static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
 	new_sc->next_request_id_callback = storvsc_next_request_id;
 
 	ret = vmbus_open(new_sc,
-			 storvsc_ringbuffer_size,
-			 storvsc_ringbuffer_size,
+			 aligned_ringbuffer_size,
+			 aligned_ringbuffer_size,
 			 (void *)&props,
 			 sizeof(struct vmstorage_channel_properties),
 			 storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
 	dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
 
 	stor_device->port_number = host->host_no;
-	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+	ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
 	if (ret)
 		goto err_out1;
 
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
 {
 	int ret;
 
-	ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+	ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
 				     hv_dev_is_fc(hv_dev));
 	return ret;
 }
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
 	 * the ring buffer indices) by the max request size (which is
 	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
 	 */
+	aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
 	max_outstanding_req_per_channel =
-		((storvsc_ringbuffer_size - PAGE_SIZE) /
+		((aligned_ringbuffer_size - PAGE_SIZE) /
 			ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
 				sizeof(struct vstor_packet) + sizeof(u64),
 				sizeof(u64)));
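
For illustration, here is a small stand-alone sketch of what the storvsc hunks above achieve: the aligned ring size is computed once at module init, and that same value then feeds both the channel-open call and the per-channel request budget, so the two can no longer drift apart. The page size, the per-request footprint and the rounding helper below are assumptions invented for the demo; they only stand in for PAGE_SIZE, the real packet sizes and VMBUS_RING_SIZE().

#include <stdio.h>

#define TOY_PAGE_SIZE  4096u   /* assumed page size for the demo */
#define TOY_SLOT_SIZE   512u   /* assumed per-request footprint, not the real value */

/* Assumed stand-in for VMBUS_RING_SIZE(): round the payload up to whole pages. */
static unsigned int toy_ring_size(unsigned int payload)
{
	return (payload + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1);
}

int main(void)
{
	unsigned int requested = 128 * 1024;               /* like storvsc_ringbuffer_size */
	unsigned int aligned   = toy_ring_size(requested); /* computed once at init */

	/* Both consumers now read the same aligned value. */
	printf("open channels with %u bytes\n", aligned);
	printf("allow %u outstanding requests per channel\n",
	       (aligned - TOY_PAGE_SIZE) / TOY_SLOT_SIZE);
	return 0;
}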
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -188,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
 		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 			fn(vscsi, buf);
 
-		if (unlikely(virtqueue_is_broken(vq)))
-			break;
 	} while (!virtqueue_enable_cb(vq));
 	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 }