Commit cd9070c9 authored by Christoph Hellwig

scsi: fix the {host,target,device}_blocked counter mess

Seems like these counters are missing any sort of synchronization for
updates, as an over 10 year old comment from me noted.  Fix this by
using atomic counters, and while we're at it also make sure they are
in the same cacheline as the _busy counters and not needlessly stored
to in every I/O completion.

With the new model the _busy counters can temporarily go negative,
so all the readers are updated to check for > 0 values.  Longer
term every successful I/O completion will reset the counters to zero,
so the temporarily negative values will not cause any harm.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
parent 71e75c97
...@@ -726,17 +726,16 @@ void scsi_finish_command(struct scsi_cmnd *cmd) ...@@ -726,17 +726,16 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
scsi_device_unbusy(sdev); scsi_device_unbusy(sdev);
/* /*
* Clear the flags which say that the device/host is no longer * Clear the flags that say that the device/target/host is no longer
* capable of accepting new commands. These are set in scsi_queue.c * capable of accepting new commands.
* for both the queue full condition on a device, and for a */
* host full condition on the host. if (atomic_read(&shost->host_blocked))
* atomic_set(&shost->host_blocked, 0);
* XXX(hch): What about locking? if (atomic_read(&starget->target_blocked))
*/ atomic_set(&starget->target_blocked, 0);
shost->host_blocked = 0; if (atomic_read(&sdev->device_blocked))
starget->target_blocked = 0; atomic_set(&sdev->device_blocked, 0);
sdev->device_blocked = 0;
/* /*
* If we have valid sense information, then some kind of recovery * If we have valid sense information, then some kind of recovery
......
...@@ -99,14 +99,16 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason) ...@@ -99,14 +99,16 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
*/ */
switch (reason) { switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY: case SCSI_MLQUEUE_HOST_BUSY:
host->host_blocked = host->max_host_blocked; atomic_set(&host->host_blocked, host->max_host_blocked);
break; break;
case SCSI_MLQUEUE_DEVICE_BUSY: case SCSI_MLQUEUE_DEVICE_BUSY:
case SCSI_MLQUEUE_EH_RETRY: case SCSI_MLQUEUE_EH_RETRY:
device->device_blocked = device->max_device_blocked; atomic_set(&device->device_blocked,
device->max_device_blocked);
break; break;
case SCSI_MLQUEUE_TARGET_BUSY: case SCSI_MLQUEUE_TARGET_BUSY:
starget->target_blocked = starget->max_target_blocked; atomic_set(&starget->target_blocked,
starget->max_target_blocked);
break; break;
} }
} }
...@@ -351,29 +353,35 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) ...@@ -351,29 +353,35 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
} }
static inline int scsi_device_is_busy(struct scsi_device *sdev) static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{ {
if (atomic_read(&sdev->device_busy) >= sdev->queue_depth || if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
sdev->device_blocked) return true;
return 1; if (atomic_read(&sdev->device_blocked) > 0)
return 0; return true;
return false;
} }
static inline int scsi_target_is_busy(struct scsi_target *starget) static inline bool scsi_target_is_busy(struct scsi_target *starget)
{ {
return ((starget->can_queue > 0 && if (starget->can_queue > 0 &&
atomic_read(&starget->target_busy) >= starget->can_queue) || atomic_read(&starget->target_busy) >= starget->can_queue)
starget->target_blocked); return true;
if (atomic_read(&starget->target_blocked) > 0)
return true;
return false;
} }
static inline int scsi_host_is_busy(struct Scsi_Host *shost) static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{ {
if ((shost->can_queue > 0 && if (shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue) || atomic_read(&shost->host_busy) >= shost->can_queue)
shost->host_blocked || shost->host_self_blocked) return true;
return 1; if (atomic_read(&shost->host_blocked) > 0)
return true;
return 0; if (shost->host_self_blocked)
return true;
return false;
} }
static void scsi_starved_list_run(struct Scsi_Host *shost) static void scsi_starved_list_run(struct Scsi_Host *shost)
...@@ -1256,14 +1264,14 @@ static inline int scsi_dev_queue_ready(struct request_queue *q, ...@@ -1256,14 +1264,14 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
unsigned int busy; unsigned int busy;
busy = atomic_inc_return(&sdev->device_busy) - 1; busy = atomic_inc_return(&sdev->device_busy) - 1;
if (sdev->device_blocked) { if (atomic_read(&sdev->device_blocked)) {
if (busy) if (busy)
goto out_dec; goto out_dec;
/* /*
* unblock after device_blocked iterates to zero * unblock after device_blocked iterates to zero
*/ */
if (--sdev->device_blocked != 0) { if (atomic_dec_return(&sdev->device_blocked) > 0) {
blk_delay_queue(q, SCSI_QUEUE_DELAY); blk_delay_queue(q, SCSI_QUEUE_DELAY);
goto out_dec; goto out_dec;
} }
...@@ -1302,19 +1310,15 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost, ...@@ -1302,19 +1310,15 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
} }
busy = atomic_inc_return(&starget->target_busy) - 1; busy = atomic_inc_return(&starget->target_busy) - 1;
if (starget->target_blocked) { if (atomic_read(&starget->target_blocked) > 0) {
if (busy) if (busy)
goto starved; goto starved;
/* /*
* unblock after target_blocked iterates to zero * unblock after target_blocked iterates to zero
*/ */
spin_lock_irq(shost->host_lock); if (atomic_dec_return(&starget->target_blocked) > 0)
if (--starget->target_blocked != 0) {
spin_unlock_irq(shost->host_lock);
goto out_dec; goto out_dec;
}
spin_unlock_irq(shost->host_lock);
SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
"unblocking target at zero depth\n")); "unblocking target at zero depth\n"));
...@@ -1349,19 +1353,15 @@ static inline int scsi_host_queue_ready(struct request_queue *q, ...@@ -1349,19 +1353,15 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
return 0; return 0;
busy = atomic_inc_return(&shost->host_busy) - 1; busy = atomic_inc_return(&shost->host_busy) - 1;
if (shost->host_blocked) { if (atomic_read(&shost->host_blocked) > 0) {
if (busy) if (busy)
goto starved; goto starved;
/* /*
* unblock after host_blocked iterates to zero * unblock after host_blocked iterates to zero
*/ */
spin_lock_irq(shost->host_lock); if (atomic_dec_return(&shost->host_blocked) > 0)
if (--shost->host_blocked != 0) {
spin_unlock_irq(shost->host_lock);
goto out_dec; goto out_dec;
}
spin_unlock_irq(shost->host_lock);
SCSI_LOG_MLQUEUE(3, SCSI_LOG_MLQUEUE(3,
shost_printk(KERN_INFO, shost, shost_printk(KERN_INFO, shost,
......
...@@ -584,7 +584,6 @@ static int scsi_sdev_check_buf_bit(const char *buf) ...@@ -584,7 +584,6 @@ static int scsi_sdev_check_buf_bit(const char *buf)
/* /*
* Create the actual show/store functions and data structures. * Create the actual show/store functions and data structures.
*/ */
sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (type, "%d\n"); sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (vendor, "%.8s\n");
...@@ -600,6 +599,15 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr, ...@@ -600,6 +599,15 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
} }
static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
static ssize_t
sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
}
static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
/* /*
* TODO: can we make these symlinks to the block layer ones? * TODO: can we make these symlinks to the block layer ones?
*/ */
......
...@@ -82,6 +82,8 @@ struct scsi_device { ...@@ -82,6 +82,8 @@ struct scsi_device {
struct list_head same_target_siblings; /* just the devices sharing same target id */ struct list_head same_target_siblings; /* just the devices sharing same target id */
atomic_t device_busy; /* commands actually active on LLDD */ atomic_t device_busy; /* commands actually active on LLDD */
atomic_t device_blocked; /* Device returned QUEUE_FULL. */
spinlock_t list_lock; spinlock_t list_lock;
struct list_head cmd_list; /* queue of in use SCSI Command structures */ struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry; struct list_head starved_entry;
...@@ -180,8 +182,6 @@ struct scsi_device { ...@@ -180,8 +182,6 @@ struct scsi_device {
struct list_head event_list; /* asserted events */ struct list_head event_list; /* asserted events */
struct work_struct event_work; struct work_struct event_work;
unsigned int device_blocked; /* Device returned QUEUE_FULL. */
unsigned int max_device_blocked; /* what device_blocked counts down from */ unsigned int max_device_blocked; /* what device_blocked counts down from */
#define SCSI_DEFAULT_DEVICE_BLOCKED 3 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
...@@ -291,12 +291,13 @@ struct scsi_target { ...@@ -291,12 +291,13 @@ struct scsi_target {
* the same target will also. */ * the same target will also. */
/* commands actually active on LLD. */ /* commands actually active on LLD. */
atomic_t target_busy; atomic_t target_busy;
atomic_t target_blocked;
/* /*
* LLDs should set this in the slave_alloc host template callout. * LLDs should set this in the slave_alloc host template callout.
* If set to zero then there is not limit. * If set to zero then there is not limit.
*/ */
unsigned int can_queue; unsigned int can_queue;
unsigned int target_blocked;
unsigned int max_target_blocked; unsigned int max_target_blocked;
#define SCSI_DEFAULT_TARGET_BLOCKED 3 #define SCSI_DEFAULT_TARGET_BLOCKED 3
......
...@@ -583,6 +583,8 @@ struct Scsi_Host { ...@@ -583,6 +583,8 @@ struct Scsi_Host {
struct blk_queue_tag *bqt; struct blk_queue_tag *bqt;
atomic_t host_busy; /* commands actually active on low-level */ atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
unsigned int host_failed; /* commands that failed. unsigned int host_failed; /* commands that failed.
protected by host_lock */ protected by host_lock */
unsigned int host_eh_scheduled; /* EH scheduled without command */ unsigned int host_eh_scheduled; /* EH scheduled without command */
...@@ -681,11 +683,6 @@ struct Scsi_Host { ...@@ -681,11 +683,6 @@ struct Scsi_Host {
*/ */
struct workqueue_struct *tmf_work_q; struct workqueue_struct *tmf_work_q;
/*
* Host has rejected a command because it was busy.
*/
unsigned int host_blocked;
/* /*
* Value host_blocked counts down from * Value host_blocked counts down from
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment