Commit 71e75c97 authored by Christoph Hellwig

scsi: convert device_busy to atomic_t

Avoid taking the queue_lock to check the per-device queue limit.  Instead
we do an atomic_inc_return early on to grab our slot in the queue,
and if necessary decrement it after finishing all checks.

Unlike the host and target busy counters, this doesn't allow us to avoid the
queue_lock in the request_fn due to the way the interface works, but it'll
allow us to prepare for using the blk-mq code, which doesn't use the
queue_lock at all, and it at least avoids a queue_lock round trip in
scsi_device_unbusy, which is still important given how busy the queue_lock
is.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
parent 74665016
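
The core idea described in the commit message — optimistically take a slot with
an atomic increment, then back out if a later check fails — can be modeled
outside the kernel. Below is a minimal userspace sketch of that pattern,
mirroring what scsi_dev_queue_ready() does in the diff that follows. It uses
C11 <stdatomic.h> rather than the kernel's atomic_t, and the demo_device /
demo_queue_ready names are illustrative only, not part of the SCSI midlayer.

/*
 * Userspace model of the "grab a slot, roll back on failure" scheme.
 * atomic_fetch_add() returns the old value, mirroring the kernel's
 * atomic_inc_return(&sdev->device_busy) - 1 in scsi_dev_queue_ready().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_device {
	atomic_uint device_busy;   /* commands currently in flight */
	unsigned int queue_depth;  /* per-device admission limit */
	bool blocked;              /* device temporarily blocked */
};

static bool demo_queue_ready(struct demo_device *dev)
{
	/* Optimistically take a slot; 'busy' is the count before us. */
	unsigned int busy = atomic_fetch_add(&dev->device_busy, 1);

	if (dev->blocked || busy >= dev->queue_depth) {
		/* A check failed: give the slot back, as out_dec does. */
		atomic_fetch_sub(&dev->device_busy, 1);
		return false;
	}
	return true; /* slot held until the command completes */
}

int main(void)
{
	struct demo_device dev = { .queue_depth = 2 };

	/* First two admissions succeed, the third is rejected. */
	for (int i = 0; i < 3; i++)
		printf("attempt %d: %s\n", i,
		       demo_queue_ready(&dev) ? "admitted" : "rejected");
	return 0;
}

The increment-first ordering is what lets concurrent submitters race safely
without a lock: at most queue_depth callers can observe busy < queue_depth,
and every loser restores the counter before returning.
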
drivers/message/fusion/mptsas.c
@@ -3763,7 +3763,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
 				printk(MYIOC_s_DEBUG_FMT
 				    "SDEV OUTSTANDING CMDS"
 				    "%d\n", ioc->name,
-				    sdev->device_busy));
+				    atomic_read(&sdev->device_busy)));
 			}
 		}

drivers/scsi/scsi_lib.c
@@ -302,9 +302,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	}
 
-	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
-	sdev->device_busy--;
-	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+	atomic_dec(&sdev->device_busy);
 }
 
 /*
@@ -355,9 +353,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 
 static inline int scsi_device_is_busy(struct scsi_device *sdev)
 {
-	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
+	    sdev->device_blocked)
 		return 1;
-
 	return 0;
 }
@@ -1204,7 +1202,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 		 * queue must be restarted, so we schedule a callback to happen
 		 * shortly.
 		 */
-		if (sdev->device_busy == 0)
+		if (atomic_read(&sdev->device_busy) == 0)
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
@@ -1255,26 +1253,33 @@ static void scsi_unprep_fn(struct request_queue *q, struct request *req)
 static inline int scsi_dev_queue_ready(struct request_queue *q,
 				  struct scsi_device *sdev)
 {
-	if (sdev->device_busy == 0 && sdev->device_blocked) {
+	unsigned int busy;
+
+	busy = atomic_inc_return(&sdev->device_busy) - 1;
+	if (sdev->device_blocked) {
+		if (busy)
+			goto out_dec;
+
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
-		if (--sdev->device_blocked == 0) {
-			SCSI_LOG_MLQUEUE(3,
-				   sdev_printk(KERN_INFO, sdev,
-				   "unblocking device at zero depth\n"));
-		} else {
+		if (--sdev->device_blocked != 0) {
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
-			return 0;
+			goto out_dec;
 		}
+		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+				   "unblocking device at zero depth\n"));
 	}
-	if (scsi_device_is_busy(sdev))
-		return 0;
+
+	if (busy >= sdev->queue_depth)
+		goto out_dec;
 
 	return 1;
+out_dec:
+	atomic_dec(&sdev->device_busy);
+	return 0;
 }
 
 /*
  * scsi_target_queue_ready: checks if there we can send commands to target
  * @sdev: scsi device on starget to check.
@@ -1448,7 +1453,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	 * bump busy counts.  To bump the counters, we need to dance
 	 * with the locks as normal issue path does.
 	 */
-	sdev->device_busy++;
+	atomic_inc(&sdev->device_busy);
 	atomic_inc(&shost->host_busy);
 	atomic_inc(&starget->target_busy);
@@ -1544,7 +1549,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * accept it.
 		 */
 		req = blk_peek_request(q);
-		if (!req || !scsi_dev_queue_ready(q, sdev))
+		if (!req)
 			break;
 
 		if (unlikely(!scsi_device_online(sdev))) {
@@ -1554,13 +1559,14 @@ static void scsi_request_fn(struct request_queue *q)
 			continue;
 		}
 
+		if (!scsi_dev_queue_ready(q, sdev))
+			break;
+
 		/*
 		 * Remove the request from the request list.
 		 */
 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
 			blk_start_request(req);
-		sdev->device_busy++;
 
 		spin_unlock_irq(q->queue_lock);
 		cmd = req->special;
@@ -1630,9 +1636,9 @@ static void scsi_request_fn(struct request_queue *q)
 	 */
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
-	sdev->device_busy--;
+	atomic_dec(&sdev->device_busy);
 out_delay:
-	if (sdev->device_busy == 0 && !scsi_device_blocked(sdev))
+	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
@@ -2371,7 +2377,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
 		return err;
 
 	scsi_run_queue(sdev->request_queue);
-	while (sdev->device_busy) {
+	while (atomic_read(&sdev->device_busy)) {
 		msleep_interruptible(200);
 		scsi_run_queue(sdev->request_queue);
 	}

drivers/scsi/scsi_sysfs.c
@@ -585,13 +585,21 @@ static int scsi_sdev_check_buf_bit(const char *buf)
  * Create the actual show/store functions and data structures.
  */
 sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
 sdev_rd_attr (vendor, "%.8s\n");
 sdev_rd_attr (model, "%.16s\n");
 sdev_rd_attr (rev, "%.4s\n");
 
+static ssize_t
+sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+}
+static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
+
 /*
  * TODO: can we make these symlinks to the block layer ones?
  */

drivers/scsi/sg.c
@@ -2574,7 +2574,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
 			      scsidp->id, scsidp->lun, (int) scsidp->type,
 			      1,
 			      (int) scsidp->queue_depth,
-			      (int) scsidp->device_busy,
+			      (int) atomic_read(&scsidp->device_busy),
 			      (int) scsi_device_online(scsidp));
 		}
 	read_unlock_irqrestore(&sg_index_lock, iflags);

include/scsi/scsi_device.h
@@ -81,9 +81,7 @@ struct scsi_device {
 	struct list_head    siblings;   /* list of all devices on this host */
 	struct list_head    same_target_siblings; /* just the devices sharing same target id */
 
-	/* this is now protected by the request_queue->queue_lock */
-	unsigned int device_busy;	/* commands actually active on
-					 * low-level. protected by queue_lock. */
+	atomic_t device_busy;		/* commands actually active on LLDD */
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
 	struct list_head starved_entry;