Commit 7ae65c0f authored by Christoph Hellwig

scsi: convert target_busy to an atomic_t

Avoid taking the host-wide host_lock to check the per-target queue limit.
Instead we do an atomic_inc_return early on to grab our slot in the queue,
and if necessary decrement it after finishing all checks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
parent cf68d334
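
The pattern used here is optimistic slot reservation: increment the counter first, then give the slot back if a later limit or blocked check fails. A minimal userspace sketch with C11 atomics (invented names such as target_queue_ready; none of the kernel's host_lock, single_lun, target_blocked, or starved-list handling) might look like this:

#include <stdatomic.h>
#include <stdbool.h>

struct target {
        atomic_uint busy;        /* commands currently outstanding */
        unsigned int can_queue;  /* 0 means no limit */
};

/* Try to reserve a queue slot; returns true if a command may be issued. */
static bool target_queue_ready(struct target *t)
{
        /* Take the slot first.  atomic_fetch_add returns the old value,
         * i.e. how many commands were already busy; the patch below gets
         * the same number via atomic_inc_return(...) - 1. */
        unsigned int busy = atomic_fetch_add(&t->busy, 1);

        if (t->can_queue > 0 && busy >= t->can_queue) {
                /* Over the per-target limit: release the slot we grabbed. */
                atomic_fetch_sub(&t->busy, 1);
                return false;
        }
        return true;
}

/* Called on command completion to release the slot. */
static void target_queue_done(struct target *t)
{
        atomic_fetch_sub(&t->busy, 1);
}

The point of the commit is that this fast path no longer touches the host-wide lock; host_lock is only taken on the slow paths (single_lun handling, unblocking a blocked target, and moving the device onto the starved list).
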
@@ -294,7 +294,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
         spin_lock_irqsave(shost->host_lock, flags);
         shost->host_busy--;
-        starget->target_busy--;
+        atomic_dec(&starget->target_busy);
         if (unlikely(scsi_host_in_recovery(shost) &&
                      (shost->host_failed || shost->host_eh_scheduled)))
                 scsi_eh_wakeup(shost);
@@ -361,7 +361,7 @@ static inline int scsi_device_is_busy(struct scsi_device *sdev)
 static inline int scsi_target_is_busy(struct scsi_target *starget)
 {
         return ((starget->can_queue > 0 &&
-                 starget->target_busy >= starget->can_queue) ||
+                 atomic_read(&starget->target_busy) >= starget->can_queue) ||
                  starget->target_blocked);
 }
@@ -1279,37 +1279,50 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                            struct scsi_device *sdev)
 {
         struct scsi_target *starget = scsi_target(sdev);
-        int ret = 0;
+        unsigned int busy;
 
-        spin_lock_irq(shost->host_lock);
         if (starget->single_lun) {
+                spin_lock_irq(shost->host_lock);
                 if (starget->starget_sdev_user &&
-                    starget->starget_sdev_user != sdev)
-                        goto out;
+                    starget->starget_sdev_user != sdev) {
+                        spin_unlock_irq(shost->host_lock);
+                        return 0;
+                }
                 starget->starget_sdev_user = sdev;
+                spin_unlock_irq(shost->host_lock);
         }
 
-        if (starget->target_busy == 0 && starget->target_blocked) {
+        busy = atomic_inc_return(&starget->target_busy) - 1;
+        if (starget->target_blocked) {
+                if (busy)
+                        goto starved;
+
                 /*
                  * unblock after target_blocked iterates to zero
                  */
-                if (--starget->target_blocked != 0)
-                        goto out;
+                spin_lock_irq(shost->host_lock);
+                if (--starget->target_blocked != 0) {
+                        spin_unlock_irq(shost->host_lock);
+                        goto out_dec;
+                }
+                spin_unlock_irq(shost->host_lock);
 
                 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                  "unblocking target at zero depth\n"));
         }
 
-        if (scsi_target_is_busy(starget)) {
-                list_move_tail(&sdev->starved_entry, &shost->starved_list);
-                goto out;
-        }
+        if (starget->can_queue > 0 && busy >= starget->can_queue)
+                goto starved;
 
-        scsi_target(sdev)->target_busy++;
-        ret = 1;
+        return 1;
 
-out:
+starved:
+        spin_lock_irq(shost->host_lock);
+        list_move_tail(&sdev->starved_entry, &shost->starved_list);
         spin_unlock_irq(shost->host_lock);
-        return ret;
+out_dec:
+        atomic_dec(&starget->target_busy);
+        return 0;
 }
 
 /*
@@ -1419,7 +1432,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
         spin_unlock(sdev->request_queue->queue_lock);
         spin_lock(shost->host_lock);
         shost->host_busy++;
-        starget->target_busy++;
+        atomic_inc(&starget->target_busy);
         spin_unlock(shost->host_lock);
         spin_lock(sdev->request_queue->queue_lock);
@@ -1589,9 +1602,7 @@ static void scsi_request_fn(struct request_queue *q)
         return;
 
  host_not_ready:
-        spin_lock_irq(shost->host_lock);
-        scsi_target(sdev)->target_busy--;
-        spin_unlock_irq(shost->host_lock);
+        atomic_dec(&scsi_target(sdev)->target_busy);
  not_ready:
         /*
          * lock q, handle tag, requeue req, and decrement device_busy.  We
@@ -291,8 +291,8 @@ struct scsi_target {
         unsigned int            expecting_lun_change:1; /* A device has reported
                                                  * a 3F/0E UA, other devices on
                                                  * the same target will also. */
-        /* commands actually active on LLD. protected by host lock. */
-        unsigned int            target_busy;
+        /* commands actually active on LLD. */
+        atomic_t                target_busy;
         /*
          * LLDs should set this in the slave_alloc host template callout.
          * If set to zero then there is not limit.