Commit 5ee62ae2 authored by Christoph Hellwig's avatar Christoph Hellwig

[PATCH] scsi_request_fn

Okay, when doing some other stuff I looked over this one, and it's
a bit confusing to read:

 - using a goto completed where a simple break would be sufficient
 - using for (;;) for a perfectly fine while loop
 - ...

but what's more interesting is that the spinlock handling in here,
when we switch from sdev_lock/queue_lock to host_lock we
do a spin_unlock_irq followed by a spin_lock_irqsave - but
we just enabled interrupts so the save isn't necessary at all, even
more we can just do spin_unlock/spin_lock when keeping them
disabled.  Also we drop host_lock in the middle of this function,
just to reacquire it a tad later in scsi_dispatch_cmd, but fixing
that needs a bit more thinking as there's another caller for
scsi_dispatch_cmd.
parent 22aab371
...@@ -1141,66 +1141,61 @@ static inline int scsi_host_queue_ready(struct request_queue *q, ...@@ -1141,66 +1141,61 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
* *
* Lock status: IO request lock assumed to be held when called. * Lock status: IO request lock assumed to be held when called.
*/ */
static void scsi_request_fn(request_queue_t *q) static void scsi_request_fn(struct request_queue *q)
{ {
struct scsi_device *sdev = q->queuedata; struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host; struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
struct request *req; struct request *req;
unsigned long flags;
/* /*
* To start with, we keep looping until the queue is empty, or until * To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests. * the host is no longer able to accept any more requests.
*/ */
for (;;) { while (!blk_queue_plugged(q)) {
if (blk_queue_plugged(q))
goto completed;
/* /*
* get next queueable request. We do this early to make sure * get next queueable request. We do this early to make sure
* that the request is fully prepared even if we cannot * that the request is fully prepared even if we cannot
* accept it. * accept it.
*/ */
req = elv_next_request(q); req = elv_next_request(q);
if (!req || !scsi_dev_queue_ready(q, sdev))
if (!req) break;
goto completed;
if (!scsi_dev_queue_ready(q, sdev))
goto completed;
/* /*
* Remove the request from the request list. * Remove the request from the request list.
*/ */
if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0))) if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
sdev->device_busy++; sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
spin_lock_irqsave(shost->host_lock, flags); spin_unlock(q->queue_lock);
if (!scsi_host_queue_ready(q, shost, sdev)) spin_lock(shost->host_lock);
goto host_lock_held;
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
if (sdev->single_lun) { if (sdev->single_lun) {
if (sdev->sdev_target->starget_sdev_user && if (sdev->sdev_target->starget_sdev_user &&
(sdev->sdev_target->starget_sdev_user != sdev)) sdev->sdev_target->starget_sdev_user != sdev)
goto host_lock_held; goto not_ready;
else
sdev->sdev_target->starget_sdev_user = sdev; sdev->sdev_target->starget_sdev_user = sdev;
} }
shost->host_busy++; shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
cmd = req->special;
/* /*
* Should be impossible for a correctly prepared request * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
* please mail the stack trace to linux-scsi@vger.kernel.org * take the lock again.
*/ */
BUG_ON(!cmd); spin_unlock_irq(shost->host_lock);
cmd = req->special;
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org",
__FUNCTION__);
BUG();
}
/* /*
* Finally, initialize any error handling parameters, and set up * Finally, initialize any error handling parameters, and set up
...@@ -1212,18 +1207,14 @@ static void scsi_request_fn(request_queue_t *q) ...@@ -1212,18 +1207,14 @@ static void scsi_request_fn(request_queue_t *q)
* Dispatch the command to the low-level driver. * Dispatch the command to the low-level driver.
*/ */
scsi_dispatch_cmd(cmd); scsi_dispatch_cmd(cmd);
/*
* Now we need to grab the lock again. We are about to mess
* with the request queue and try to find another command.
*/
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
} }
completed:
return; return;
host_lock_held: not_ready:
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irq(shost->host_lock);
/* /*
* lock q, handle tag, requeue req, and decrement device_busy. We * lock q, handle tag, requeue req, and decrement device_busy. We
* must return with queue_lock held. * must return with queue_lock held.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment