Commit 3e17fda6 authored by Christoph Hellwig, committed by James Bottomley

[PATCH] rationalize scsi_queue_next & friends

(1)  The second arg to scsi_queue_next_request() is only ever non-NULL
     inside scsi_lib.c, and is only used in the first conditional inside
     that function - that code is ripped out of scsi_queue_next_request()
     and put into a new helper, scsi_requeue_command().
(2)  Most remaining callers are of the form
	scsi_put_command(cmd);
	scsi_queue_next_request(q, NULL);
     add a new helper, scsi_next_command(), for them (sketched below).
(2b) Many callers of that helper still share a repeated codepath, namely
     everything in scsi_release_request() except the final kfree.
     New helper __scsi_release_request() for those.
(3)  All remaining callers loop over the devices of a host and call
     scsi_queue_next_request() on them - new helper
     scsi_run_host_queues().
(4)  scsi_queue_next_request() is renamed to scsi_run_queue(); the second
     arg is gone and it is static to scsi_lib.c now.
parent 95152a4c
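The most common transformation here is mechanical. A condensed before/after of the repeated caller pattern from point (2), as it appears verbatim in the scsi_reset_provider and scsi_end_request hunks below:

```c
/* Before: every finished-command path repeated this pair, saving the
 * queue pointer first because scsi_put_command() frees cmd.
 */
q = cmd->device->request_queue;
scsi_put_command(cmd);
scsi_queue_next_request(q, NULL);

/* After: one call; scsi_next_command() does the same save-then-run. */
scsi_next_command(cmd);
```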
@@ -176,6 +176,16 @@ struct scsi_request *scsi_allocate_request(struct scsi_device *sdev)
 	return sreq;
 }
 
+void __scsi_release_request(struct scsi_request *sreq)
+{
+	if (likely(sreq->sr_command != NULL)) {
+		struct scsi_cmnd *cmd = sreq->sr_command;
+
+		sreq->sr_command = NULL;
+		scsi_next_command(cmd);
+	}
+}
+
 /*
  * Function:	scsi_release_request
  *
...
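Per point (2b) of the commit message, scsi_release_request() itself presumably reduces to the new helper plus its final kfree(). Its body is collapsed out of this diff, so the following shape is an inference from the commit message, not patch text:

```c
/* Inferred shape of scsi_release_request() after this patch: the shared
 * codepath lives in __scsi_release_request(), only the kfree() remains.
 */
void scsi_release_request(struct scsi_request *sreq)
{
	__scsi_release_request(sreq);
	kfree(sreq);
}
```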
@@ -1412,8 +1412,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	 * now that error recovery is done, we will need to ensure that these
 	 * requests are started.
 	 */
-	list_for_each_entry(sdev, &shost->my_devices, siblings)
-		blk_run_queue(sdev->request_queue);
+	scsi_run_host_queues(shost);
 }
 
 /**
@@ -1649,7 +1648,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
 	struct request req;
 	int rtn;
-	struct request_queue *q;
 
 	scmd->request = &req;
 	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
@@ -1701,8 +1699,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	}
 
 	scsi_delete_timer(scmd);
-	q = scmd->device->request_queue;
-	scsi_put_command(scmd);
-	scsi_queue_next_request(q, NULL);
+	scsi_next_command(scmd);
 	return rtn;
 }
@@ -181,19 +181,12 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
 		 void (*done)(struct scsi_cmnd *),
 		 int timeout, int retries)
 {
-	struct request_queue *q;
-
 	/*
 	 * If the upper level driver is reusing these things, then
 	 * we should release the low-level block now. Another one will
 	 * be allocated later when this request is getting queued.
 	 */
-	if (sreq->sr_command) {
-		q = sreq->sr_command->device->request_queue;
-		scsi_put_command(sreq->sr_command);
-		sreq->sr_command = NULL;
-		scsi_queue_next_request(q, NULL);
-	}
+	__scsi_release_request(sreq);
 
 	/*
 	 * Our own function scsi_done (which marks the host as not busy,
@@ -239,7 +232,6 @@ static void scsi_wait_done(struct scsi_cmnd *cmd)
 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
 		   unsigned bufflen, int timeout, int retries)
 {
-	struct request_queue *q;
 	DECLARE_COMPLETION(wait);
 
 	sreq->sr_request->waiting = &wait;
@@ -250,12 +242,7 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
 	wait_for_completion(&wait);
 	sreq->sr_request->waiting = NULL;
 
-	if (sreq->sr_command) {
-		q = sreq->sr_command->device->request_queue;
-		scsi_put_command(sreq->sr_command);
-		scsi_queue_next_request(q, NULL);
-		sreq->sr_command = NULL;
-	}
+	__scsi_release_request(sreq);
 }
 
 /*
@@ -370,77 +357,26 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 }
 
 /*
- * Function:	scsi_queue_next_request()
+ * Function:	scsi_run_queue()
  *
- * Purpose:	Handle post-processing of completed commands.
+ * Purpose:	Select a proper request queue to serve next
  *
- * Arguments:	cmd	- command that may need to be requeued.
+ * Arguments:	q	- last request's queue
  *
 * Returns:	Nothing
 *
- * Notes:	After command completion, there may be blocks left
- *		over which weren't finished by the previous command
- *		this can be for a number of reasons - the main one is
- *		that a medium error occurred, and the sectors after
- *		the bad block need to be re-read.
- *
- *		If cmd is NULL, it means that the previous command
- *		was completely finished, and we should simply start
- *		a new command, if possible.
- *
- *		This is where a lot of special case code has begun to
- *		accumulate.  It doesn't really affect readability or
- *		anything, but it might be considered architecturally
- *		inelegant.  If more of these special cases start to
- *		accumulate, I am thinking along the lines of implementing
- *		an atexit() like technology that gets run when commands
- *		complete.  I am not convinced that it is worth the
- *		added overhead, however.  Right now as things stand,
- *		there are simple conditional checks, and most hosts
- *		would skip past.
- *
- *		Another possible solution would be to tailor different
- *		handler functions, sort of like what we did in scsi_merge.c.
- *		This is probably a better solution, but the number of different
- *		permutations grows as 2**N, and if too many more special cases
- *		get added, we start to get screwed.
- */
-void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
+ * Notes:	The previous command was completely finished, start
+ *		a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
 {
-	struct scsi_device *sdev;
-	struct Scsi_Host *shost;
+	struct scsi_device *sdev = q->queuedata;
+	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
-	if (cmd != NULL) {
-		/*
-		 * For some reason, we are not done with this request.
-		 * This happens for I/O errors in the middle of the request,
-		 * in which case we need to request the blocks that come after
-		 * the bad sector.
-		 */
-		spin_lock_irqsave(q->queue_lock, flags);
-		cmd->request->special = cmd;
-		if (blk_rq_tagged(cmd->request))
-			blk_queue_end_tag(q, cmd->request);
-
-		/*
-		 * set REQ_SPECIAL - we have a command
-		 * clear REQ_DONTPREP - we assume the sg table has been
-		 *	nuked so we need to set it up again.
-		 */
-		cmd->request->flags |= REQ_SPECIAL;
-		cmd->request->flags &= ~REQ_DONTPREP;
-		__elv_add_request(q, cmd->request, 0, 0);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-
-	sdev = q->queuedata;
 	if (sdev->single_lun)
 		scsi_single_lun_run(sdev);
-	shost = sdev->host;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	while (!list_empty(&shost->starved_list) &&
 	       !shost->host_blocked && !shost->host_self_blocked &&
@@ -477,6 +413,61 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 	blk_run_queue(q);
 }
 
+/*
+ * Function:	scsi_requeue_command()
+ *
+ * Purpose:	Handle post-processing of completed commands.
+ *
+ * Arguments:	q	- queue to operate on
+ *		cmd	- command that may need to be requeued.
+ *
+ * Returns:	Nothing
+ *
+ * Notes:	After command completion, there may be blocks left
+ *		over which weren't finished by the previous command
+ *		this can be for a number of reasons - the main one is
+ *		I/O errors in the middle of the request, in which case
+ *		we need to request the blocks that come after the bad
+ *		sector.
+ */
+static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	cmd->request->special = cmd;
+	if (blk_rq_tagged(cmd->request))
+		blk_queue_end_tag(q, cmd->request);
+
+	/*
+	 * set REQ_SPECIAL - we have a command
+	 * clear REQ_DONTPREP - we assume the sg table has been
+	 *	nuked so we need to set it up again.
+	 */
+	cmd->request->flags |= REQ_SPECIAL;
+	cmd->request->flags &= ~REQ_DONTPREP;
+	__elv_add_request(q, cmd->request, 0, 0);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	scsi_run_queue(q);
+}
+
+void scsi_next_command(struct scsi_cmnd *cmd)
+{
+	struct request_queue *q = cmd->device->request_queue;
+
+	scsi_put_command(cmd);
+	scsi_run_queue(q);
+}
+
+void scsi_run_host_queues(struct Scsi_Host *shost)
+{
+	struct scsi_device *sdev;
+
+	list_for_each_entry(sdev, &shost->my_devices, siblings)
+		scsi_run_queue(sdev->request_queue);
+}
+
 /*
  * Function:	scsi_end_request()
 *
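The new helpers split the old NULL/non-NULL second argument into explicit entry points. Note that scsi_next_command() reads cmd->device->request_queue before scsi_put_command() frees the command, preserving the save-then-run order of the old open-coded pattern. A hypothetical caller (the function name here is illustrative, not from the patch) would choose between them like this:

```c
/* Illustrative completion path, not code from this patch: pick the
 * helper based on whether the request is fully finished.
 */
static void example_complete(struct scsi_cmnd *cmd, int fully_done)
{
	struct request_queue *q = cmd->device->request_queue;

	if (fully_done)
		scsi_next_command(cmd);		/* free cmd, then run queue */
	else
		scsi_requeue_command(q, cmd);	/* leftovers back at the head */
}
```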
@@ -517,7 +508,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 		 * Bleah.  Leftovers again.  Stick the leftovers in
 		 * the front of the queue, and goose the queue again.
 		 */
-		scsi_queue_next_request(q, cmd);
+		scsi_requeue_command(q, cmd);
 	}
 	return cmd;
 }
@@ -534,8 +525,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	 * This will goose the queue request function at the end, so we don't
 	 * need to worry about launching another command.
 	 */
-	scsi_put_command(cmd);
-	scsi_queue_next_request(q, NULL);
+	scsi_next_command(cmd);
 	return NULL;
 }
@@ -661,6 +651,18 @@ static struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
 *		In other words, if there are no bounce buffers
 *		(the normal case for most drivers), we don't need
 *		the logic to deal with cleaning up afterwards.
+ *
+ *		We must do one of several things here:
+ *
+ *		a) Call scsi_end_request.  This will finish off the
+ *		   specified number of sectors.  If we are done, the
+ *		   command block will be released, and the queue
+ *		   function will be goosed.  If we are not done, then
+ *		   scsi_end_request will directly goose the queue.
+ *
+ *		b) We can just use scsi_requeue_command() here.  This would
+ *		   be used if we just wanted to retry, for example.
+ *
 */
 void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 			int block_sectors)
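Condensed from the hunks that follow, the (a)/(b) choice in practice: retryable sense conditions take option (b) and return immediately, everything else funnels into option (a). This is an illustrative condensation of scsi_io_completion(), not a verbatim hunk; `retryable_sense_condition` stands in for the specific sense-data checks below:

```c
/* Illustrative condensation of the paths in scsi_io_completion() */
if (retryable_sense_condition) {		/* e.g. becoming ready */
	scsi_requeue_command(q, cmd);		/* option (b): retry as-is */
	return;
}
cmd = scsi_end_request(cmd, 0, this_count, 1);	/* option (a) */
```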
@@ -671,19 +673,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 	struct request *req = cmd->request;
 	int clear_errors = 1;
 
-	/*
-	 * We must do one of several things here:
-	 *
-	 * Call scsi_end_request.  This will finish off the specified
-	 * number of sectors.  If we are done, the command block will
-	 * be released, and the queue function will be goosed.  If we
-	 * are not done, then scsi_end_request will directly goose
-	 * the queue.
-	 *
-	 * We can just use scsi_queue_next_request() here.  This
-	 * would be used if we just wanted to retry, for example.
-	 *
-	 */
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -783,7 +772,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 			 */
 			if (cmd->sense_buffer[12] == 0x04 &&
 			    cmd->sense_buffer[13] == 0x01) {
-				scsi_queue_next_request(q, cmd);
+				scsi_requeue_command(q, cmd);
 				return;
 			}
 			if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
@@ -802,7 +791,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 				 * media change, so we just retry the
 				 * request and see what happens.
 				 */
-				scsi_queue_next_request(q, cmd);
+				scsi_requeue_command(q, cmd);
 				return;
 			}
 		}
@@ -822,7 +811,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 				 * This will cause a retry with a 6-byte
 				 * command.
 				 */
-				scsi_queue_next_request(q, cmd);
+				scsi_requeue_command(q, cmd);
 				result = 0;
 			} else {
 				cmd = scsi_end_request(cmd, 0, this_count, 1);
@@ -854,7 +843,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 			 * recovery reasons.  Just retry the request
 			 * and see what happens.
 			 */
-			scsi_queue_next_request(q, cmd);
+			scsi_requeue_command(q, cmd);
 			return;
 		}
 		if (result) {
@@ -1320,15 +1309,8 @@ void scsi_block_requests(struct Scsi_Host *shost)
 */
 void scsi_unblock_requests(struct Scsi_Host *shost)
 {
-	struct scsi_device *sdev;
-
 	shost->host_self_blocked = 0;
-
-	/*
-	 * Now that we are unblocked, try to start the queues.
-	 */
-	list_for_each_entry(sdev, &shost->my_devices, siblings)
-		scsi_queue_next_request(sdev->request_queue, NULL);
+	scsi_run_host_queues(shost);
 }
 
 /*
...
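scsi_block_requests()/scsi_unblock_requests() are the pair that low-level drivers call around operations that must quiesce the host; after this patch, unblocking restarts every device queue through scsi_run_host_queues(). A hedged usage sketch (the reset handler shown is illustrative, not from this patch):

```c
/* Illustrative LLDD pattern: quiesce the host, perform an internal
 * reset, then let scsi_unblock_requests() clear host_self_blocked and
 * rerun all device queues via the new scsi_run_host_queues().
 */
static void example_host_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* sets host_self_blocked */
	/* ... reset the adapter hardware here ... */
	scsi_unblock_requests(shost);	/* clears it and reruns all queues */
}
```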
@@ -73,6 +73,7 @@ extern int scsi_get_device_flags(unsigned char *vendor, unsigned char *model);
 extern int scsi_insert_special_req(struct scsi_request *sreq, int);
 extern void scsi_init_cmd_from_req(struct scsi_cmnd *cmd,
 		struct scsi_request *sreq);
+extern void __scsi_release_request(struct scsi_request *sreq);
 
 /* scsi_error.c */
 extern void scsi_times_out(struct scsi_cmnd *cmd);
@@ -84,8 +85,8 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
 extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
 extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
 extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
-extern void scsi_queue_next_request(struct request_queue *q,
-		struct scsi_cmnd *cmd);
+extern void scsi_next_command(struct scsi_cmnd *cmd);
+extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
 extern void scsi_free_queue(struct request_queue *q);
 extern int scsi_init_queue(void);
...