Commit bee139b3 authored by James Bottomley

Merge raven.il.steeleye.com:/home/jejb/BK/scsi-aic7xxx-2.5

into raven.il.steeleye.com:/home/jejb/BK/scsi-misc-2.5
parents e1da10d0 f4760183
@@ -383,6 +383,7 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template *shost_tp, int xtr_bytes)
scsi_assign_lock(shost, &shost->default_lock);
INIT_LIST_HEAD(&shost->my_devices);
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
shost->dma_channel = 0xff;
@@ -619,7 +620,6 @@ void scsi_host_busy_dec_and_test(struct Scsi_Host *shost, Scsi_Device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
sdev->device_busy--;
if (shost->in_recovery && shost->host_failed &&
(shost->host_busy == shost->host_failed))
{
......
@@ -380,6 +380,7 @@ struct Scsi_Host
struct scsi_host_cmd_pool *cmd_pool;
spinlock_t free_list_lock;
struct list_head free_list; /* backup store of cmd structs */
struct list_head starved_list;
spinlock_t default_lock;
spinlock_t *host_lock;
@@ -470,12 +471,6 @@ struct Scsi_Host
*/
unsigned reverse_ordering:1;
/*
* Indicates that one or more devices on this host were starved, and
* that they need to be fed when the host becomes less busy.
*/
unsigned some_device_starved:1;
/*
* Host has rejected a command because it was busy.
*/
......
@@ -447,8 +447,6 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
host = SCpnt->device->host;
ASSERT_LOCK(host->host_lock, 0);
/* Assign a unique nonzero serial_number. */
if (++serial_number == 0)
serial_number = 1;
@@ -574,8 +572,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
{
struct Scsi_Host *host = SCpnt->device->host;
ASSERT_LOCK(host->host_lock, 0);
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
SRpnt->sr_command = SCpnt;
@@ -819,12 +815,11 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
struct Scsi_Host *host;
Scsi_Device *device;
Scsi_Request * SRpnt;
unsigned long flags;
host = SCpnt->device->host;
device = SCpnt->device;
ASSERT_LOCK(host->host_lock, 0);
/*
* We need to protect the decrement, as otherwise a race condition
* would exist. Fiddling with SCpnt isn't a problem as the
@@ -833,6 +828,9 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
* shared.
*/
scsi_host_busy_dec_and_test(host, device);
spin_lock_irqsave(SCpnt->device->request_queue->queue_lock, flags);
SCpnt->device->device_busy--;
spin_unlock_irqrestore(SCpnt->device->request_queue->queue_lock, flags);
/*
* Clear the flags which say that the device/host is no longer
......
@@ -417,7 +417,8 @@ extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt);
extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
int block_sectors);
extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost);
extern void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd);
extern request_queue_t *scsi_alloc_queue(struct scsi_device *sdev);
extern void scsi_free_queue(request_queue_t *q);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
@@ -530,6 +531,16 @@ struct scsi_dev_info_list {
extern struct list_head scsi_dev_info_list;
extern int scsi_dev_info_list_add_str(char *);
/*
* scsi_target: representation of a scsi target; for now, this is only
* used for single_lun devices. If no one has active IO to the target,
* starget_sdev_user is NULL, else it points to the active sdev.
*/
struct scsi_target {
struct scsi_device *starget_sdev_user;
unsigned int starget_refcnt;
};
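For illustration, a minimal userspace model of the single-LUN gate this struct
enables (hypothetical stand-in types, no locking; not part of the commit):

#include <stdio.h>
#include <stddef.h>

struct model_sdev;

struct model_target {
	struct model_sdev *sdev_user;	/* plays starget_sdev_user */
};

struct model_sdev {
	struct model_target *target;
	const char *name;
};

/* Return 1 if sdev may issue IO now, claiming the target if it is free. */
static int single_lun_try_claim(struct model_sdev *sdev)
{
	struct model_target *t = sdev->target;

	if (t->sdev_user && t->sdev_user != sdev)
		return 0;		/* another LUN owns the target */
	t->sdev_user = sdev;		/* claim it for this LUN */
	return 1;
}

int main(void)
{
	struct model_target t = { NULL };
	struct model_sdev a = { &t, "lun0" }, b = { &t, "lun1" };

	printf("%s: %d\n", a.name, single_lun_try_claim(&a));	/* 1 */
	printf("%s: %d\n", b.name, single_lun_try_claim(&b));	/* 0 */
	t.sdev_user = NULL;	/* IO completion clears the owner */
	printf("%s: %d\n", b.name, single_lun_try_claim(&b));	/* 1 */
	return 0;
}

The real check runs under host_lock in scsi_request_fn below, and
scsi_single_lun_run() clears starget_sdev_user on completion.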
/*
* The scsi_device struct contains what we know about each given scsi
* device.
@@ -554,8 +565,10 @@ struct scsi_device {
struct Scsi_Host *host;
request_queue_t *request_queue;
volatile unsigned short device_busy; /* commands actually active on low-level */
spinlock_t sdev_lock; /* also the request queue_lock */
spinlock_t list_lock;
struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry;
Scsi_Cmnd *current_cmnd; /* currently active command */
unsigned short queue_depth; /* How deep of a queue we want */
unsigned short last_queue_full_depth; /* These two are used by */
@@ -586,6 +599,7 @@ struct scsi_device {
unsigned char current_tag; /* current tag */
// unsigned char sync_min_period; /* Not less than this period */
// unsigned char sync_max_offset; /* Not greater than this offset */
struct scsi_target *sdev_target; /* used only for single_lun */
unsigned online:1;
unsigned writeable:1;
@@ -616,8 +630,6 @@ struct scsi_device {
* because we did a bus reset. */
unsigned ten:1; /* support ten byte read / write */
unsigned remap:1; /* support remapping */
unsigned starved:1; /* unable to process commands because
host busy */
// unsigned sync:1; /* Sync transfer state, managed by host */
// unsigned wide:1; /* WIDE transfer state, managed by host */
......
@@ -431,8 +431,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
unsigned long flags;
int rtn = SUCCESS;
ASSERT_LOCK(host->host_lock, 0);
/*
* we will use a queued command if possible, otherwise we will
* emulate the queuing and calling of completion function ourselves.
@@ -1405,8 +1403,6 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
struct scsi_device *sdev;
unsigned long flags;
ASSERT_LOCK(shost->host_lock, 0);
/*
* If the door was locked, we need to insert a door lock request
* onto the head of the SCSI request queue for the device. There
@@ -1434,18 +1430,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* now that error recovery is done, we will need to ensure that these
* requests are started.
*/
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->my_devices, siblings) {
if ((shost->can_queue > 0 &&
(shost->host_busy >= shost->can_queue))
|| (shost->host_blocked)
|| (shost->host_self_blocked)) {
break;
}
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
__blk_run_queue(sdev->request_queue);
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
@@ -1681,6 +1670,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
struct request req;
int rtn;
struct request_queue *q;
scmd->request = &req;
memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
@@ -1735,6 +1725,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
}
scsi_delete_timer(scmd);
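/*
 * Note: the queue pointer is saved before scsi_put_command(), which can
 * free scmd; scmd->device must not be dereferenced after the put.
 */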
q = scmd->device->request_queue;
scsi_put_command(scmd);
scsi_queue_next_request(q, NULL);
return rtn;
}
@@ -92,6 +92,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
unsigned long flags;
SCSI_LOG_MLQUEUE(1,
printk("Inserting command %p into mlqueue\n", cmd));
@@ -130,6 +131,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* Decrement the counters, since these commands are no longer
* active on the host/device.
*/
spin_lock_irqsave(device->request_queue->queue_lock, flags);
device->device_busy--;
spin_unlock_irqrestore(device->request_queue->queue_lock, flags);
scsi_host_busy_dec_and_test(host, device);
/*
@@ -174,14 +178,18 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
void (*done)(struct scsi_cmnd *),
int timeout, int retries)
{
struct request_queue *q;
/*
* If the upper level driver is reusing these things, then
* we should release the low-level block now. Another one will
* be allocated later when this request is getting queued.
*/
if (sreq->sr_command) {
q = sreq->sr_command->device->request_queue;
scsi_put_command(sreq->sr_command);
sreq->sr_command = NULL;
scsi_queue_next_request(q, NULL);
}
/*
@@ -228,6 +236,7 @@ static void scsi_wait_done(struct scsi_cmnd *cmd)
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
unsigned bufflen, int timeout, int retries)
{
struct request_queue *q;
DECLARE_COMPLETION(wait);
sreq->sr_request->waiting = &wait;
@@ -239,7 +248,9 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
sreq->sr_request->waiting = NULL;
if (sreq->sr_command) {
q = sreq->sr_command->device->request_queue;
scsi_put_command(sreq->sr_command);
scsi_queue_next_request(q, NULL);
sreq->sr_command = NULL;
}
}
@@ -315,6 +326,53 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
cmd->underflow = cmd->old_underflow;
}
/*
* Called for single_lun devices on IO completion. Clear starget_sdev_user,
* and call __blk_run_queue for all of the scsi_devices on the target,
* starting with current_sdev.
*
* Called with *no* scsi locks held.
*/
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
struct scsi_device *sdev;
unsigned long flags, flags2;
spin_lock_irqsave(current_sdev->host->host_lock, flags);
WARN_ON(!current_sdev->sdev_target->starget_sdev_user);
current_sdev->sdev_target->starget_sdev_user = NULL;
spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
/*
* Call __blk_run_queue for all LUNs on the target, starting with
* current_sdev. We race with others (to set starget_sdev_user),
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
spin_lock_irqsave(current_sdev->request_queue->queue_lock, flags2);
__blk_run_queue(current_sdev->request_queue);
spin_unlock_irqrestore(current_sdev->request_queue->queue_lock, flags2);
spin_lock_irqsave(current_sdev->host->host_lock, flags);
if (current_sdev->sdev_target->starget_sdev_user) {
/*
* After unlock, this races with anyone clearing
* starget_sdev_user, but we (should) always enter this
* function again, avoiding any problems.
*/
spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
return;
}
spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
list_for_each_entry(sdev, &current_sdev->same_target_siblings,
same_target_siblings) {
spin_lock_irqsave(sdev->request_queue->queue_lock, flags2);
__blk_run_queue(sdev->request_queue);
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags2);
}
}
/*
* Function: scsi_queue_next_request()
*
@@ -351,16 +409,12 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
* permutations grows as 2**N, and if too many more special cases
* get added, we start to get screwed.
*/
static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
{
struct scsi_device *sdev, *sdev2;
struct scsi_device *sdev;
struct Scsi_Host *shost;
unsigned long flags;
int all_clear;
ASSERT_LOCK(q->queue_lock, 0);
spin_lock_irqsave(q->queue_lock, flags);
if (cmd != NULL) {
/*
@@ -369,6 +423,7 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
* in which case we need to request the blocks that come after
* the bad sector.
*/
spin_lock_irqsave(q->queue_lock, flags);
cmd->request->special = cmd;
if (blk_rq_tagged(cmd->request))
blk_queue_end_tag(q, cmd->request);
@@ -381,62 +436,52 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
cmd->request->flags |= REQ_SPECIAL;
cmd->request->flags &= ~REQ_DONTPREP;
__elv_add_request(q, cmd->request, 0, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
* Just hit the requeue function for the queue.
*/
__blk_run_queue(q);
sdev = q->queuedata;
shost = sdev->host;
/*
* If this is a single-lun device, and we are currently finished
* with this device, then see if we need to get another device
* started. FIXME(eric) - if this function gets too cluttered
* with special case code, then spin off separate versions and
* use function pointers to pick the right one.
*/
if (sdev->single_lun && sdev->device_busy == 0 &&
!shost->host_blocked && !shost->host_self_blocked &&
!((shost->can_queue > 0) && (shost->host_busy >= shost->can_queue))
&& elv_queue_empty(q)) {
list_for_each_entry(sdev2, &sdev->same_target_siblings,
same_target_siblings) {
if (!sdev2->device_blocked &&
!elv_queue_empty(sdev2->request_queue)) {
__blk_run_queue(sdev2->request_queue);
break;
}
}
}
if (sdev->single_lun)
scsi_single_lun_run(sdev);
/*
* Now see whether there are other devices on the bus which
* might be starved. If so, hit the request function. If we
* don't find any, then it is safe to reset the flag. If we
* find any device that is starved, it isn't safe to reset the
* flag as the queue function releases the lock and thus some
* other device might have become starved along the way.
*/
all_clear = 1;
if (shost->some_device_starved) {
list_for_each_entry(sdev, &shost->my_devices, siblings) {
if (shost->can_queue > 0 &&
shost->host_busy >= shost->can_queue)
break;
if (shost->host_blocked || shost->host_self_blocked)
break;
if (sdev->device_blocked || !sdev->starved)
continue;
__blk_run_queue(sdev->request_queue);
all_clear = 0;
}
shost = sdev->host;
spin_lock_irqsave(shost->host_lock, flags);
while (!list_empty(&shost->starved_list) &&
!shost->host_blocked && !shost->host_self_blocked &&
!((shost->can_queue > 0) &&
(shost->host_busy >= shost->can_queue))) {
/*
* As long as shost is accepting commands and we have
* starved queues, call __blk_run_queue. scsi_request_fn
* drops the queue_lock and can add us back to the
* starved_list.
*
* host_lock protects the starved_list and starved_entry.
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
sdev = list_entry(shost->starved_list.next,
struct scsi_device, starved_entry);
list_del_init(&sdev->starved_entry);
spin_unlock_irqrestore(shost->host_lock, flags);
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
__blk_run_queue(sdev->request_queue);
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
if (sdev == NULL && all_clear)
shost->some_device_starved = 0;
spin_lock_irqsave(shost->host_lock, flags);
if (unlikely(!list_empty(&sdev->starved_entry)))
/*
* sdev lost a race, and was put back on the
* starved list. This is unlikely, but without this
* check we could in theory loop forever.
*/
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
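As a sketch of the handoff above, a self-contained model of the starved-list
discipline (an array stands in for the list_head FIFO; hypothetical names,
single-threaded, not from the commit):

#include <stdio.h>

#define CAN_QUEUE 2

static int host_busy;
static int starved[8], nstarved;	/* FIFO stand-in for starved_list */

static void run_queue(int dev)		/* stand-in for __blk_run_queue */
{
	if (host_busy < CAN_QUEUE) {
		host_busy++;
		printf("dev %d dispatched\n", dev);
	} else {
		starved[nstarved++] = dev;	/* the request fn re-adds it */
		printf("dev %d starved\n", dev);
	}
}

static void complete_io(void)		/* mirrors scsi_queue_next_request */
{
	host_busy--;
	while (nstarved && host_busy < CAN_QUEUE) {
		int i, dev = starved[0];	/* pop the head entry */

		for (i = 1; i < nstarved; i++)
			starved[i - 1] = starved[i];
		nstarved--;
		run_queue(dev);
	}
}

int main(void)
{
	int dev;

	for (dev = 0; dev < 4; dev++)
		run_queue(dev);		/* devs 2 and 3 starve */
	complete_io();			/* dev 2 takes the freed slot */
	complete_io();			/* then dev 3 */
	return 0;
}

The kernel version must additionally handle the race where scsi_request_fn
puts the device straight back on the list, hence the list_empty() recheck.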
@@ -470,8 +515,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
struct request *req = cmd->request;
unsigned long flags;
ASSERT_LOCK(q->queue_lock, 0);
/*
* If there are blocks left over at the end, set up the command
* to queue the remainder of them.
@@ -569,8 +612,6 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
struct request *req = cmd->request;
ASSERT_LOCK(cmd->device->host->host_lock, 0);
/*
* Free up any indirection buffers we allocated for DMA purposes.
*/
@@ -651,8 +692,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
* would be used if we just wanted to retry, for example.
*
*/
ASSERT_LOCK(q->queue_lock, 0);
/*
* Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the
@@ -923,22 +962,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL;
}
/*
* The target associated with myself can only handle one active command at
* a time. Scan through all of the luns on the same target as myself,
* return 1 if any are active.
*/
static int check_all_luns(struct scsi_device *myself)
{
struct scsi_device *sdev;
list_for_each_entry(sdev, &myself->same_target_siblings,
same_target_siblings)
if (sdev->device_busy)
return 1;
return 0;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct Scsi_Device_Template *sdt;
@@ -1039,6 +1062,74 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_OK;
}
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
* return 0.
*
* Called with the queue_lock held.
*/
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
if (sdev->device_busy >= sdev->queue_depth)
return 0;
if (sdev->device_busy == 0 && sdev->device_blocked) {
/*
* unblock after device_blocked iterates to zero
*/
if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d (%d:%d) unblocking device at"
" zero depth\n", sdev->host->host_no,
sdev->id, sdev->lun));
} else {
blk_plug_device(q);
return 0;
}
}
if (sdev->device_blocked)
return 0;
return 1;
}
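The same "iterate to zero" countdown is used for host_blocked below. A tiny
stand-alone demonstration of the semantics (assumed values, not kernel code):

#include <stdio.h>

int main(void)
{
	int device_blocked = 3, device_busy = 0, attempt;

	for (attempt = 1; attempt <= 4; attempt++) {
		int ready;

		if (device_busy == 0 && device_blocked)
			ready = (--device_blocked == 0);
		else
			ready = !device_blocked;
		printf("attempt %d: blocked=%d ready=%d\n",
		       attempt, device_blocked, ready);
	}
	return 0;
}

Each queue run while the device is idle decrements the counter; the device
only becomes ready on the run that takes it to zero.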
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
* returned, else IO can hang.
*
* Called with host_lock held.
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
if (shost->in_recovery)
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
* unblock after host_blocked iterates to zero
*/
if (--shost->host_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else {
blk_plug_device(q);
return 0;
}
}
if (!list_empty(&sdev->starved_entry))
return 0;
if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
shost->host_blocked || shost->host_self_blocked) {
list_add_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
return 1;
}
/*
* Function: scsi_request_fn()
*
@@ -1056,92 +1147,61 @@ static void scsi_request_fn(request_queue_t *q)
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd;
struct request *req;
ASSERT_LOCK(q->queue_lock, 1);
unsigned long flags;
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
*/
for (;;) {
if (blk_queue_plugged(q))
goto completed;
/*
* Check this again - each time we loop through we will have
* released the lock and grabbed it again, so each time
* we need to check to see if the queue is plugged or not.
* get next queueable request. We do this early to make sure
* that the request is fully prepared even if we cannot
* accept it.
*/
if (shost->in_recovery || blk_queue_plugged(q))
return;
if (sdev->device_busy >= sdev->queue_depth)
break;
if (sdev->single_lun && check_all_luns(sdev))
break;
req = elv_next_request(q);
if (shost->host_busy == 0 && shost->host_blocked) {
/* unblock after host_blocked iterates to zero */
if (--shost->host_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else {
if (!req) {
/*
* If the device is busy, a returning I/O will
* restart the queue. Otherwise, we have to plug
* the queue
*/
if (sdev->device_busy == 0)
blk_plug_device(q);
break;
}
goto completed;
}
if (sdev->device_busy == 0 && sdev->device_blocked) {
/* unblock after device_blocked iterates to zero */
if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d (%d:%d) unblocking device at zero depth\n",
shost->host_no, sdev->id, sdev->lun));
} else {
blk_plug_device(q);
break;
}
}
if (!scsi_dev_queue_ready(q, sdev))
goto completed;
/*
* If the device cannot accept another request, then quit.
* Remove the request from the request list.
*/
if (sdev->device_blocked)
break;
if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
shost->host_blocked || shost->host_self_blocked) {
/*
* If we are unable to process any commands at all for
* this device, then we consider it to be starved.
* What this means is that there are no outstanding
* commands for this device and hence we need a
* little help getting it started again
* once the host isn't quite so busy.
*/
if (sdev->device_busy == 0) {
sdev->starved = 1;
shost->some_device_starved = 1;
}
break;
} else
sdev->starved = 0;
if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
blkdev_dequeue_request(req);
/*
* get next queueable request. We do this early to make sure
* that the request is fully prepared even if we cannot
* accept it. If there is no request, we'll detect this
* lower down.
*/
req = elv_next_request(q);
sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
if (!req) {
/* If the device is busy, a returning I/O
* will restart the queue. Otherwise, we have
* to plug the queue */
if(sdev->device_busy == 0)
blk_plug_device(q);
break;
spin_lock_irqsave(shost->host_lock, flags);
if (!scsi_host_queue_ready(q, shost, sdev))
goto host_lock_held;
if (sdev->single_lun) {
if (sdev->sdev_target->starget_sdev_user &&
(sdev->sdev_target->starget_sdev_user != sdev))
goto host_lock_held;
else
sdev->sdev_target->starget_sdev_user = sdev;
}
shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
cmd = req->special;
/*
@@ -1150,25 +1210,6 @@ static void scsi_request_fn(request_queue_t *q)
*/
BUG_ON(!cmd);
/*
* Finally, before we release the lock, we copy the
* request to the command block, and remove the
* request from the request list. Note that we always
* operate on the queue head - there is absolutely no
* reason to search the list, because all of the
* commands in this queue are for the same device.
*/
if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
blkdev_dequeue_request(req);
/*
* Now bump the usage count for both the host and the
* device.
*/
shost->host_busy++;
sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
/*
* Finally, initialize any error handling parameters, and set up
* the timers for timeouts.
@@ -1186,6 +1227,24 @@ static void scsi_request_fn(request_queue_t *q)
*/
spin_lock_irq(q->queue_lock);
}
completed:
return;
host_lock_held:
spin_unlock_irqrestore(shost->host_lock, flags);
/*
* lock q, handle tag, requeue req, and decrement device_busy. We
* must return with queue_lock held.
*
* Decrementing device_busy without checking it is OK, as all such
* cases (host limits or settings) should run the queue at some
* later time.
*/
spin_lock_irq(q->queue_lock);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
__elv_add_request(q, req, 0, 0);
sdev->device_busy--;
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
@@ -1207,15 +1266,20 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return BLK_BOUNCE_HIGH;
}
request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
request_queue_t *scsi_alloc_queue(struct scsi_device *sdev)
{
request_queue_t *q;
struct Scsi_Host *shost;
q = kmalloc(sizeof(*q), GFP_ATOMIC);
if (!q)
return NULL;
memset(q, 0, sizeof(*q));
/*
* XXX move host code to scsi_register
*/
shost = sdev->host;
if (!shost->max_sectors) {
/*
* Driver imposes no hard sector transfer limit.
@@ -1224,7 +1288,7 @@ request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
}
blk_init_queue(q, scsi_request_fn, shost->host_lock);
blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_max_hw_segments(q, shost->sg_tablesize);
......
@@ -387,7 +387,7 @@ static void print_inquiry(unsigned char *inq_result)
* Scsi_Device pointer, or NULL on failure.
**/
static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
struct request_queue **q, uint channel, uint id, uint lun)
uint channel, uint id, uint lun)
{
struct scsi_device *sdev, *device;
@@ -407,6 +407,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
spin_lock_init(&sdev->list_lock);
/*
@@ -421,14 +422,10 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
*/
sdev->borken = 1;
if (!q || *q == NULL) {
sdev->request_queue = scsi_alloc_queue(shost);
if (!sdev->request_queue)
goto out_free_dev;
} else {
sdev->request_queue = *q;
*q = NULL;
}
spin_lock_init(&sdev->sdev_lock);
sdev->request_queue = scsi_alloc_queue(sdev);
if (!sdev->request_queue)
goto out_free_dev;
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
@@ -468,10 +465,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
return sdev;
out_free_queue:
if (q && sdev->request_queue) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
} else if (sdev->request_queue)
if (sdev->request_queue)
scsi_free_queue(sdev->request_queue);
out_free_dev:
@@ -491,6 +485,8 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
**/
static void scsi_free_sdev(struct scsi_device *sdev)
{
unsigned long flags;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
@@ -500,6 +496,15 @@ static void scsi_free_sdev(struct scsi_device *sdev)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->inquiry)
kfree(sdev->inquiry);
spin_lock_irqsave(sdev->host->host_lock, flags);
list_del(&sdev->starved_entry);
if (sdev->single_lun) {
sdev->sdev_target->starget_refcnt--;
if (sdev->sdev_target->starget_refcnt == 0)
kfree(sdev->sdev_target);
}
spin_unlock_irqrestore(sdev->host->host_lock, flags);
kfree(sdev);
}
@@ -1135,6 +1140,10 @@ static void scsi_probe_lun(Scsi_Request *sreq, char *inq_result,
static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
char *inq_result, int *bflags)
{
struct scsi_device *sdev_sibling;
struct scsi_target *starget;
unsigned long flags;
/*
* XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev.
@@ -1256,10 +1265,38 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
/*
* If we need to allow I/O to only one of the luns attached to
* this target id at a time, then we set this flag.
* this target id at a time, set single_lun, and allocate or modify
* sdev_target.
*/
if (*bflags & BLIST_SINGLELUN)
if (*bflags & BLIST_SINGLELUN) {
sdev->single_lun = 1;
spin_lock_irqsave(sdev->host->host_lock, flags);
starget = NULL;
/*
* Search for an existing target for this sdev.
*/
list_for_each_entry(sdev_sibling, &sdev->same_target_siblings,
same_target_siblings) {
if (sdev_sibling->sdev_target != NULL) {
starget = sdev_sibling->sdev_target;
break;
}
}
if (!starget) {
starget = kmalloc(sizeof(*starget), GFP_ATOMIC);
if (!starget) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
spin_unlock_irqrestore(sdev->host->host_lock,
flags);
return SCSI_SCAN_NO_RESPONSE;
}
starget->starget_refcnt = 0;
starget->starget_sdev_user = NULL;
}
starget->starget_refcnt++;
sdev->sdev_target = starget;
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
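A compact model of the sdev_target lifetime rules used here and in
scsi_free_sdev() above (simplified, hypothetical names; error handling and
locking omitted):

#include <stdio.h>
#include <stdlib.h>

struct model_target { int refcnt; };

/* First LUN on the target allocates; every LUN takes a reference. */
static struct model_target *target_attach(struct model_target *shared)
{
	if (!shared)
		shared = calloc(1, sizeof(*shared));
	if (shared)
		shared->refcnt++;
	return shared;
}

/* Last LUN off the target frees the shared structure. */
static struct model_target *target_detach(struct model_target *shared)
{
	if (--shared->refcnt == 0) {
		free(shared);
		return NULL;
	}
	return shared;
}

int main(void)
{
	struct model_target *t = NULL;

	t = target_attach(t);	/* LUN 0: allocates, refcnt = 1 */
	t = target_attach(t);	/* LUN 1: shares, refcnt = 2 */
	printf("refcnt=%d\n", t->refcnt);
	t = target_detach(t);	/* refcnt = 1 */
	t = target_detach(t);	/* refcnt = 0: freed */
	printf("freed=%d\n", t == NULL);
	return 0;
}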
/* if the device needs this changing, it may do so in the detect
* function */
@@ -1288,15 +1325,15 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
* SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct Scsi_Host *host,
struct request_queue **q, uint channel, uint id, uint lun,
int *bflagsp, struct scsi_device **sdevp)
uint channel, uint id, uint lun, int *bflagsp,
struct scsi_device **sdevp)
{
struct scsi_device *sdev;
struct scsi_request *sreq;
unsigned char *result;
int bflags, res = SCSI_SCAN_NO_RESPONSE;
sdev = scsi_alloc_sdev(host, q, channel, id, lun);
sdev = scsi_alloc_sdev(host, channel, id, lun);
if (!sdev)
goto out;
sreq = scsi_allocate_request(sdev);
@@ -1350,13 +1387,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
if (res == SCSI_SCAN_LUN_PRESENT) {
if (sdevp)
*sdevp = sdev;
} else {
if (q) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
}
} else
scsi_free_sdev(sdev);
}
out:
return res;
}
@@ -1374,9 +1406,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
*
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
struct request_queue **q, uint channel, uint id,
int bflags, int lun0_res, int scsi_level)
static void scsi_sequential_lun_scan(struct Scsi_Host *shost, uint channel,
uint id, int bflags, int lun0_res, int scsi_level)
{
unsigned int sparse_lun, lun, max_dev_lun;
@@ -1444,7 +1475,7 @@ static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
* sparse_lun.
*/
for (lun = 1; lun < max_dev_lun; ++lun)
if ((scsi_probe_and_add_lun(shost, q, channel, id, lun,
if ((scsi_probe_and_add_lun(shost, channel, id, lun,
NULL, NULL) != SCSI_SCAN_LUN_PRESENT) && !sparse_lun)
return;
}
@@ -1497,8 +1528,7 @@ static int scsilun_to_int(ScsiLun *scsilun)
* 0: scan completed (or no memory, so further scanning is futile)
* 1: no report lun scan, or not configured
**/
static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
int bflags)
static int scsi_report_lun_scan(Scsi_Device *sdev, int bflags)
{
#ifdef CONFIG_SCSI_REPORT_LUNS
@@ -1659,8 +1689,8 @@ static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
} else {
int res;
res = scsi_probe_and_add_lun(sdev->host, q,
sdev->channel, sdev->id, lun, NULL, NULL);
res = scsi_probe_and_add_lun(sdev->host, sdev->channel,
sdev->id, lun, NULL, NULL);
if (res == SCSI_SCAN_NO_RESPONSE) {
/*
* Got some results, but now none, abort.
@@ -1688,8 +1718,7 @@ struct scsi_device *scsi_add_device(struct Scsi_Host *shost,
struct scsi_device *sdev;
int error = -ENODEV, res;
res = scsi_probe_and_add_lun(shost, NULL, channel, id, lun,
NULL, &sdev);
res = scsi_probe_and_add_lun(shost, channel, id, lun, NULL, &sdev);
if (res == SCSI_SCAN_LUN_PRESENT)
error = scsi_attach_device(sdev);
@@ -1730,8 +1759,8 @@ int scsi_remove_device(struct scsi_device *sdev)
* First try a REPORT LUN scan, if that does not scan the target, do a
* sequential scan of LUNs on the target id.
**/
static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
unsigned int channel, unsigned int id)
static void scsi_scan_target(struct Scsi_Host *shost, unsigned int channel,
unsigned int id)
{
int bflags = 0;
int res;
@@ -1747,14 +1776,14 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
* Scan LUN 0; if there is some response, scan further. Ideally, we
* would not configure LUN 0 until all LUNs are scanned.
*/
res = scsi_probe_and_add_lun(shost, q, channel, id, 0, &bflags, &sdev);
res = scsi_probe_and_add_lun(shost, channel, id, 0, &bflags, &sdev);
if (res == SCSI_SCAN_LUN_PRESENT) {
if (scsi_report_lun_scan(sdev, q, bflags) != 0)
if (scsi_report_lun_scan(sdev, bflags) != 0)
/*
* The REPORT LUN did not scan the target,
* do a sequential scan.
*/
scsi_sequential_lun_scan(shost, q, channel, id, bflags,
scsi_sequential_lun_scan(shost, channel, id, bflags,
res, sdev->scsi_level);
} else if (res == SCSI_SCAN_TARGET_PRESENT) {
/*
@@ -1763,7 +1792,7 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
* sequential lun scan with a bflags of SPARSELUN and
* a default scsi level of SCSI_2
*/
scsi_sequential_lun_scan(shost, q, channel, id, BLIST_SPARSELUN,
scsi_sequential_lun_scan(shost, channel, id, BLIST_SPARSELUN,
SCSI_SCAN_TARGET_PRESENT, SCSI_2);
}
}
@@ -1778,7 +1807,6 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
struct request_queue *q = NULL;
uint channel, id, order_id;
/*
@@ -1803,12 +1831,9 @@ void scsi_scan_host(struct Scsi_Host *shost)
order_id = shost->max_id - id - 1;
else
order_id = id;
scsi_scan_target(shost, &q, channel, order_id);
scsi_scan_target(shost, channel, order_id);
}
}
if (q)
scsi_free_queue(q);
}
void scsi_forget_host(struct Scsi_Host *shost)
@@ -1847,7 +1872,7 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
sdev = scsi_alloc_sdev(shost, NULL, 0, shost->this_id, 0);
sdev = scsi_alloc_sdev(shost, 0, shost->this_id, 0);
if (sdev) {
sdev->borken = 0;
}
......
@@ -60,6 +60,8 @@ EXPORT_SYMBOL(scsi_allocate_request);
EXPORT_SYMBOL(scsi_release_request);
EXPORT_SYMBOL(scsi_wait_req);
EXPORT_SYMBOL(scsi_do_req);
EXPORT_SYMBOL(scsi_get_command);
EXPORT_SYMBOL(scsi_put_command);
EXPORT_SYMBOL(scsi_report_bus_reset);
EXPORT_SYMBOL(scsi_block_requests);
......
@@ -123,7 +123,7 @@ static int sd_major(int major_idx)
case 1 ... 7:
return SCSI_DISK1_MAJOR + major_idx - 1;
case 8 ... 15:
return SCSI_DISK8_MAJOR + major_idx;
return SCSI_DISK8_MAJOR + major_idx - 8;
default:
BUG();
return 0; /* shut up gcc */
......
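The "- 8" fixes an off-by-eight: a quick check of the corrected mapping,
assuming the conventional sd major numbers (SCSI_DISK0_MAJOR = 8,
SCSI_DISK1_MAJOR = 65, SCSI_DISK8_MAJOR = 128):

#include <stdio.h>

#define SCSI_DISK0_MAJOR 8
#define SCSI_DISK1_MAJOR 65
#define SCSI_DISK8_MAJOR 128

static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:			/* gcc case-range extension */
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		return -1;
	}
}

int main(void)
{
	int i;

	/* idx 8 now maps to major 128; the old code wrongly gave 136. */
	for (i = 0; i < 16; i++)
		printf("major_idx %2d -> major %d\n", i, sd_major(i));
	return 0;
}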