Commit a052865f authored by Raghava Aditya Renukunta, committed by Martin K. Petersen

scsi: aacraid: Added support to set QD of attached drives

Added support to set the queue depth (QD) of drives in slave_configure. This
only works for HBA1000-attached drives.
Signed-off-by: Raghava Aditya Renukunta <RaghavaAditya.Renukunta@microsemi.com>
Signed-off-by: Dave Carroll <David.Carroll@microsemi.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 71a91ca4
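
The heart of the change is a lookup in the controller's hba_map: if the target is an HBA1000-attached (AAC_DEVTYPE_NATIVE_RAW) device, the firmware-reported qd_limit becomes the queue depth; otherwise the driver keeps its existing per-LSU heuristic. A minimal sketch of that decision, using identifiers from the diff below (the helper name itself is hypothetical and for illustration only, not part of the patch):

static bool aac_sketch_set_native_qd(struct aac_dev *aac, struct scsi_device *sdev)
{
    int chn = aac_logical_to_phys(sdev_channel(sdev));
    int tid = sdev_id(sdev);

    /* HBA1000-attached drives are mapped as native raw devices in hba_map. */
    if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
        aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
        /* Use the per-device queue-depth limit reported by firmware. */
        scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
        return true;
    }

    return false; /* not HBA1000-attached; caller falls back to the old heuristic */
}
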
@@ -401,61 +401,89 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
 static int aac_slave_configure(struct scsi_device *sdev)
 {
     struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+    int chn, tid;
+    unsigned int depth = 0;
+    unsigned int set_timeout = 0;
+
+    chn = aac_logical_to_phys(sdev_channel(sdev));
+    tid = sdev_id(sdev);
+    if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+        aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+        depth = aac->hba_map[chn][tid].qd_limit;
+        set_timeout = 1;
+        goto common_config;
+    }
+
     if (aac->jbod && (sdev->type == TYPE_DISK))
         sdev->removable = 1;
-    if ((sdev->type == TYPE_DISK) &&
-            (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
-            (!aac->jbod || sdev->inq_periph_qual) &&
-            (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
+    if (sdev->type == TYPE_DISK
+     && sdev_channel(sdev) != CONTAINER_CHANNEL
+     && (!aac->jbod || sdev->inq_periph_qual)
+     && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
         if (expose_physicals == 0)
             return -ENXIO;
+
         if (expose_physicals < 0)
             sdev->no_uld_attach = 1;
     }
-    if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
-            (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
-            !sdev->no_uld_attach) {
+
+    if (sdev->tagged_supported
+     && sdev->type == TYPE_DISK
+     && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+     && !sdev->no_uld_attach) {
+
         struct scsi_device * dev;
         struct Scsi_Host *host = sdev->host;
         unsigned num_lsu = 0;
         unsigned num_one = 0;
-        unsigned depth;
         unsigned cid;
 
-        /*
-         * Firmware has an individual device recovery time typically
-         * of 35 seconds, give us a margin.
-         */
-        if (sdev->request_queue->rq_timeout < (45 * HZ))
-            blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+        set_timeout = 1;
+
         for (cid = 0; cid < aac->maximum_num_containers; ++cid)
             if (aac->fsa_dev[cid].valid)
                 ++num_lsu;
+
         __shost_for_each_device(dev, host) {
-            if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
-                    (!aac->raid_scsi_mode ||
-                        (sdev_channel(sdev) != 2)) &&
-                    !dev->no_uld_attach) {
+            if (dev->tagged_supported
+             && dev->type == TYPE_DISK
+             && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+             && !dev->no_uld_attach) {
                 if ((sdev_channel(dev) != CONTAINER_CHANNEL)
-                 || !aac->fsa_dev[sdev_id(dev)].valid)
+                 || !aac->fsa_dev[sdev_id(dev)].valid) {
                     ++num_lsu;
-            } else
+                }
+            } else {
                 ++num_one;
+            }
         }
+
         if (num_lsu == 0)
             ++num_lsu;
-        depth = (host->can_queue - num_one) / num_lsu;
-        if (depth > 256)
-            depth = 256;
-        else if (depth < 2)
-            depth = 2;
-        scsi_change_queue_depth(sdev, depth);
-    } else {
-        scsi_change_queue_depth(sdev, 1);
-        sdev->tagged_supported = 1;
+
+        depth = (host->can_queue - num_one) / num_lsu;
     }
+
+common_config:
+    /*
+     * Firmware has an individual device recovery time typically
+     * of 35 seconds, give us a margin.
+     */
+    if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
+        blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+
+    if (depth > 256)
+        depth = 256;
+    else if (depth < 1)
+        depth = 1;
+
+    scsi_change_queue_depth(sdev, depth);
+
+    sdev->tagged_supported = 1;
+
     return 0;
 }
@@ -470,6 +498,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
 
 static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
 {
+    struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+    int chn, tid, is_native_device = 0;
+
+    chn = aac_logical_to_phys(sdev_channel(sdev));
+    tid = sdev_id(sdev);
+    if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+        aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
+        is_native_device = 1;
+
     if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
         (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
         struct scsi_device * dev;
@@ -491,9 +528,12 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
         else if (depth < 2)
             depth = 2;
         return scsi_change_queue_depth(sdev, depth);
+    } else if (is_native_device) {
+        scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
+    } else {
+        scsi_change_queue_depth(sdev, 1);
     }
-
-    return scsi_change_queue_depth(sdev, 1);
+    return sdev->queue_depth;
 }
 
 static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)