Commit afd7d9c5 authored by James Bottomley's avatar James Bottomley Committed by James Bottomley

Update aacraid to last drop on 2.4 from Alan Cox

parent fecb256b
# Adaptec aacraid
obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/scsi
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
O_TARGET := aacraid.o
obj-m := $(O_TARGET)
obj-y := linit.o aachba.o commctrl.o comminit.o commsup.o \
dpcsup.o rx.o sa.o
EXTRA_CFLAGS := -Idrivers/scsi
include $(TOPDIR)/Rules.make
......@@ -18,6 +18,12 @@ Supported Cards/Chipsets
ADAPTEC 2120S
ADAPTEC 2200S
ADAPTEC 5400S
Legend S220
Legend S230
Adaptec 3230S
Adaptec 3240S
ASR-2020S PCI-X
AAR-2410SA SATA
People
-------------------------
......@@ -28,6 +34,9 @@ Deanna Bonds <deanna_bonds@adaptec.com> (non-DASD support, PAE fibs and 64 bit,
added new ioctls, changed scsi interface to use new error handler,
increased the number of fibs and outstanding commands to a container)
(fixed 64bit and 64G memory model, changed confusing naming convention
where fibs that go to the hardware are consistently called hw_fibs and
not just fibs like the name of the driver tracking structure)
Original Driver
-------------------------
Adaptec Unix OEM Product Group
......
......@@ -33,15 +33,16 @@
#include <linux/completion.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#define MAJOR_NR SCSI_DISK0_MAJOR /* For DEVICE_NR() */
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include "sd.h"
#include "aacraid.h"
/* SCSI Commands */
/* TODO: dmb - use the ones defined in include/scsi/scsi.h */
/* TODO dmb - use the ones defined in include/scsi/scsi.h*/
#define SS_TEST 0x00 /* Test unit ready */
#define SS_REZERO 0x01 /* Rezero unit */
#define SS_REQSEN 0x03 /* Request Sense */
......@@ -60,15 +61,15 @@
#define SS_SEEK 0x2B /* Seek */
/* values for inqd_pdt: Peripheral device type in plain English */
#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
#define INQD_PDT_PROC 0x03 /* Processor device */
#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
#define INQD_PDT_PROC 0x03 /* Processor device */
#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */
#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */
#define TARGET_LUN_TO_CONTAINER(target, lun) (target)
#define CONTAINER_TO_TARGET(cont) ((cont))
......@@ -81,22 +82,22 @@
/*
* Sense keys
*/
#define SENKEY_NO_SENSE 0x00
#define SENKEY_UNDEFINED 0x01
#define SENKEY_NOT_READY 0x02
#define SENKEY_MEDIUM_ERR 0x03
#define SENKEY_HW_ERR 0x04
#define SENKEY_ILLEGAL 0x05
#define SENKEY_ATTENTION 0x06
#define SENKEY_PROTECTED 0x07
#define SENKEY_BLANK 0x08
#define SENKEY_V_UNIQUE 0x09
#define SENKEY_CPY_ABORT 0x0A
#define SENKEY_ABORT 0x0B
#define SENKEY_EQUAL 0x0C
#define SENKEY_VOL_OVERFLOW 0x0D
#define SENKEY_MISCOMP 0x0E
#define SENKEY_RESERVED 0x0F
#define SENKEY_NO_SENSE 0x00
#define SENKEY_UNDEFINED 0x01
#define SENKEY_NOT_READY 0x02
#define SENKEY_MEDIUM_ERR 0x03
#define SENKEY_HW_ERR 0x04
#define SENKEY_ILLEGAL 0x05
#define SENKEY_ATTENTION 0x06
#define SENKEY_PROTECTED 0x07
#define SENKEY_BLANK 0x08
#define SENKEY_V_UNIQUE 0x09
#define SENKEY_CPY_ABORT 0x0A
#define SENKEY_ABORT 0x0B
#define SENKEY_EQUAL 0x0C
#define SENKEY_VOL_OVERFLOW 0x0D
#define SENKEY_MISCOMP 0x0E
#define SENKEY_RESERVED 0x0F
/*
* Sense codes
......@@ -160,24 +161,24 @@
*----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
u8 inqd_dtq; /* RMB | Device Type Qualifier */
u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
u8 inqd_len; /* Additional length (n-4) */
u8 inqd_pad1[2]; /* Reserved - must be zero */
u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
u8 inqd_vid[8]; /* Vendor ID */
u8 inqd_pid[16]; /* Product ID */
u8 inqd_prl[4]; /* Product Revision Level */
u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
u8 inqd_dtq; /* RMB | Device Type Qualifier */
u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
u8 inqd_len; /* Additional length (n-4) */
u8 inqd_pad1[2];/* Reserved - must be zero */
u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
u8 inqd_vid[8]; /* Vendor ID */
u8 inqd_pid[16];/* Product ID */
u8 inqd_prl[4]; /* Product Revision Level */
};
struct sense_data {
u8 error_code; /* 70h (current errors), 71h(deferred errors) */
u8 valid:1; /* A valid bit of one indicates that the information */
/* field contains valid information as defined in the
* SCSI-2 Standard.
*/
/* field contains valid information as defined in the
* SCSI-2 Standard.
*/
u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
u8 sense_key:4; /* Sense Key */
u8 reserved:1;
......@@ -214,6 +215,7 @@ struct sense_data {
static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /* SCSI Device Instance Pointers */
static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
static void get_sd_devname(int disknum, char *buffer);
static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg);
static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
......@@ -257,13 +259,14 @@ int aac_get_containers(struct aac_dev *dev)
1, 1,
NULL, NULL);
if (status < 0 ) {
printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
break;
}
dresp = (struct aac_mount *)fib_data(fibptr);
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr->valid[index] = 1;
fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
......@@ -274,8 +277,9 @@ int aac_get_containers(struct aac_dev *dev)
/*
* If there are no more containers, then stop asking.
*/
if ((index + 1) >= le32_to_cpu(dresp->count))
if ((index + 1) >= le32_to_cpu(dresp->count)){
break;
}
}
fib_free(fibptr);
fsa_dev[instance] = fsa_dev_ptr;
......@@ -328,7 +332,8 @@ static int probe_container(struct aac_dev *dev, int cid)
dresp = (struct aac_mount *) fib_data(fibptr);
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr->valid[cid] = 1;
fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
......@@ -392,8 +397,8 @@ static char *container_types[] = {
* Arguments: [1] pointer to void [1] int
*
* Purpose: Sets SCSI inquiry data strings for vendor, product
* and revision level. Allows strings to be set in platform dependent
* files instead of in OS dependent driver source.
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
*/
static void setinqstr(int devtype, void *data, int tindex)
......@@ -429,7 +434,7 @@ void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
sense_buf[1] = 0; /* Segment number, always zero */
if (incorrect_length) {
sense_buf[2] = sense_key | 0x20; /* Set ILI bit | sense key */
sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
sense_buf[3] = BYTE3(residue);
sense_buf[4] = BYTE2(residue);
sense_buf[5] = BYTE1(residue);
......@@ -448,11 +453,11 @@ void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
sense_buf[15] = 0;
if (sense_code == SENCODE_INVALID_PARAM_FIELD)
sense_buf[15] = 0x80; /* Std sense key specific field */
sense_buf[15] = 0x80;/* Std sense key specific field */
/* Illegal parameter is in the parameter block */
if (sense_code == SENCODE_INVALID_CDB_FIELD)
sense_buf[15] = 0xc0; /* Std sense key specific field */
sense_buf[15] = 0xc0;/* Std sense key specific field */
/* Illegal parameter is in the CDB block */
sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
......@@ -463,9 +468,9 @@ void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
static void aac_io_done(Scsi_Cmnd * scsicmd)
{
unsigned long cpu_flags;
spin_lock_irqsave(scsicmd->device->host->host_lock, cpu_flags);
spin_lock_irqsave(&io_request_lock, cpu_flags);
scsicmd->scsi_done(scsicmd);
spin_unlock_irqrestore(scsicmd->device->host->host_lock, cpu_flags);
spin_unlock_irqrestore(&io_request_lock, cpu_flags);
}
static void __aac_io_done(Scsi_Cmnd * scsicmd)
......@@ -498,40 +503,53 @@ int aac_get_adapter_info(struct aac_dev* dev)
memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
tmp = dev->adapter_info.kernelrev;
printk(KERN_INFO "%s%d: kernel %d.%d.%d build %d\n",
printk(KERN_INFO"%s%d: kernel %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.kernelbuild);
tmp = dev->adapter_info.monitorrev;
printk(KERN_INFO "%s%d: monitor %d.%d.%d build %d\n",
printk(KERN_INFO"%s%d: monitor %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.monitorbuild);
tmp = dev->adapter_info.biosrev;
printk(KERN_INFO "%s%d: bios %d.%d.%d build %d\n",
printk(KERN_INFO"%s%d: bios %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.biosbuild);
printk(KERN_INFO "%s%d: serial %x%x\n",
printk(KERN_INFO"%s%d: serial %x%x\n",
dev->name, dev->id,
dev->adapter_info.serial[0],
dev->adapter_info.serial[1]);
dev->pae_support = 0;
dev->nondasd_support = 0;
if( BITS_PER_LONG >= 64 &&
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
if(dev->adapter_info.options & AAC_OPT_NONDASD){
// dev->nondasd_support = 1;
// dmb - temporarily disable nondasd
}
if(nondasd != -1) {
dev->nondasd_support = (nondasd!=0);
}
if(dev->nondasd_support != 0){
printk(KERN_INFO"%s%d: Non-DASD support enabled\n",dev->name, dev->id);
}
dev->pae_support = 0;
if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
dev->pae_support = 1;
}
/* TODO - dmb temporary until fw can set this bit */
dev->pae_support = (BITS_PER_LONG >= 64);
if(paemode != -1){
dev->pae_support = (paemode!=0);
}
if(dev->pae_support != 0) {
printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
printk(KERN_INFO"%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
}
if(dev->adapter_info.options & AAC_OPT_NONDASD){
dev->nondasd_support = 1;
}
fib_complete(fibptr);
fib_free(fibptr);
return rcode;
}
......@@ -546,11 +564,11 @@ static void read_callback(void *context, struct fib * fibptr)
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid =TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
dev = (struct aac_dev *)scsicmd->host->hostdata;
cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
if (fibptr == NULL)
BUG();
......@@ -561,7 +579,7 @@ static void read_callback(void *context, struct fib * fibptr)
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
pci_unmap_single(dev->pdev, (dma_addr_t)(ulong)scsicmd->SCp.ptr,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
readreply = (struct aac_read_reply *)fib_data(fibptr);
......@@ -591,11 +609,11 @@ static void write_callback(void *context, struct fib * fibptr)
u32 cid;
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
dev = (struct aac_dev *)scsicmd->host->hostdata;
cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
if (fibptr == NULL)
BUG();
......@@ -605,7 +623,7 @@ static void write_callback(void *context, struct fib * fibptr)
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
pci_unmap_single(dev->pdev, (dma_addr_t)(ulong)scsicmd->SCp.ptr,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
......@@ -637,7 +655,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
struct aac_dev *dev;
struct fib * cmd_fibcontext;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
/*
* Get block address and transfer length
*/
......@@ -677,7 +695,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
readcmd->block = cpu_to_le32(lba);
readcmd->pad = cpu_to_le16(0);
readcmd->flags = cpu_to_le16(0);
aac_build_sg64(scsicmd, &readcmd->sg);
if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
......@@ -718,8 +736,9 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
(fib_callback) read_callback,
(void *) scsicmd);
}
/*
* Check that the command queued to the controller
*/
......@@ -746,7 +765,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
struct aac_dev *dev;
struct fib * cmd_fibcontext;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
/*
* Get block address and transfer length
*/
......@@ -761,7 +780,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %lu, t = %ld.\n", smp_processor_id(), lba, jiffies));
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
......@@ -772,8 +791,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
}
fib_init(cmd_fibcontext);
if(dev->pae_support == 1)
{
if(dev->pae_support == 1){
struct aac_write64 *writecmd;
writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
......@@ -797,9 +815,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
0, 1,
(fib_callback) write_callback,
(void *) scsicmd);
}
else
{
} else {
struct aac_write *writecmd;
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
......@@ -809,8 +825,10 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
if (count * 512 > (64 * 1024))
if (count * 512 > (64 * 1024)) {
BUG();
}
aac_build_sg(scsicmd, &writecmd->sg);
if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
......@@ -861,25 +879,25 @@ int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
struct fsa_scsi_hba *fsa_dev_ptr;
int cardtype;
int ret;
struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
cardtype = dev->cardtype;
fsa_dev_ptr = fsa_dev[scsicmd->device->host->unique_id];
fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
/*
* If the bus, target or lun is out of range, return fail
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
if (scsicmd->device->id != scsicmd->device->host->this_id) {
if ((scsicmd->device->channel == 0) ){
if( (scsicmd->device->id >= AAC_MAX_TARGET) || (scsicmd->device->lun != 0)){
if (scsicmd->target != scsicmd->host->this_id) {
if ((scsicmd->channel == 0) ){
if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
cid = TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
/*
* If the target container doesn't exist, it may have
......@@ -890,9 +908,9 @@ int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
case SS_INQUIR:
case SS_RDCAP:
case SS_TEST:
spin_unlock_irq(scsicmd->device->host->host_lock);
spin_unlock_irq(&io_request_lock);
probe_container(dev, cid);
spin_lock_irq(scsicmd->device->host->host_lock);
spin_lock_irq(&io_request_lock);
if (fsa_dev_ptr->valid[cid] == 0) {
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
......@@ -944,12 +962,14 @@ int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
{
struct inquiry_data *inq_data_ptr;
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
if(!strstr(UTS_RELEASE,"BOOT")){ // If this is not a RH driver disk kernel
inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
}
inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data_ptr->inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
......@@ -959,7 +979,7 @@ int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
* see: <vendor>.c i.e. aac.c
*/
setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
if (scsicmd->device->id == scsicmd->device->host->this_id)
if (scsicmd->target == scsicmd->host->this_id)
inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
else
inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
......@@ -1053,20 +1073,17 @@ int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
* containers to /dev/sd device names
*/
spin_unlock_irq(scsicmd->device->host->host_lock);
if (scsicmd->request->rq_disk)
memcpy(fsa_dev_ptr->devname[cid],
scsicmd->request->rq_disk->disk_name,
8);
spin_unlock_irq(&io_request_lock);
fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
ret = aac_read(scsicmd, cid);
spin_lock_irq(scsicmd->device->host->host_lock);
spin_lock_irq(&io_request_lock);
return ret;
case SS_WRITE:
case SM_WRITE:
spin_unlock_irq(scsicmd->device->host->host_lock);
spin_unlock_irq(&io_request_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(scsicmd->device->host->host_lock);
spin_lock_irq(&io_request_lock);
return ret;
default:
/*
......@@ -1094,7 +1111,7 @@ static int query_disk(struct aac_dev *dev, void *arg)
qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1))
{
if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
qd.instance = dev->scsi_host_ptr->host_no;
qd.bus = 0;
......@@ -1107,18 +1124,40 @@ static int query_disk(struct aac_dev *dev, void *arg)
qd.locked = fsa_dev_ptr->locked[qd.cnum];
qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
if (fsa_dev_ptr->devname[qd.cnum][0] == '\0')
if (fsa_dev_ptr->devno[qd.cnum] == -1)
qd.unmapped = 1;
else
qd.unmapped = 0;
strncpy(qd.name, fsa_dev_ptr->devname[qd.cnum], 8);
get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
return -EFAULT;
return 0;
}
static void get_sd_devname(int disknum, char *buffer)
{
	/*
	 * Render the /dev/sd name for a disk index into @buffer:
	 * "sda".."sdz" for indexes 0..25, then a two-letter suffix
	 * ("sdaa", "sdab", ...) beyond that.  A negative index
	 * produces an empty string.
	 */
	if (disknum < 0) {
		buffer[0] = '\0';
		return;
	}

	if (disknum >= 26) {
		/*
		 * Two-letter scheme: the first letter counts completed
		 * groups of 26 (offset by one), the second is the
		 * remainder within the group.
		 */
		sprintf(buffer, "sd%c%c",
			'a' + (disknum / 26) - 1,
			'a' + (disknum % 26));
	} else {
		sprintf(buffer, "sd%c", 'a' + disknum);
	}
}
static int force_delete_disk(struct aac_dev *dev, void *arg)
{
struct aac_delete_disk dd;
......@@ -1129,7 +1168,7 @@ static int force_delete_disk(struct aac_dev *dev, void *arg)
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
/*
* Mark this container as being deleted.
......@@ -1152,7 +1191,7 @@ static int delete_disk(struct aac_dev *dev, void *arg)
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
/*
* If the container is locked, it can not be deleted by the API.
......@@ -1164,7 +1203,7 @@ static int delete_disk(struct aac_dev *dev, void *arg)
* Mark the container as no longer being valid.
*/
fsa_dev_ptr->valid[dd.cnum] = 0;
fsa_dev_ptr->devname[dd.cnum][0] = '\0';
fsa_dev_ptr->devno[dd.cnum] = -1;
return 0;
}
}
......@@ -1202,7 +1241,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
Scsi_Cmnd *scsicmd;
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
if (fibptr == NULL)
BUG();
......@@ -1219,8 +1258,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
scsicmd->request_bufflen,
pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
/*
......@@ -1239,17 +1277,23 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
/*
* Next check the srb status
*/
switch(le32_to_cpu(srbreply->srb_status)){
switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
if(scsicmd->cmnd[0] == INQUIRY ){
u8 b;
u8 b1;
/* We can't expose disk devices because we can't tell whether they
* are the raw container drives or stand alone drives
* are the raw container drives or stand alone drives. If they have
* the removable bit set then we should expose them though.
*/
b = *(u8*)scsicmd->buffer;
if( (b & 0x0f) == TYPE_DISK ){
b = (*(u8*)scsicmd->buffer)&0x1f;
b1 = ((u8*)scsicmd->buffer)[1];
if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
|| (b==TYPE_DISK && (b1&0x80)) ){
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
} else {
scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
}
} else {
......@@ -1271,6 +1315,22 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
}
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
case INQUIRY: {
u8 b;
u8 b1;
/* We can't expose disk devices because we can't tell whether they
* are the raw container drives or stand alone drives
*/
b = (*(u8*)scsicmd->buffer)&0x0f;
b1 = ((u8*)scsicmd->buffer)[1];
if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
|| (b==TYPE_DISK && (b1&0x80)) ){
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
} else {
scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
}
break;
}
default:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
......@@ -1326,17 +1386,19 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
default:
#ifdef AAC_DETAILED_STATUS_INFO
printk("aacraid: SRB ERROR (%s)\n",aac_get_status_string(le32_to_cpu(srbreply->srb_status)));
printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) );
#endif
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
}
if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
int len;
scsicmd->result |= CHECK_CONDITION;
len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
}
/*
* OR in the scsi status (already shifted up a bit)
......@@ -1365,14 +1427,15 @@ static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
struct aac_srb *srbcmd;
u16 fibsize;
u32 flag;
u32 timeout;
if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
if( scsicmd->target > 15 || scsicmd->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
switch(scsicmd->sc_data_direction){
case SCSI_DATA_WRITE:
flag = SRB_DataOut;
......@@ -1402,11 +1465,15 @@ static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
srbcmd->target = cpu_to_le32(scsicmd->device->id);
srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
srbcmd->target = cpu_to_le32(scsicmd->target);
srbcmd->lun = cpu_to_le32(scsicmd->lun);
srbcmd->flags = cpu_to_le32(flag);
srbcmd->timeout = cpu_to_le32(0); // timeout not used
timeout = (scsicmd->timeout-jiffies)/HZ;
if(timeout == 0){
timeout = 1;
}
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
......@@ -1468,7 +1535,7 @@ static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
struct aac_dev *dev;
unsigned long byte_count = 0;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
// Get rid of old data
psg->count = cpu_to_le32(0);
psg->sg[0].addr = cpu_to_le32(NULL);
......@@ -1511,7 +1578,7 @@ static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
psg->count = cpu_to_le32(1);
psg->sg[0].addr = cpu_to_le32(addr);
psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
scsicmd->SCp.dma_handle = addr;
scsicmd->SCp.ptr = (void *)addr;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
......@@ -1524,7 +1591,7 @@ static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
unsigned long byte_count = 0;
u64 le_addr;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
dev = (struct aac_dev *)scsicmd->host->hostdata;
// Get rid of old data
psg->count = cpu_to_le32(0);
psg->sg[0].addr[0] = cpu_to_le32(NULL);
......@@ -1572,7 +1639,7 @@ static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
psg->sg[0].addr[1] = (u32)(le_addr>>32);
psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
scsicmd->SCp.dma_handle = addr;
scsicmd->SCp.ptr = (void *)addr;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
......
//#define dprintk(x) printk x
#define dprintk(x)
/*#define dprintk(x) printk x */
#include "compat.h"
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
#define MAXIMUM_NUM_CONTAINERS 31
#define MAXIMUM_NUM_ADAPTERS 8
#define AAC_NUM_FIB 578
#define AAC_NUM_IO_FIB 512
//#define AAC_NUM_IO_FIB 512
#define AAC_NUM_IO_FIB 100
#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
//#define AAC_MAX_TARGET (16)
#define AAC_MAX_LUN (8)
/*
......@@ -24,6 +24,9 @@
#define AAC_DETAILED_STATUS_INFO
extern int nondasd;
extern int paemode;
struct diskparm
{
int heads;
......@@ -79,7 +82,7 @@ struct diskparm
/*
* Host side memory scatter gather list
* Used by the adapter for read, write, and readdirplus operations
* We have separate 32 and 64 bit version because even
* We have seperate 32 and 64 bit version because even
* on 64 bit systems not all cards support the 64 bit version
*/
struct sgentry {
......@@ -236,6 +239,92 @@ enum aac_queue_types {
AdapHighRespQueue /* Host to adapter high priority response traffic */
};
/*
 * Implement our own version of these so we have 64 bit compatibility.
 * The adapter uses these and can only handle 32 bit addresses, so the
 * links are stored as u32 values (pointers truncated via (u32)(ulong))
 * rather than as native pointers.
 */
struct aac_list_head {
u32 next;	/* 32 bit address of the next entry */
u32 prev;	/* 32 bit address of the previous entry */
};
/*
 * AAC_INIT_LIST_HEAD - mark @ptr as an empty list: both links point
 * back at the head itself (address truncated to 32 bits).
 */
#define AAC_INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (u32)(ulong)(ptr); \
(ptr)->prev = (u32)(ulong)(ptr); \
} while (0)
/**
* aac_list_empty - tests whether a list is empty
* @head: the list to test.
*/
static __inline__ int aac_list_empty(struct aac_list_head *head)
{
return head->next == ((u32)(ulong)head);
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline__ void aac_list_add(struct aac_list_head * n,
struct aac_list_head * prev,
struct aac_list_head * next)
{
next->prev = (u32)(ulong)n;
n->next = (u32)(ulong)next;
n->prev = (u32)(ulong)prev;
prev->next = (u32)(ulong)n;
}
/**
 * aac_list_add_tail - add a new entry at the end of the list
 * @n: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.  The stored prev link is a
 * truncated 32 bit address, so it is widened back to a pointer first.
 */
static __inline__ void aac_list_add_tail(struct aac_list_head *n, struct aac_list_head *head)
{
aac_list_add(n, (struct aac_list_head*)(ulong)(head->prev), head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline__ void __aac_list_del(struct aac_list_head * p,
struct aac_list_head * n)
{
n->prev = (u32)(ulong)p;
p->next = (u32)(ulong)n;
}
/**
 * aac_list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 *
 * Note: aac_list_empty() on @entry does not return true after this;
 * the entry is left in an undefined state (both links zeroed).
 */
static __inline__ void aac_list_del(struct aac_list_head *entry)
{
__aac_list_del((struct aac_list_head*)(ulong)entry->prev,(struct aac_list_head*)(ulong) entry->next);
entry->next = entry->prev = 0;
}
/**
 * aac_list_entry - get the struct for this entry
 * @ptr: the &struct aac_list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Hand-rolled container_of(): subtracts the member's offset within
 * @type (computed via the null-pointer idiom) from the link address.
 */
#define aac_list_entry(ptr, type, member) \
((type *)((char *)(ptr)-(ulong)(&((type *)0)->member)))
/*
* Assign type values to the FSA communication data structures
*/
......@@ -249,11 +338,11 @@ enum aac_queue_types {
#define FsaNormal 1
#define FsaHigh 2
/*
* Define the FIB. The FIB is the where all the requested data and
* command information are put to the application on the FSA adapter.
*/
struct aac_fibhdr {
u32 XferState; // Current transfer state for this CCB
u16 Command; // Routing information for the destination
......@@ -269,7 +358,8 @@ struct aac_fibhdr {
u32 _ReceiverTimeStart; // Timestamp for receipt of fib
u32 _ReceiverTimeDone; // Timestamp for completion of fib
} _s;
struct list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
struct aac_list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
// struct list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
} _u;
};
......@@ -443,7 +533,7 @@ struct aac_driver_ident
/*
* The adapter interface specs all queues to be located in the same
* physically contigous block. The host structure that defines the
* commuication queues will assume they are each a separate physically
 * communication queues will assume they are each a separate physically
* contigous memory region that will support them all being one big
* contigous block.
* There is a command and response queue for each level and direction of
......@@ -451,21 +541,22 @@ struct aac_driver_ident
*/
struct aac_queue {
u64 logical; /* This is the address we give the adapter */
struct aac_entry *base; /* This is the system virtual address */
struct aac_qhdr headers; /* A pointer to the producer and consumer queue headers for this queue */
u32 entries; /* Number of queue entries on this queue */
wait_queue_head_t qfull; /* Event to wait on if the queue is full */
wait_queue_head_t cmdready; /* Indicates there is a Command ready from the adapter on this queue. */
/* This is only valid for adapter to host command queues. */
spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
u32 padding; /* Padding - FIXME - can remove I believe */
struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
u64 logical; /*address we give the adapter */
struct aac_entry *base; /*system virtual address */
struct aac_qhdr headers; /*producer,consumer q headers*/
u32 entries; /*Number of queue entries */
wait_queue_head_t qfull; /*Event to wait on if q full */
wait_queue_head_t cmdready; /*Cmd ready from the adapter */
/* This is only valid for adapter to host command queues. */
spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
u32 SavedIrql; /* Previous IRQL when the spin lock is taken */
u32 padding; /* Padding - FIXME - can remove I believe */
struct aac_list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
// struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
unsigned long numpending; /* Number of entries on outstanding queue. */
u32 numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
};
......@@ -629,7 +720,7 @@ struct aac_fib_context {
struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct list_head fibs;
struct aac_list_head hw_fib_list; // this holds hw_fibs which should be 32 bit addresses
};
struct fsa_scsi_hba {
......@@ -639,7 +730,7 @@ struct fsa_scsi_hba {
u8 ro[MAXIMUM_NUM_CONTAINERS];
u8 locked[MAXIMUM_NUM_CONTAINERS];
u8 deleted[MAXIMUM_NUM_CONTAINERS];
char devname[MAXIMUM_NUM_CONTAINERS][8];
s32 devno[MAXIMUM_NUM_CONTAINERS];
};
struct fib {
......@@ -650,7 +741,6 @@ struct fib {
* The Adapter that this I/O is destined for.
*/
struct aac_dev *dev;
u64 logicaladdr; /* 64 bit */
/*
* This is the event the sendfib routine will wait on if the
* caller did not pass one and this is synch io.
......@@ -669,7 +759,8 @@ struct fib {
struct list_head queue;
void *data;
struct hw_fib *fib; /* Actual shared object */
struct hw_fib *hw_fib; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
/*
......@@ -696,6 +787,7 @@ struct aac_adapter_info
u32 biosrev;
u32 biosbuild;
u32 cluster;
u32 clusterchannelmask;
u32 serial[2];
u32 battery;
u32 options;
......@@ -746,13 +838,13 @@ struct aac_dev
*/
dma_addr_t hw_fib_pa;
struct hw_fib *hw_fib_va;
#if BITS_PER_LONG >= 64
ulong fib_base_va;
#endif
/*
* Fib Headers
*/
struct fib fibs[AAC_NUM_FIB];
// dmb struct fib fibs[AAC_NUM_FIB]; /* Doing it here takes up too much from the scsi pool*/
struct fib *fibs;
struct fib *free_fib;
struct fib *timeout_fib;
spinlock_t fib_lock;
......@@ -771,6 +863,7 @@ struct aac_dev
unsigned long fsrev; /* Main driver's revision number */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
// void * init_pa; /* Holds physical address of the init struct */
dma_addr_t init_pa; /* Holds physical address of the init struct */
struct pci_dev *pdev; /* Our PCI interface */
......@@ -1148,7 +1241,9 @@ struct aac_mntent {
u32 altoid; // != oid <==> snapshot or broken mirror exists
};
#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */
#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
struct aac_query_mount {
u32 command;
......@@ -1359,31 +1454,6 @@ struct aac_aifcmd {
u8 data[1]; /* Undefined length (from kernel viewpoint) */
};
/*
 * Convert a hw_fib virtual address to the 32-bit token handed to the
 * adapter.  WARNING: this truncates the pointer to 32 bits, so it is
 * only valid when hw_fibs are guaranteed to live in 32-bit addressable
 * memory; on 64-bit platforms the upper half of the pointer is lost.
 */
static inline u32 fib2addr(struct hw_fib *hw)
{
	return (u32)hw;
}

/*
 * Inverse of fib2addr(): widen a 32-bit token back into a hw_fib
 * pointer (same 32-bit-only assumption applies).
 */
static inline struct hw_fib *addr2fib(u32 addr)
{
	return (struct hw_fib *)addr;
}
/**
 * cap_to_cyls - convert a capacity in sectors to cylinders
 * @capacity: device capacity; may be a 64-bit sector_t when CONFIG_LBD
 *            (large block devices) is enabled
 * @divisor: sectors per cylinder (heads * sectors per track)
 *
 * Uses do_div() in the 64-bit case because native 64-bit division is
 * not available on all 32-bit platforms.  Returns the (truncated)
 * cylinder count as a u32.
 */
static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
{
#ifdef CONFIG_LBD
	do_div(capacity, divisor);
#else
	capacity /= divisor;
#endif
	return (u32) capacity;
}
const char *aac_driverinfo(struct Scsi_Host *);
struct fib *fib_alloc(struct aac_dev *dev);
int fib_setup(struct aac_dev *dev);
......@@ -1397,7 +1467,7 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int fib_complete(struct fib * context);
#define fib_data(fibctx) ((void *)(fibctx)->fib->data)
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
int aac_detach(struct aac_dev *dev);
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
int aac_get_containers(struct aac_dev *dev);
......
......@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void *arg)
if(fibptr == NULL)
return -ENOMEM;
kfib = fibptr->fib;
kfib = fibptr->hw_fib;
/*
* First copy in the header so that we can check the size field.
*/
......@@ -150,7 +150,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void *arg)
* the list to 0.
*/
fibctx->count = 0;
INIT_LIST_HEAD(&fibctx->fibs);
AAC_INIT_LIST_HEAD(&fibctx->hw_fib_list);
fibctx->jiffies = jiffies/HZ;
/*
* Now add this context onto the adapter's
......@@ -181,7 +181,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
{
struct fib_ioctl f;
struct aac_fib_context *fibctx, *aifcp;
struct hw_fib * fib;
struct hw_fib * hw_fib;
int status;
struct list_head * entry;
int found;
......@@ -224,25 +224,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
* -EAGAIN
*/
return_fib:
if (!list_empty(&fibctx->fibs)) {
struct list_head * entry;
if (!aac_list_empty(&fibctx->hw_fib_list)) {
struct aac_list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fibs.next;
list_del(entry);
entry = (struct aac_list_head*)(ulong)fibctx->hw_fib_list.next;
aac_list_del(entry);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
kfree(fib);
if (copy_to_user(f.fib, hw_fib, sizeof(struct hw_fib))) {
kfree(hw_fib);
return -EFAULT;
}
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib);
kfree(hw_fib);
status = 0;
fibctx->jiffies = jiffies/HZ;
} else {
......@@ -264,24 +264,24 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
struct hw_fib *fib;
struct hw_fib *hw_fib;
/*
* First free any FIBs that have not been consumed.
*/
while (!list_empty(&fibctx->fibs)) {
struct list_head * entry;
while (!aac_list_empty(&fibctx->hw_fib_list)) {
struct aac_list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fibs.next;
list_del(entry);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
entry = (struct aac_list_head*)(ulong)(fibctx->hw_fib_list.next);
aac_list_del(entry);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
fibctx->count--;
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib);
kfree(hw_fib);
}
/*
* Remove the Context from the AdapterFibContext List
......@@ -372,6 +372,204 @@ static int check_revision(struct aac_dev *dev, void *arg)
return 0;
}
/**
*
* aac_send_raw_scb
*
*/
int aac_send_raw_srb(struct aac_dev* dev, void* arg)
{
struct fib* srbfib;
int status;
struct aac_srb *srbcmd;
struct aac_srb *user_srb = arg;
struct aac_srb_reply* user_reply;
struct aac_srb_reply* reply;
u32 fibsize = 0;
u32 flags = 0;
s32 rcode = 0;
u32 data_dir;
ulong sg_user[32];
ulong sg_list[32];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize = 0;
int i;
if (!capable(CAP_SYS_ADMIN)){
printk(KERN_DEBUG"aacraid: No permission to send raw srb\n");
return -EPERM;
}
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
if (!(srbfib = fib_alloc(dev))) {
return -1;
}
fib_init(srbfib);
srbcmd = (struct aac_srb*) fib_data(srbfib);
if(copy_from_user((void*)&fibsize, (void*)&user_srb->count,sizeof(u32))){
printk(KERN_DEBUG"aacraid: Could not copy data size from user\n");
rcode = -EFAULT;
goto cleanup;
}
if(copy_from_user(srbcmd, user_srb,fibsize)){
printk(KERN_DEBUG"aacraid: Could not copy srb from user\n");
rcode = -EFAULT;
goto cleanup;
}
user_reply = arg+fibsize;
flags = srbcmd->flags;
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(srbcmd->channel);
srbcmd->target = cpu_to_le32(srbcmd->target);
srbcmd->lun = cpu_to_le32(srbcmd->lun);
srbcmd->flags = cpu_to_le32(srbcmd->flags);
srbcmd->timeout = cpu_to_le32(srbcmd->timeout);
srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size);
switch(srbcmd->flags){
case SRB_DataOut:
data_dir = SCSI_DATA_WRITE;
break;
case (SRB_DataIn | SRB_DataOut):
data_dir = SCSI_DATA_UNKNOWN;
break;
case SRB_DataIn:
data_dir = SCSI_DATA_READ;
break;
default:
data_dir = SCSI_DATA_NONE;
}
if( dev->pae_support ==1 ) {
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
byte_count = 0;
// This should also catch if user used the 32 bit sgmap
actual_fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
rcode = -EINVAL;
goto cleanup;
}
for (i = 0; i < psg->count; i++) {
dma_addr_t addr;
u64 le_addr;
void* p;
p = kmalloc(psg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(p == 0) {
printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
psg->sg[i].count,i,psg->count);
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (ulong)psg->sg[i].addr;
sg_list[i] = (ulong)p; // save so we can clean up later
sg_indx = i;
if( flags & SRB_DataOut ){
if(copy_from_user(p,psg->sg[i].addr,psg->sg[i].count)){
printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, psg->sg[i].count, scsi_to_pci_dma_dir(data_dir));
le_addr = cpu_to_le64(addr);
psg->sg[i].addr[1] = (u32)(le_addr>>32);
psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
byte_count += psg->sg[i].count;
}
srbcmd->count = cpu_to_le32(byte_count);
status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,0,0);
} else {
struct sgmap* psg = &srbcmd->sg;
byte_count = 0;
actual_fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
rcode = -EINVAL;
goto cleanup;
}
for (i = 0; i < psg->count; i++) {
dma_addr_t addr;
void* p;
p = kmalloc(psg->sg[i].count,GFP_KERNEL);
if(p == 0) {
printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
psg->sg[i].count,i,psg->count);
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (ulong)(psg->sg[i].addr);
sg_list[i] = (ulong)p; // save so we can clean up later
sg_indx = i;
if( flags & SRB_DataOut ){
if(copy_from_user((void*)p,(void*)(ulong)(psg->sg[i].addr),psg->sg[i].count)){
printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n");
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, psg->sg[i].count, scsi_to_pci_dma_dir(data_dir));
psg->sg[i].addr = cpu_to_le32(addr);
psg->sg[i].count = cpu_to_le32(psg->sg[i].count);
byte_count += psg->sg[i].count;
}
srbcmd->count = cpu_to_le32(byte_count);
status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, 0, 0);
}
if (status != 0){
printk(KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n");
rcode = -1;
goto cleanup;
}
if( flags & SRB_DataIn ) {
for(i = 0 ; i <= sg_indx; i++){
if(copy_to_user((void*)(sg_user[i]),(void*)(sg_list[i]),le32_to_cpu(srbcmd->sg.sg[i].count))){
printk(KERN_DEBUG"aacraid: Could not copy sg data to user\n");
rcode = -EFAULT;
goto cleanup;
}
}
}
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
printk(KERN_DEBUG"aacraid: Could not copy reply to user\n");
rcode = -EFAULT;
goto cleanup;
}
cleanup:
for(i=0; i <= sg_indx; i++){
kfree((void*)sg_list[i]);
}
fib_complete(srbfib);
fib_free(srbfib);
return rcode;
}
struct aac_pci_info {
u32 bus;
......@@ -386,8 +584,10 @@ int aac_get_pci_info(struct aac_dev* dev, void* arg)
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
if(copy_to_user( arg, (void*)&pci_info, sizeof(struct aac_pci_info)))
if(copy_to_user( arg, (void*)&pci_info, sizeof(struct aac_pci_info))){
printk(KERN_DEBUG "aacraid: Could not copy pci info\n");
return -EFAULT;
}
return 0;
}
......@@ -420,6 +620,9 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg)
case FSACTL_CLOSE_GET_ADAPTER_FIB:
status = close_getadapter_fib(dev, arg);
break;
case FSACTL_SEND_RAW_SRB:
status = aac_send_raw_srb(dev,arg);
break;
case FSACTL_GET_PCI_INFO:
status = aac_get_pci_info(dev,arg);
break;
......
......@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <asm/semaphore.h>
#include "scsi.h"
#include "hosts.h"
......@@ -58,10 +59,11 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
struct aac_init *init;
dma_addr_t phys;
/* FIXME: Adaptec add 128 bytes to this value - WHY ?? */
size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
base = pci_alloc_consistent(dev->pdev, size, &phys);
if(base == NULL)
{
printk(KERN_ERR "aacraid: unable to create mapping.\n");
......@@ -74,14 +76,6 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
dev->init = (struct aac_init *)(base + fibsize);
dev->init_pa = phys + fibsize;
/*
* Cache the upper bits of the virtual mapping for 64bit boxes
* FIXME: this crap should be rewritten
*/
#if BITS_PER_LONG >= 64
dev->fib_base_va = ((ulong)base & 0xffffffff00000000);
#endif
init = dev->init;
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
......@@ -92,16 +86,19 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* Adapter Fibs are the first thing allocated so that they
* start page aligned
*/
init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base);
init->AdapterFibsPhysicalAddress = cpu_to_le32(phys);
dev->fib_base_va = (ulong)base;
init->AdapterFibsVirtualAddress = cpu_to_le32((u32)(ulong)base);
init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
init->AdapterFibsSize = cpu_to_le32(fibsize);
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
init->HostPhysMemPages = cpu_to_le32(num_physpages); // number of 4k pages of host physical memory
/*
* Increment the base address by the amount already used
*/
base = base + fibsize + sizeof(struct aac_init);
phys = phys + fibsize + sizeof(struct aac_init);
phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
/*
* Align the beginning of Headers to commalign
*/
......@@ -111,8 +108,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
/*
* Fill in addresses of the Comm Area Headers and Queues
*/
*commaddr = (unsigned long *)base;
init->CommHeaderAddress = cpu_to_le32(phys);
*commaddr = base;
init->CommHeaderAddress = cpu_to_le32((u32)phys);
/*
* Increment the base address by the size of the CommArea
*/
......@@ -134,14 +131,14 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->dev = dev;
INIT_LIST_HEAD(&q->pendingq);
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
AAC_INIT_LIST_HEAD(&q->cmdq);
init_waitqueue_head(&q->qfull);
spin_lock_init(&q->lockdata);
q->lock = &q->lockdata;
q->headers.producer = mem;
q->headers.consumer = mem+1;
*q->headers.producer = cpu_to_le32(qsize);
*q->headers.consumer = cpu_to_le32(qsize);
*(q->headers.producer) = cpu_to_le32(qsize);
*(q->headers.consumer) = cpu_to_le32(qsize);
q->entries = qsize;
}
......@@ -210,7 +207,7 @@ int aac_detach(struct aac_dev *detach)
/**
* aac_comm_init - Initialise FSA data structures
* @dev: Adapter to initialise
 *	@dev: Adapter to initialise
*
 *	Initializes the data structures that are required for the FSA communication
* interface to operate.
......@@ -227,7 +224,6 @@ int aac_comm_init(struct aac_dev * dev)
struct aac_entry * queues;
unsigned long size;
struct aac_queue_block * comm = dev->queues;
/*
* Now allocate and initialize the zone structures used as our
* pool of FIB context records. The size of the zone is based
......@@ -246,9 +242,9 @@ int aac_comm_init(struct aac_dev * dev)
if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
return -ENOMEM;
queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
/* Adapter to Host normal proirity Command queue */
/* Adapter to Host normal priority Command queue */
comm->queue[HostNormCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
queues += HOST_NORM_CMD_ENTRIES;
......@@ -278,7 +274,6 @@ int aac_comm_init(struct aac_dev * dev)
/* adapter to host normal priority response queue */
comm->queue[HostNormRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
queues += HOST_NORM_RESP_ENTRIES;
headers += 2;
......@@ -313,6 +308,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
/*
* Ok now init the communication subsystem
*/
dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
if (dev->queues == NULL) {
printk(KERN_ERR "Error could not allocate comm region.\n");
......@@ -320,13 +316,17 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
memset(dev->queues, 0, sizeof(struct aac_queue_block));
if (aac_comm_init(dev)<0)
if (aac_comm_init(dev)<0){
kfree(dev->queues);
return NULL;
}
/*
* Initialize the list of fibs
*/
if(fib_setup(dev)<0)
if(fib_setup(dev)<0){
kfree(dev->queues);
return NULL;
}
INIT_LIST_HEAD(&dev->fib_list);
init_completion(&dev->aif_completion);
......
......@@ -79,39 +79,39 @@ void fib_map_free(struct aac_dev *dev)
* fib_setup - setup the fibs
* @dev: Adapter to set up
*
* Allocate the PCI space for the fibs, map it and then initialise the
 *	Allocate the PCI space for the fibs, map it and then initialise the
* fib area, the unmapped fib data and also the free list
*/
int fib_setup(struct aac_dev * dev)
{
struct fib *fibptr;
struct hw_fib *fib;
dma_addr_t fibpa;
struct hw_fib *hw_fib_va;
dma_addr_t hw_fib_pa;
int i;
if(fib_map_alloc(dev)<0)
return -ENOMEM;
fib = dev->hw_fib_va;
fibpa = dev->hw_fib_pa;
memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
hw_fib_va = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
/*
* Initialise the fibs
*/
for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
{
fibptr->dev = dev;
fibptr->fib = fib;
fibptr->data = (void *) fibptr->fib->data;
fibptr->hw_fib = hw_fib_va;
fibptr->data = (void *) fibptr->hw_fib->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
init_MUTEX_LOCKED(&fibptr->event_wait);
spin_lock_init(&fibptr->event_lock);
fib->header.XferState = cpu_to_le32(0xffffffff);
fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
fibptr->logicaladdr = (unsigned long) fibpa;
fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
fibpa = fibpa + sizeof(struct hw_fib);
hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
fibptr->hw_fib_pa = hw_fib_pa;
hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
}
/*
* Add the fib chain to the free list
......@@ -136,11 +136,15 @@ struct fib * fib_alloc(struct aac_dev *dev)
{
struct fib * fibptr;
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
if(!fibptr)
BUG();
while(!fibptr){
spin_unlock_irqrestore(&dev->fib_lock, flags);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
}
dev->free_fib = fibptr->next;
spin_unlock_irqrestore(&dev->fib_lock, flags);
/*
......@@ -152,7 +156,7 @@ struct fib * fib_alloc(struct aac_dev *dev)
* Null out fields that depend on being zero at the start of
* each I/O
*/
fibptr->fib->header.XferState = cpu_to_le32(0);
fibptr->hw_fib->header.XferState = cpu_to_le32(0);
fibptr->callback = NULL;
fibptr->callback_data = NULL;
......@@ -172,15 +176,14 @@ void fib_free(struct fib * fibptr)
unsigned long flags;
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
aac_config.fib_timeouts++;
fibptr->next = fibptr->dev->timeout_fib;
fibptr->dev->timeout_fib = fibptr;
} else {
if (fibptr->fib->header.XferState != 0) {
if (fibptr->hw_fib->header.XferState != 0) {
printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
(void *)fibptr, fibptr->fib->header.XferState);
(void*)fibptr, fibptr->hw_fib->header.XferState);
}
fibptr->next = fibptr->dev->free_fib;
fibptr->dev->free_fib = fibptr;
......@@ -197,14 +200,14 @@ void fib_free(struct fib * fibptr)
void fib_init(struct fib *fibptr)
{
struct hw_fib *fib = fibptr->fib;
fib->header.StructType = FIB_MAGIC;
fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
fib->header.SenderFibAddress = cpu_to_le32(0);
fib->header.ReceiverFibAddress = cpu_to_le32(0);
fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
struct hw_fib *hw_fib = fibptr->hw_fib;
hw_fib->header.StructType = FIB_MAGIC;
hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
}
/**
......@@ -217,10 +220,10 @@ void fib_init(struct fib *fibptr)
void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *fib = fibptr->fib;
if(fib->header.StructType != FIB_MAGIC)
struct hw_fib *hw_fib = fibptr->hw_fib;
if(hw_fib->header.StructType != FIB_MAGIC)
BUG();
fib->header.XferState = cpu_to_le32(0);
hw_fib->header.XferState = cpu_to_le32(0);
}
/*
......@@ -257,7 +260,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
q = &dev->queues->queue[qid];
*index = le32_to_cpu(*(q->headers.producer));
if (*index - 2 == le32_to_cpu(*(q->headers.consumer)))
if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
if (qid == AdapHighCmdQueue) {
......@@ -277,10 +280,14 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
else BUG();
else {
printk("aacraid: invalid qid\n");
BUG();
}
if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", qid, q->numpending);
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
qid, q->numpending);
return 0;
} else {
*entry = q->base + *index;
......@@ -288,7 +295,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
}
}
/**
/*Command thread: *
* aac_queue_get - get the next free QE
* @dev: Adapter
* @index: Returned index
......@@ -304,7 +311,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
* success.
*/
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
struct aac_entry * entry = NULL;
int map = 0;
......@@ -322,7 +329,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
/*
* Setup queue entry with a command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
map = 1;
}
else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
......@@ -334,17 +341,18 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
/*
* Setup queue entry with command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
entry->addr = cpu_to_le32(fib->header.SenderFibAddress); /* Restore adapters pointer to the FIB */
fib->header.ReceiverFibAddress = fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
entry->addr = hw_fib->header.SenderFibAddress;
/* Restore adapters pointer to the FIB */
hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
map = 0;
}
}
/*
* If MapFib is true than we need to map the Fib and put pointers
* in the queue entry.
*/
if (map)
entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr));
entry->addr = fibptr->hw_fib_pa;
return 0;
}
......@@ -415,11 +423,10 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
u32 qid;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * fib = fibptr->fib;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
if (!(le32_to_cpu(fib->header.XferState) & HostOwned))
if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned))
return -EBUSY;
/*
* There are 5 cases with the wait and reponse requested flags.
......@@ -435,19 +442,22 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
if (wait && !reply) {
return -EINVAL;
} else if (!wait && reply) {
fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
} else if (!wait && !reply) {
fib->header.XferState |= cpu_to_le32(NoResponseExpected);
hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
} else if (wait && reply) {
fib->header.XferState |= cpu_to_le32(ResponseExpected);
hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
}
/*
* Map the fib into 32bits by using the fib number
*/
fib->header.SenderData = fibptr-&dev->fibs[0]; /* for callback */
// hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1;
hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa);
hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
......@@ -455,15 +465,14 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
*
* Map the hw fib pointer as a 32bit value
*/
fib->header.SenderFibAddress = fib2addr(fib);
fib->header.Command = cpu_to_le16(command);
fib->header.XferState |= cpu_to_le32(SentFromHost);
fibptr->fib->header.Flags = 0; /* Zero the flags field - its internal only... */
hw_fib->header.Command = cpu_to_le16(command);
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
/*
* Set the size of the Fib we want to send to the adapter
*/
fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) {
hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
return -EMSGSIZE;
}
/*
......@@ -471,22 +480,25 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* the adapter a command is ready.
*/
if (priority == FsaHigh) {
fib->header.XferState |= cpu_to_le32(HighPriority);
hw_fib->header.XferState |= cpu_to_le32(HighPriority);
qid = AdapHighCmdQueue;
} else {
fib->header.XferState |= cpu_to_le32(NormalPriority);
hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
qid = AdapNormCmdQueue;
}
q = &dev->queues->queue[qid];
if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
return -EWOULDBLOCK;
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", fib->header.XferState));
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
dprintk((KERN_DEBUG " hw_fib pa being sent=%xl\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
......@@ -500,6 +512,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
q->numpending++;
fibptr->done = 0;
fibptr->flags = 0;
if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
return -EWOULDBLOCK;
......@@ -513,10 +526,11 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
if(fibptr->done == 0)
BUG();
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
return -ETIMEDOUT;
else
} else {
return 0;
}
}
/*
* If the user does not want a response than return success otherwise
......@@ -543,8 +557,7 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
{
u32 index;
int status;
if (*q->headers.producer == *q->headers.consumer) {
if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
status = 0;
} else {
/*
......@@ -564,7 +577,7 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
{
return (*q->headers.producer != *q->headers.consumer);
return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer));
}
......@@ -583,7 +596,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
int wasfull = 0;
u32 notify;
if (*q->headers.producer+1 == *q->headers.consumer)
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
wasfull = 1;
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
......@@ -625,16 +638,15 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
struct hw_fib * fib = fibptr->fib;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
if (le32_to_cpu(fib->header.XferState) == 0)
if (le32_to_cpu(hw_fib->header.XferState) == 0)
return 0;
/*
* If we plan to do anything check the structure type first.
*/
if ( fib->header.StructType != FIB_MAGIC ) {
if ( hw_fib->header.StructType != FIB_MAGIC ) {
return -EINVAL;
}
/*
......@@ -644,37 +656,36 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
* and want to send a response back to the adapter. This will
* send the completed cdb to the adapter.
*/
if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
fib->header.XferState |= cpu_to_le32(HostProcessed);
if (fib->header.XferState & cpu_to_le32(HighPriority)) {
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
u32 index;
if (size)
{
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(fib->header.SenderSize))
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
fib->header.Size = cpu_to_le16(size);
hw_fib->header.Size = cpu_to_le16(size);
}
if(aac_queue_get(dev, &index, AdapHighRespQueue, fib, 1, NULL, &nointr) < 0) {
if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
return -EWOULDBLOCK;
}
if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
}
}
else if (fib->header.XferState & NormalPriority)
else if (hw_fib->header.XferState & NormalPriority)
{
u32 index;
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(fib->header.SenderSize))
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
fib->header.Size = cpu_to_le16(size);
hw_fib->header.Size = cpu_to_le16(size);
}
if (aac_queue_get(dev, &index, AdapNormRespQueue, fib, 1, NULL, &nointr) < 0)
if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
return -EWOULDBLOCK;
if (aac_insert_entry(dev, index, AdapNormRespQueue,
(nointr & (int)aac_config.irq_mod)) != 0)
if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
{
}
}
......@@ -696,19 +707,19 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
int fib_complete(struct fib * fibptr)
{
struct hw_fib * fib = fibptr->fib;
struct hw_fib * hw_fib = fibptr->hw_fib;
/*
* Check for a fib which has already been completed
*/
if (fib->header.XferState == cpu_to_le32(0))
if (hw_fib->header.XferState == cpu_to_le32(0))
return 0;
/*
* If we plan to do anything check the structure type first.
*/
if (fib->header.StructType != FIB_MAGIC)
if (hw_fib->header.StructType != FIB_MAGIC)
return -EINVAL;
/*
* This block completes a cdb which orginated on the host and we
......@@ -716,19 +727,19 @@ int fib_complete(struct fib * fibptr)
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
if((fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(fib->header.XferState & cpu_to_le32(AdapterProcessed)))
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
{
fib_dealloc(fibptr);
}
else if(fib->header.XferState & cpu_to_le32(SentFromHost))
else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
{
/*
* This handles the case when the host has aborted the I/O
* to the adapter because the adapter is not responding
*/
fib_dealloc(fibptr);
} else if(fib->header.XferState & cpu_to_le32(HostOwned)) {
} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
fib_dealloc(fibptr);
} else {
BUG();
......@@ -778,13 +789,13 @@ void aac_printf(struct aac_dev *dev, u32 val)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * fib = fibptr->fib;
struct hw_fib * hw_fib = fibptr->hw_fib;
/*
* Set the status of this FIB to be Invalid parameter.
*
* *(u32 *)fib->data = ST_INVAL;
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fibptr, sizeof(u32));
}
......@@ -800,7 +811,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
int aac_command_thread(struct aac_dev * dev)
{
struct hw_fib *fib, *newfib;
struct hw_fib *hw_fib, *newfib;
struct fib fibptr; /* for error logging */
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
......@@ -816,8 +827,8 @@ int aac_command_thread(struct aac_dev * dev)
* Set up the name that will appear in 'ps'
* stored in task_struct.comm[16].
*/
daemonize("aacraid");
allow_signal(SIGKILL);
sprintf(current->comm, "aacraid");
daemonize();
/*
* Let the DPC know it has a place to send the AIF's to.
*/
......@@ -828,17 +839,18 @@ int aac_command_thread(struct aac_dev * dev)
while(1)
{
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
while(!aac_list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct aac_list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
entry = queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
entry = (struct aac_list_head*)(ulong)(queues->queue[HostNormCmdQueue].cmdq.next);
dprintk(("aacraid: Command thread: removing fib from cmdq (%p)\n",entry));
aac_list_del(entry);
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
/*
* We will process the FIB here or pass it to a
* worker thread that is TBD. We Really can't
......@@ -848,16 +860,17 @@ int aac_command_thread(struct aac_dev * dev)
memset(&fibptr, 0, sizeof(struct fib));
fibptr.type = FSAFS_NTC_FIB_CONTEXT;
fibptr.size = sizeof( struct fib );
fibptr.fib = fib;
fibptr.data = fib->data;
fibptr.hw_fib = hw_fib;
fibptr.data = hw_fib->data;
fibptr.dev = dev;
/*
* We only handle AifRequest fibs from the adapter.
*/
aifcmd = (struct aac_aifcmd *) fib->data;
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
aac_handle_aif(dev, &fibptr);
} else {
struct list_head *entry;
/* The u32 here is important and intended. We are using
32bit wrapping time to fit the adapter field */
......@@ -906,12 +919,12 @@ int aac_command_thread(struct aac_dev * dev)
/*
* Make the copy of the FIB
*/
memcpy(newfib, fib, sizeof(struct hw_fib));
memcpy(newfib, hw_fib, sizeof(struct hw_fib));
/*
* Put the FIB onto the
* fibctx's fibs
*/
list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
aac_list_add_tail(&newfib->header.FibLinks, &fibctx->hw_fib_list);
fibctx->count++;
/*
* Set the event to wake up the
......@@ -926,7 +939,7 @@ int aac_command_thread(struct aac_dev * dev)
/*
* Set the status of this FIB
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibptr, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
......
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
/*
* This file is for backwards compatibility with older kernel versions
*/
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,11)
#include <linux/blk.h>
/*
 * block_size - compat lookup of the soft block size of a device
 * @dev: kdev_t identifying the block device
 *
 * Back-port for kernels older than 2.4.11, which lack the real
 * block_size() helper.  Consults the global blksize_size[] table and
 * falls back to BLOCK_SIZE (1024) when no per-major or per-minor
 * entry has been registered.
 */
static inline unsigned int block_size(kdev_t dev)
{
	int *sizes = blksize_size[MAJOR(dev)];

	if (sizes == NULL)
		return BLOCK_SIZE;
	if (sizes[MINOR(dev)] == 0)
		return BLOCK_SIZE;
	return sizes[MINOR(dev)];
}
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,7)
#ifndef COMPLETION_INITIALIZER
#include <linux/wait.h>
/*
 * Minimal back-port of the completion primitive for kernels that
 * predate COMPLETION_INITIALIZER (<= 2.4.7).  Mirrors the upstream
 * layout so later compat helpers (wait_for_completion below) work.
 */
struct completion {
	unsigned int done;	/* completion events signalled but not yet consumed */
	wait_queue_head_t wait;	/* threads blocked waiting for the event */
};
/* Static initialiser: no events pending, empty wait queue. */
#define COMPLETION_INITIALIZER(work) \
	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
/* Define and initialise a completion in one declaration. */
#define DECLARE_COMPLETION(work) \
	struct completion work = COMPLETION_INITIALIZER(work)
/* Re-arm an already-initialised completion for reuse. */
#define INIT_COMPLETION(x)	((x).done = 0)
/*
 * init_completion - run-time initialiser
 * @x: completion embedded in a dynamically allocated object
 */
static inline void init_completion(struct completion *x)
{
	x->done = 0;
	init_waitqueue_head(&x->wait);
}
#endif
#ifndef complete_and_exit
/*
 * complete_and_exit - compat stub for kernels lacking the real helper
 * @comp: completion to signal (ignored)
 * @code: exit code for the thread (ignored)
 *
 * NOTE(review): the entire body is commented out, so this neither
 * signals @comp nor terminates the calling thread -- the caller simply
 * returns.  Presumably intentional for this compat layer, but confirm
 * no caller depends on the exit actually happening.
 */
static inline void complete_and_exit(struct completion *comp, long code)
{
	/*
	if (comp)
		complete(comp);
	do_exit(code);
	*/
}
#endif
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
/*
 * scsi_set_pci_device - compat no-op for kernels <= 2.4.2
 * @SHpnt: SCSI host to annotate
 * @pdev: PCI device that backs the host
 *
 * NOTE(review): the assignment is commented out, so the pci_dev is
 * never attached to the host.  Presumably Scsi_Host has no pci_dev
 * member on these kernels -- verify before re-enabling the store.
 */
static inline void scsi_set_pci_device(struct Scsi_Host *SHpnt,
	struct pci_dev *pdev)
{
//	SHpnt->pci_dev = pdev;
}
/*
 * wait_for_completion - compat implementation for kernels <= 2.4.2
 * @x: completion to wait on
 *
 * Sleeps uninterruptibly until complete() has raised x->done, then
 * consumes exactly one completion event.  The wait-queue spinlock
 * guards x->done; it is dropped around schedule() so the completing
 * thread can acquire it and update the count.
 */
static inline void wait_for_completion(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);
		/* Exclusive waiter: a wake-up rouses at most one of us. */
		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			/* State must be set before dropping the lock to
			 * avoid missing a wake-up from complete(). */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--;	/* consume the event we just observed */
	spin_unlock_irq(&x->wait.lock);
}
/*
 * pci_set_dma_mask - compat shim for kernels <= 2.4.2
 * @dev: PCI device whose DMA addressing capability is being declared
 * @mask: bitmask of addressable DMA memory
 *
 * Records the mask on the device and reports success unconditionally;
 * these old kernels performed no validation of the mask here.
 */
static inline int pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
	dev->dma_mask = mask;
	return 0;
}
#endif
......@@ -65,7 +65,6 @@ unsigned int aac_response_normal(struct aac_queue * q)
unsigned long flags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
......@@ -74,12 +73,14 @@ unsigned int aac_response_normal(struct aac_queue * q)
*/
while(aac_consumer_get(dev, q, &entry))
{
int fast;
u32 fast ;
fast = (entry->addr & cpu_to_le32(0x01));
// fib = &dev->fibs[(entry->addr >> 1)];
// hwfib = fib->hw_fib;
hwfib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01)));
fib = &dev->fibs[hwfib->header.SenderData];
fast = (int) (entry->addr & 0x01);
hwfib = addr2fib(entry->addr & ~0x01);
aac_consumer_free(dev, q, HostNormRespQueue);
fib = &dev->fibs[hwfib->header.SenderData];
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
......@@ -93,6 +94,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
dev->queues->queue[AdapNormCmdQueue].numpending--;
} else {
printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
continue;
}
spin_unlock_irqrestore(q->lock, flags);
......@@ -171,11 +173,11 @@ unsigned int aac_command_normal(struct aac_queue *q)
*/
while(aac_consumer_get(dev, q, &entry))
{
struct hw_fib * fib;
fib = addr2fib(entry->addr);
struct hw_fib * hw_fib;
hw_fib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01)));
if (dev->aif_thread) {
list_add_tail(&fib->header.FibLinks, &q->cmdq);
aac_list_add_tail(&hw_fib->header.FibLinks, &q->cmdq);
aac_consumer_free(dev, q, HostNormCmdQueue);
wake_up_interruptible(&q->cmdready);
} else {
......@@ -185,13 +187,13 @@ unsigned int aac_command_normal(struct aac_queue *q)
memset(&fibctx, 0, sizeof(struct fib));
fibctx.type = FSAFS_NTC_FIB_CONTEXT;
fibctx.size = sizeof(struct fib);
fibctx.fib = fib;
fibctx.data = fib->data;
fibctx.hw_fib = hw_fib;
fibctx.data = hw_fib->data;
fibctx.dev = dev;
/*
* Set the status of this FIB
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibctx, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
......
......@@ -35,7 +35,7 @@
*
*/
#define AAC_DRIVER_VERSION "0.9.9ac6-TEST"
#define AAC_DRIVER_VERSION "1.1.2"
#define AAC_DRIVER_BUILD_DATE __DATE__
#include <linux/module.h>
......@@ -54,19 +54,23 @@
#include "scsi.h"
#include "hosts.h"
#include <scsi/scsicam.h>
#include "aacraid.h"
#include "sd.h"
#define AAC_DRIVERNAME "aacraid"
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, PERC 320/DC, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com");
MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec Advanced Raid Products, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com");
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,7)
MODULE_LICENSE("GPL");
#endif
MODULE_PARM(nondasd, "i");
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
MODULE_PARM(paemode, "i");
MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 0=off, 1=on");
static int nondasd=-1;
int nondasd=-1;
int paemode=-1;
struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
......@@ -76,12 +80,12 @@ static int aac_cfg_major = -1;
/*
* Because of the way Linux names scsi devices, the order in this table has
* become important. Check for on-board Raid first, add-in cards second.
*
*/
/*
* dmb - For now we add the number of channels to this structure.
* In the future we should add a fib that reports the number of channels
* for the card. At that time we can remove the channels from here
*/
static struct aac_driver_ident aac_drivers[] = {
{ 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 2/Si */
{ 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
......@@ -97,11 +101,22 @@ static struct aac_driver_ident aac_drivers[] = {
{ 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1 }, /* Adaptec 2120S (Crusader)*/
{ 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan)*/
{ 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/
{ 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* Dell PERC 320/DC */
{ 0x9005, 0x0285, 0x17aa, 0x0286, aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1 }, /* Legend S220*/
{ 0x9005, 0x0285, 0x17aa, 0x0287, aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2 }, /* Legend S230*/
{ 0x9005, 0x0285, 0x9005, 0x0288, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier)*/
{ 0x9005, 0x0285, 0x9005, 0x0289, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado)*/
{ 0x9005, 0x0285, 0x9005, 0x028a, aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, /* ASR-2020S PCI-X ZCR (Skyhawk)*/
{ 0x9005, 0x0285, 0x9005, 0x028b, aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, /* ASR-2020S SO-DIMM PCI-X ZCR(Terminator)*/
{ 0x9005, 0x0285, 0x9005, 0x0290, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 2 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II)*/
{ 0x9005, 0x0250, 0x1014, 0x0279, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, /* (Marco)*/
{ 0x9005, 0x0250, 0x1014, 0x028c, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, /* (Sebring)*/
{ 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2 }, /* Perc 320/DC*/
{ 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4 }, /* Dell PERC2 "Quad Channel" */
{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID-4M ", 4 } /* HP NetRAID-4M */
{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4 } /* HP NetRAID-4M */
};
#define NUM_AACTYPES (sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
......@@ -112,17 +127,16 @@ static int aac_cfg_open(struct inode * inode, struct file * file);
static int aac_cfg_release(struct inode * inode,struct file * file);
static struct file_operations aac_cfg_fops = {
.owner = THIS_MODULE,
.ioctl = aac_cfg_ioctl,
.open = aac_cfg_open,
.release = aac_cfg_release
owner: THIS_MODULE,
ioctl: aac_cfg_ioctl,
open: aac_cfg_open,
release: aac_cfg_release
};
static int aac_detect(Scsi_Host_Template *);
static int aac_release(struct Scsi_Host *);
static int aac_queuecommand(Scsi_Cmnd *, void (*CompletionRoutine)(Scsi_Cmnd *));
static int aac_biosparm(struct scsi_device *, struct block_device *,
sector_t, int *);
static int aac_biosparm(Scsi_Disk *, kdev_t, int *);
static int aac_procinfo(char *, char **, off_t, int, int, int);
static int aac_ioctl(Scsi_Device *, int, void *);
static int aac_eh_abort(Scsi_Cmnd * cmd);
......@@ -130,7 +144,7 @@ static int aac_eh_device_reset(Scsi_Cmnd* cmd);
static int aac_eh_bus_reset(Scsi_Cmnd* cmd);
static int aac_eh_reset(Scsi_Cmnd* cmd);
static int aac_slave_configure(Scsi_Device *);
static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
/**
* aac_detect - Probe for aacraid cards
......@@ -162,13 +176,13 @@ static int aac_detect(Scsi_Host_Template *template)
struct fsa_scsi_hba *fsa_dev_ptr;
char *name = NULL;
printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", AAC_DRIVER_BUILD_DATE);
printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
/* setting up the proc directory structure */
template->proc_name = "aacraid";
spin_unlock_irq(&io_request_lock);
for( index = 0; index != num_aacdrivers; index++ )
{
for( index = 0; index != num_aacdrivers; index++ ) {
device_id = aac_drivers[index].device;
vendor_id = aac_drivers[index].vendor;
name = aac_drivers[index].name;
......@@ -206,20 +220,14 @@ static int aac_detect(Scsi_Host_Template *template)
* specific information.
*/
host_ptr = scsi_register( template, sizeof(struct aac_dev) );
if(host_ptr == NULL)
continue;
/*
* These three parameters can be used to allow for wide SCSI
* and for host adapters that support multiple buses.
*/
host_ptr->max_id = 17;
host_ptr->max_lun = 8;
host_ptr->max_channel = 1;
host_ptr->irq = dev->irq; /* Adapter IRQ number */
/* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
host_ptr->base = dev->resource[0].start;
scsi_set_device(host_ptr, &dev->dev);
scsi_set_pci_device(host_ptr, dev);
dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", host_ptr->base, dev->resource[0].start));
dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
/*
......@@ -232,17 +240,27 @@ static int aac_detect(Scsi_Host_Template *template)
* value returned as aac->id.
*/
host_ptr->unique_id = aac_count - 1;
/*
* This function is called after the device list has
* been built to find the tagged queueing depth
* supported for each device.
*/
host_ptr->select_queue_depths = aac_queuedepth;
aac = (struct aac_dev *)host_ptr->hostdata;
/* attach a pointer back to Scsi_Host */
aac->scsi_host_ptr = host_ptr;
aac->pdev = dev;
aac->cardtype = index;
aac->name = aac->scsi_host_ptr->hostt->name;
aac->id = aac->scsi_host_ptr->unique_id;
aac->cardtype = index;
aac->fibs = (struct fib*) kmalloc(sizeof(struct fib)*AAC_NUM_FIB, GFP_KERNEL);
spin_lock_init(&aac->fib_lock);
/* Initialize the ordinal number of the device to -1 */
fsa_dev_ptr = &(aac->fsa_dev);
for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ )
fsa_dev_ptr->devname[container][0] = '\0';
fsa_dev_ptr->devno[container] = -1;
dprintk((KERN_DEBUG "Initializing Hardware...\n"));
if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
......@@ -255,46 +273,28 @@ static int aac_detect(Scsi_Host_Template *template)
}
dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id));
aac_get_adapter_info(aac);
if(nondasd != -1)
{
/* someone told us how to set this on the cmdline */
aac->nondasd_support = (nondasd!=0);
}
if(aac->nondasd_support != 0){
printk(KERN_INFO "%s%d: Non-DASD support enabled\n", aac->name, aac->id);
}
dprintk((KERN_DEBUG "%s:%d options flag %04x.\n",name, host_ptr->unique_id,aac->adapter_info.options));
if(aac->nondasd_support == 1)
{
/*
* max channel will be the physical channels plus 1 virtual channel
* all containers are on the virtual channel 0
* physical channels are address by their actual physical number+1
*/
if(aac->nondasd_support == 1){
/*
* max channel will be the physical channels plus 1 virtual channel
* all containers are on the virtual channel 0
* physical channels are address by their actual physical number+1
*/
host_ptr->max_channel = aac_drivers[index].channels+1;
} else {
host_ptr->max_channel = 1;
}
dprintk((KERN_DEBUG "Device has %d logical channels\n", host_ptr->max_channel));
}
dprintk((KERN_DEBUG "Device has %d logical channels\n",host_ptr->max_channel));
aac_get_containers(aac);
aac_devices[aac_count-1] = aac;
// spin_unlock_irqrestore(&aac->fib_lock, flags);
/*
* dmb - we may need to move these 3 parms somewhere else once
* dmb - we may need to move the setting of these parms somewhere else once
* we get a fib that can report the actual numbers
*/
host_ptr->max_id = AAC_MAX_TARGET;
host_ptr->max_lun = AAC_MAX_LUN;
/*
* If we are PAE capable then our future DMA mappings
* (for read/write commands) are 64bit clean and don't
* need bouncing. This assumes we do no other 32bit only
* allocations (eg fib table expands) after this point.
*/
if(aac->pae_support)
pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFUL);
}
}
......@@ -302,6 +302,7 @@ static int aac_detect(Scsi_Host_Template *template)
if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n");
}
spin_lock_irq(&io_request_lock);
template->present = aac_count; /* # of cards of this type found */
return aac_count;
......@@ -356,17 +357,19 @@ static int aac_release(struct Scsi_Host *host_ptr)
* Queues a command for execution by the associated Host Adapter.
*/
static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd *))
static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*CompletionRoutine)(Scsi_Cmnd *))
{
int ret;
scsi_cmnd_ptr->scsi_done = complete;
scsi_cmnd_ptr->scsi_done = CompletionRoutine;
/*
* aac_scsi_cmd() handles command processing, setting the
* result code and calling completion routine.
*/
if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0){
dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
return FAILED;
}
return ret;
}
......@@ -397,7 +400,7 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
/**
* aac_biosparm - return BIOS parameters for disk
* @disk: SCSI disk object to process
* @device: Disk in question
* @device: kdev_t of the disk in question
* @geom: geometry block to fill in
*
* Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
......@@ -415,20 +418,19 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
* be displayed.
*/
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int *geom)
static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
{
struct diskparm *param = (struct diskparm *)geom;
u8 *buf;
struct buffer_head * buf;
dprintk((KERN_DEBUG "aac_biosparm.\n"));
/*
* Assuming extended translation is enabled - #REVISIT#
*/
if( capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
{
if( capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
{
param->heads = 255;
param->sectors = 63;
......@@ -445,24 +447,24 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
param->sectors = 32;
}
param->cylinders = cap_to_cyls(capacity,
(param->heads * param->sectors));
param->cylinders = disk->capacity/(param->heads * param->sectors);
/*
* Read the partition table block
* Read the first 1024 bytes from the disk device
*/
buf = scsi_bios_ptable(bdev);
buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
if(buf == NULL)
return 0;
/*
* If the boot sector partition table is valid, search for a partition
* table entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
if(*(unsigned short *)(buf + 0x40) == cpu_to_le16(0xaa55))
if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
{
struct partition *first = (struct partition *)buf;
struct partition *first = (struct partition * )(buf->b_data + 0x1be);
struct partition *entry = first;
int saved_cylinders = param->cylinders;
int num;
......@@ -500,8 +502,7 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
end_sec = first->end_sector & 0x3f;
}
param->cylinders = cap_to_cyls(capacity,
(param->heads * param->sectors));
param->cylinders = disk->capacity / (param->heads * param->sectors);
if(num < 4 && end_sec == param->sectors)
{
......@@ -517,101 +518,45 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
param->heads, param->sectors));
}
}
kfree(buf);
brelse(buf);
return 0;
}
/**
* aac_slave_configure - do device specific setup
* @dev: SCSI device we are attaching
*
* Currently, all we do is set the queue depth on the device.
*/
static int aac_slave_configure(Scsi_Device * dev )
{
if(dev->tagged_supported)
scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, 128);
else
scsi_adjust_queue_depth(dev, 0, 1);
dprintk((KERN_DEBUG "(scsi%d:%d:%d:%d) Tagged Queue depth %2d, "
"%s\n", dev->host->host_no, dev->channel,
dev->id, dev->lun, dev->queue_depth,
dev->online ? "OnLine" : "OffLine"));
return 0;
}
/**
* aac_eh_abort - Abort command if possible.
* @cmd: SCSI command block to abort
*
* Called when the midlayer wishes to abort a command. We don't support
* this facility, and our firmware looks after life for us. We just
* report this as failing
*/
static int aac_eh_abort(Scsi_Cmnd *cmd)
{
return FAILED;
}
/**
* aac_eh_device_reset - Reset command handling
* @cmd: SCSI command block causing the reset
* aac_queuedepth - compute queue depths
* @host: SCSI host in question
* @dev: SCSI device we are considering
*
* Issue a reset of a SCSI device. We are ourselves not truely a SCSI
* controller and our firmware will do the work for us anyway. Thus this
* is a no-op. We just return FAILED.
* Selects queue depths for each target device based on the host adapter's
* total capacity and the queue depth supported by the target device.
* A queue depth of one automatically disables tagged queueing.
*/
static int aac_eh_device_reset(Scsi_Cmnd *cmd)
static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
{
return FAILED;
}
/**
* aac_eh_bus_reset - Reset command handling
* @scsi_cmd: SCSI command block causing the reset
*
* Issue a reset of a SCSI bus. We are ourselves not truely a SCSI
* controller and our firmware will do the work for us anyway. Thus this
* is a no-op. We just return FAILED.
*/
Scsi_Device * dptr;
static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
{
return FAILED;
dprintk((KERN_DEBUG "aac_queuedepth.\n"));
dprintk((KERN_DEBUG "Device # Q Depth Online\n"));
dprintk((KERN_DEBUG "---------------------------\n"));
for(dptr = dev; dptr != NULL; dptr = dptr->next)
{
if(dptr->host == host)
{
dptr->queue_depth = 10;
dprintk((KERN_DEBUG " %2d %d %d\n",
dptr->id, dptr->queue_depth, dptr->online));
}
}
}
/**
* aac_eh_hba_reset - Reset command handling
* @scsi_cmd: SCSI command block causing the reset
*
* Issue a reset of a SCSI host. If things get this bad then arguably we should
* go take a look at what the host adapter is doing and see if something really
* broke (as can occur at least on my Dell QC card if a drive keeps failing spinup)
*/
static int aac_eh_reset(Scsi_Cmnd* cmd)
{
printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
return FAILED;
}
/*------------------------------------------------------------------------------
aac_ioctl()
/**
* aac_ioctl - Handle SCSI ioctls
* @scsi_dev_ptr: scsi device to operate upon
* @cmd: ioctl command to use issue
* @arg: ioctl data pointer
*
* Issue an ioctl on an aacraid device. Returns a standard unix error code or
* zero for success
*/
Handle SCSI ioctls
*----------------------------------------------------------------------------*/
static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
/*----------------------------------------------------------------------------*/
{
struct aac_dev *dev;
dprintk((KERN_DEBUG "aac_ioctl.\n"));
......@@ -633,7 +578,7 @@ static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
static int aac_cfg_open(struct inode * inode, struct file * file )
{
unsigned minor_number = minor(inode->i_rdev);
unsigned minor_number = MINOR(inode->i_rdev);
if(minor_number >= aac_count)
return -ENODEV;
return 0;
......@@ -669,7 +614,7 @@ static int aac_cfg_release(struct inode * inode, struct file * file )
static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg )
{
struct aac_dev *dev = aac_devices[minor(inode->i_rdev)];
struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
return aac_do_ioctl(dev, cmd, (void *)arg);
}
......@@ -680,28 +625,74 @@ static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int
*/
static Scsi_Host_Template driver_template = {
.module = THIS_MODULE,
.name = "AAC",
.proc_info = aac_procinfo,
.detect = aac_detect,
.release = aac_release,
.info = aac_driverinfo,
.ioctl = aac_ioctl,
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
.slave_configure = aac_slave_configure,
.can_queue = AAC_NUM_IO_FIB,
.this_id = 16,
.sg_tablesize = 16,
.max_sectors = 128,
.cmd_per_lun = 1,
.eh_abort_handler = aac_eh_abort,
.eh_device_reset_handler = aac_eh_device_reset,
.eh_bus_reset_handler = aac_eh_bus_reset,
.eh_host_reset_handler = aac_eh_reset,
.use_clustering = ENABLE_CLUSTERING,
module: THIS_MODULE,
name: "AAC",
proc_info: aac_procinfo,
detect: aac_detect,
release: aac_release,
info: aac_driverinfo,
ioctl: aac_ioctl,
queuecommand: aac_queuecommand,
bios_param: aac_biosparm,
can_queue: AAC_NUM_IO_FIB,
this_id: 16,
sg_tablesize: 16,
max_sectors: 128,
cmd_per_lun: AAC_NUM_IO_FIB,
eh_abort_handler: aac_eh_abort,
eh_device_reset_handler: aac_eh_device_reset,
eh_bus_reset_handler: aac_eh_bus_reset,
eh_host_reset_handler: aac_eh_reset,
use_new_eh_code: 1,
use_clustering: ENABLE_CLUSTERING,
};
/*===========================================================================
* Error Handling routines
*===========================================================================
*/
/*
*
* We don't support abortting commands.
*/
static int aac_eh_abort(Scsi_Cmnd * scsicmd)
{
printk("aacraid: abort failed\n");
return FAILED;
}
/*
* We don't support device resets.
*/
static int aac_eh_device_reset(Scsi_Cmnd* cmd)
{
printk("aacraid: device reset failed\n");
return FAILED;
}
static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
{
printk("aacraid: bus reset failed\n");
return FAILED;
}
static int aac_eh_reset(Scsi_Cmnd* cmd)
{
printk("aacraid: hba reset failed\n");
return FAILED;
}
/*===========================================================================
*
*===========================================================================
*/
#include "scsi_module.c"
/**
......@@ -735,3 +726,5 @@ static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
*start_ptr = proc_buffer;
return sprintf(proc_buffer, "%s %d\n", "Raid Controller, scsi hba number", host_no);
}
EXPORT_NO_SYMBOLS;
......@@ -352,7 +352,7 @@ int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
* Wait for the adapter to be up and running. Wait up to 3 minutes.
*/
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
if (time_after(start+180*HZ, jiffies)) {
if (time_after(jiffies, start+180*HZ)) {
status = sa_readl(dev, Mailbox7) >> 16;
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status));
return -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment