Commit 03f29536 authored by Alan Cox's avatar Alan Cox Committed by Linus Torvalds

[PATCH] aacraid driver for 2.5

Forward port from 2.4
parent c1a178bd
...@@ -51,6 +51,9 @@ fi
 if [ "$CONFIG_EISA" = "y" ]; then
 	dep_tristate 'Adaptec AHA1740 support' CONFIG_SCSI_AHA1740 $CONFIG_SCSI
 fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+	dep_tristate 'Adaptec AACRAID support (EXPERIMENTAL)' CONFIG_SCSI_AACRAID $CONFIG_SCSI $CONFIG_PCI
+fi
 source drivers/scsi/aic7xxx/Config.in
 if [ "$CONFIG_SCSI_AIC7XXX" != "y" ]; then
 	dep_tristate 'Old Adaptec AIC7xxx support' CONFIG_SCSI_AIC7XXX_OLD $CONFIG_SCSI
...
...@@ -58,6 +58,7 @@ obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
 obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
 obj-$(CONFIG_SCSI_AHA1740) += aha1740.o
 obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
+obj-$(CONFIG_SCSI_AACRAID) += aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
 obj-$(CONFIG_SCSI_IPS) += ips.o
 obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
...
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/scsi
O_TARGET := aacraid.o
obj-m := $(O_TARGET)
obj-y := linit.o aachba.o commctrl.o comminit.o commsup.o \
dpcsup.o rx.o sa.o
include $(TOPDIR)/Rules.make
AACRAID Driver for Linux (take two)
Introduction
-------------------------
The aacraid driver adds support for Adaptec (http://www.adaptec.com)
RAID controllers. This is a major rewrite from the original
Adaptec-supplied driver. It has significantly cleaned up both the code
and the running binary size (the module is less than half the size of
the original).
Supported Cards/Chipsets
-------------------------
Dell Computer Corporation PERC 2 Quad Channel
Dell Computer Corporation PERC 2/Si
Dell Computer Corporation PERC 3/Si
Dell Computer Corporation PERC 3/Di
HP NetRAID-4M
ADAPTEC 2120S
ADAPTEC 2200S
ADAPTEC 5400S
People
-------------------------
Alan Cox <alan@redhat.com>
Christoph Hellwig <hch@infradead.org> (small cleanups/fixes)
Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
Deanna Bonds <deanna_bonds@adaptec.com> (non-DASD support, PAE fibs and 64-bit support, added new Adaptec controllers,
	added new ioctls, changed the SCSI interface to use the new error handler,
	increased the number of fibs and outstanding commands to a container)
Original Driver
-------------------------
Adaptec Unix OEM Product Group
Mailing List
-------------------------
None currently. Also note this is very different to Brian's original driver
so don't expect him to support it.
Adaptec does support this driver. Contact either tech support or Deanna Bonds.
Original by Brian Boerner February 2001
Rewritten by Alan Cox, November 2001
o Testing
o More testing
o Feature request: display the firmware/bios/etc revisions in the
/proc info
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#define MAJOR_NR SCSI_DISK0_MAJOR /* For DEVICE_NR() */
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include "sd.h"
#include "aacraid.h"
/* FIXME: We share this with sd.c - wants putting in one spot only */
#define DEVICE_NR(device) (((major(device) & SD_MAJOR_MASK) << (8 - 4)) + (minor(device) >> 4))
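/* i.e. each disk spans 16 minors: (minor >> 4) selects the disk within
 * its major, and the masked major supplies the high bits of the ordinal. */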
/* SCSI Commands */
/* TODO: dmb - use the ones defined in include/scsi/scsi.h */
#define SS_TEST 0x00 /* Test unit ready */
#define SS_REZERO 0x01 /* Rezero unit */
#define SS_REQSEN 0x03 /* Request Sense */
#define SS_REASGN 0x07 /* Reassign blocks */
#define SS_READ 0x08 /* Read 6 */
#define SS_WRITE 0x0A /* Write 6 */
#define SS_INQUIR 0x12 /* inquiry */
#define SS_ST_SP 0x1B /* Start/Stop unit */
#define SS_LOCK 0x1E /* prevent/allow medium removal */
#define SS_RESERV 0x16 /* Reserve */
#define SS_RELES 0x17 /* Release */
#define SS_MODESEN 0x1A /* Mode Sense 6 */
#define SS_RDCAP 0x25 /* Read Capacity */
#define SM_READ 0x28 /* Read 10 */
#define SM_WRITE 0x2A /* Write 10 */
#define SS_SEEK 0x2B /* Seek */
/* values for inqd_pdt: Peripheral device type in plain English */
#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
#define INQD_PDT_PROC 0x03 /* Processor device */
#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
#define TARGET_LUN_TO_CONTAINER(target, lun) (target)
#define CONTAINER_TO_TARGET(cont) ((cont))
#define CONTAINER_TO_LUN(cont) (0)
#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
#define MAX_DRIVER_SG_SEGMENT_COUNT 17
/*
* Sense keys
*/
#define SENKEY_NO_SENSE 0x00
#define SENKEY_UNDEFINED 0x01
#define SENKEY_NOT_READY 0x02
#define SENKEY_MEDIUM_ERR 0x03
#define SENKEY_HW_ERR 0x04
#define SENKEY_ILLEGAL 0x05
#define SENKEY_ATTENTION 0x06
#define SENKEY_PROTECTED 0x07
#define SENKEY_BLANK 0x08
#define SENKEY_V_UNIQUE 0x09
#define SENKEY_CPY_ABORT 0x0A
#define SENKEY_ABORT 0x0B
#define SENKEY_EQUAL 0x0C
#define SENKEY_VOL_OVERFLOW 0x0D
#define SENKEY_MISCOMP 0x0E
#define SENKEY_RESERVED 0x0F
/*
* Sense codes
*/
#define SENCODE_NO_SENSE 0x00
#define SENCODE_END_OF_DATA 0x00
#define SENCODE_BECOMING_READY 0x04
#define SENCODE_INIT_CMD_REQUIRED 0x04
#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
#define SENCODE_INVALID_COMMAND 0x20
#define SENCODE_LBA_OUT_OF_RANGE 0x21
#define SENCODE_INVALID_CDB_FIELD 0x24
#define SENCODE_LUN_NOT_SUPPORTED 0x25
#define SENCODE_INVALID_PARAM_FIELD 0x26
#define SENCODE_PARAM_NOT_SUPPORTED 0x26
#define SENCODE_PARAM_VALUE_INVALID 0x26
#define SENCODE_RESET_OCCURRED 0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
#define SENCODE_DIAGNOSTIC_FAILURE 0x40
#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
#define SENCODE_INVALID_MESSAGE_ERROR 0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
#define SENCODE_OVERLAPPED_COMMAND 0x4E
/*
* Additional sense codes
*/
#define ASENCODE_NO_SENSE 0x00
#define ASENCODE_END_OF_DATA 0x05
#define ASENCODE_BECOMING_READY 0x01
#define ASENCODE_INIT_CMD_REQUIRED 0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
#define ASENCODE_INVALID_COMMAND 0x00
#define ASENCODE_LBA_OUT_OF_RANGE 0x00
#define ASENCODE_INVALID_CDB_FIELD 0x00
#define ASENCODE_LUN_NOT_SUPPORTED 0x00
#define ASENCODE_INVALID_PARAM_FIELD 0x00
#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
#define ASENCODE_PARAM_VALUE_INVALID 0x02
#define ASENCODE_RESET_OCCURRED 0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
/*------------------------------------------------------------------------------
* S T R U C T S / T Y P E D E F S
*----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
u8 inqd_dtq; /* RMB | Device Type Qualifier */
u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
u8 inqd_len; /* Additional length (n-4) */
u8 inqd_pad1[2]; /* Reserved - must be zero */
u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
u8 inqd_vid[8]; /* Vendor ID */
u8 inqd_pid[16]; /* Product ID */
u8 inqd_prl[4]; /* Product Revision Level */
};
struct sense_data {
u8 error_code; /* 70h (current errors), 71h(deferred errors) */
u8 valid:1; /* A valid bit of one indicates that the information */
/* field contains valid information as defined in the
* SCSI-2 Standard.
*/
u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
u8 sense_key:4; /* Sense Key */
u8 reserved:1;
u8 ILI:1; /* Incorrect Length Indicator */
u8 EOM:1; /* End Of Medium - reserved for random access devices */
u8 filemark:1; /* Filemark - reserved for random access devices */
u8 information[4]; /* for direct-access devices, contains the unsigned
* logical block address or residue associated with
* the sense key
*/
u8 add_sense_len; /* number of additional sense bytes to follow this field */
u8 cmnd_info[4]; /* not used */
u8 ASC; /* Additional Sense Code */
u8 ASCQ; /* Additional Sense Code Qualifier */
u8 FRUC; /* Field Replaceable Unit Code - not used */
u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
* was in error
*/
u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
* the bit_ptr field has valid value
*/
u8 reserved2:2;
u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
* 0- illegal parameter in data.
*/
u8 SKSV:1;
u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
};
/*
* M O D U L E G L O B A L S
*/
static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /* SCSI Device Instance Pointers */
static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
static void get_sd_devname(int disknum, char *buffer);
static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg);
static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif
/**
* aac_get_containers - list containers
 * @dev: adapter to probe
*
* Make a list of all containers on this controller
*/
int aac_get_containers(struct aac_dev *dev)
{
struct fsa_scsi_hba *fsa_dev_ptr;
u32 index, status = 0;
struct aac_query_mount *dinfo;
struct aac_mount *dresp;
struct fib * fibptr;
unsigned instance;
fsa_dev_ptr = &(dev->fsa_dev);
instance = dev->scsi_host_ptr->unique_id;
if (!(fibptr = fib_alloc(dev)))
return -ENOMEM;
for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
fib_init(fibptr);
dinfo = (struct aac_query_mount *) fib_data(fibptr);
dinfo->command = cpu_to_le32(VM_NameServe);
dinfo->count = cpu_to_le32(index);
dinfo->type = cpu_to_le32(FT_FILESYS);
status = fib_send(ContainerCommand,
fibptr,
sizeof (struct aac_query_mount),
FsaNormal,
1, 1,
NULL, NULL);
if (status < 0 ) {
printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
break;
}
dresp = (struct aac_mount *)fib_data(fibptr);
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
fsa_dev_ptr->valid[index] = 1;
fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr->ro[index] = 1;
}
fib_complete(fibptr);
/*
* If there are no more containers, then stop asking.
*/
if ((index + 1) >= le32_to_cpu(dresp->count))
break;
}
fib_free(fibptr);
fsa_dev[instance] = fsa_dev_ptr;
return status;
}
/**
* probe_container - query a logical volume
* @dev: device to query
* @cid: container identifier
*
* Queries the controller about the given volume. The volume information
* is updated in the struct fsa_scsi_hba structure rather than returned.
*/
static int probe_container(struct aac_dev *dev, int cid)
{
struct fsa_scsi_hba *fsa_dev_ptr;
int status;
struct aac_query_mount *dinfo;
struct aac_mount *dresp;
struct fib * fibptr;
unsigned instance;
fsa_dev_ptr = &(dev->fsa_dev);
instance = dev->scsi_host_ptr->unique_id;
if (!(fibptr = fib_alloc(dev)))
return -ENOMEM;
fib_init(fibptr);
dinfo = (struct aac_query_mount *)fib_data(fibptr);
dinfo->command = cpu_to_le32(VM_NameServe);
dinfo->count = cpu_to_le32(cid);
dinfo->type = cpu_to_le32(FT_FILESYS);
status = fib_send(ContainerCommand,
fibptr,
sizeof(struct aac_query_mount),
FsaNormal,
1, 1,
NULL, NULL);
if (status < 0) {
printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
goto error;
}
dresp = (struct aac_mount *) fib_data(fibptr);
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
fsa_dev_ptr->valid[cid] = 1;
fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr->ro[cid] = 1;
}
error:
fib_complete(fibptr);
fib_free(fibptr);
return status;
}
/* Local Structure to set SCSI inquiry data strings */
struct scsi_inq {
char vid[8]; /* Vendor ID */
char pid[16]; /* Product ID */
char prl[4]; /* Product Revision Level */
};
/**
 * inqstrcpy - string copy
 * @a: string to copy from
 * @b: string to copy to
 *
 * Copy a string from one location to another
 * without copying the terminating \0
*/
static void inqstrcpy(char *a, char *b)
{
while(*a != (char)0)
*b++ = *a++;
}
static char *container_types[] = {
"None",
"Volume",
"Mirror",
"Stripe",
"RAID5",
"SSRW",
"SSRO",
"Morph",
"Legacy",
"RAID4",
"RAID10",
"RAID00",
"V-MIRRORS",
"PSEUDO R4",
"RAID50",
"Unknown"
};
/* Function: setinqstr
 *
 * Arguments: device type, pointer to inquiry data block, container type index
 *
 * Purpose: Sets SCSI inquiry data strings for vendor, product
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
 */
static void setinqstr(int devtype, void *data, int tindex)
{
struct scsi_inq *str;
char *findit;
struct aac_driver_ident *mp;
mp = aac_get_driver_ident(devtype);
str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
inqstrcpy (mp->vname, str->vid);
inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
findit = str->pid;
for ( ; *findit != ' '; findit++); /* walk till we find a space then incr by 1 */
findit++;
if (tindex < (sizeof(container_types)/sizeof(char *))){
inqstrcpy (container_types[tindex], findit);
}
inqstrcpy ("V1.0", str->prl);
}
void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
u8 a_sense_code, u8 incorrect_length,
u8 bit_pointer, u16 field_pointer,
u32 residue)
{
sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
sense_buf[1] = 0; /* Segment number, always zero */
if (incorrect_length) {
sense_buf[2] = sense_key | 0x20; /* Set ILI bit | sense key */
sense_buf[3] = BYTE3(residue);
sense_buf[4] = BYTE2(residue);
sense_buf[5] = BYTE1(residue);
sense_buf[6] = BYTE0(residue);
} else
sense_buf[2] = sense_key; /* Sense key */
if (sense_key == SENKEY_ILLEGAL)
sense_buf[7] = 10; /* Additional sense length */
else
sense_buf[7] = 6; /* Additional sense length */
sense_buf[12] = sense_code; /* Additional sense code */
sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
if (sense_key == SENKEY_ILLEGAL) {
sense_buf[15] = 0;
if (sense_code == SENCODE_INVALID_PARAM_FIELD)
sense_buf[15] = 0x80; /* Std sense key specific field */
/* Illegal parameter is in the parameter block */
if (sense_code == SENCODE_INVALID_CDB_FIELD)
sense_buf[15] = 0xc0; /* Std sense key specific field */
/* Illegal parameter is in the CDB block */
sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
sense_buf[17] = field_pointer; /* LSB */
}
}
static void aac_io_done(Scsi_Cmnd * scsicmd)
{
unsigned long cpu_flags;
spin_lock_irqsave(scsicmd->host->host_lock, cpu_flags);
scsicmd->scsi_done(scsicmd);
spin_unlock_irqrestore(scsicmd->host->host_lock, cpu_flags);
}
static void __aac_io_done(Scsi_Cmnd * scsicmd)
{
scsicmd->scsi_done(scsicmd);
}
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
struct aac_adapter_info* info;
int rcode;
u32 tmp;
if (!(fibptr = fib_alloc(dev)))
return -ENOMEM;
fib_init(fibptr);
info = (struct aac_adapter_info*) fib_data(fibptr);
memset(info,0,sizeof(struct aac_adapter_info));
rcode = fib_send(RequestAdapterInfo,
fibptr,
sizeof(struct aac_adapter_info),
FsaNormal,
1, 1,
NULL,
NULL);
memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
tmp = dev->adapter_info.kernelrev;
printk(KERN_INFO "%s%d: kernel %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.kernelbuild);
tmp = dev->adapter_info.monitorrev;
printk(KERN_INFO "%s%d: monitor %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.monitorbuild);
tmp = dev->adapter_info.biosrev;
printk(KERN_INFO "%s%d: bios %d.%d.%d build %d\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
dev->adapter_info.biosbuild);
printk(KERN_INFO "%s%d: serial %x%x\n",
dev->name, dev->id,
dev->adapter_info.serial[0],
dev->adapter_info.serial[1]);
dev->pae_support = 0;
dev->nondasd_support = 0;
if( BITS_PER_LONG >= 64 &&
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
dev->pae_support = 1;
}
/* TODO - dmb temporary until fw can set this bit */
dev->pae_support = (BITS_PER_LONG >= 64);
if(dev->pae_support != 0) {
printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
}
if(dev->adapter_info.options & AAC_OPT_NONDASD){
dev->nondasd_support = 1;
}
return rcode;
}
static void read_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_read_reply *readreply;
Scsi_Cmnd *scsicmd;
u32 lba;
u32 cid;
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->host->hostdata;
cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
if (fibptr == NULL)
BUG();
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
(struct scatterlist *)scsicmd->buffer,
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
readreply = (struct aac_read_reply *)fib_data(fibptr);
if (le32_to_cpu(readreply->status) == ST_OK)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
else {
printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
set_sense((u8 *) &sense_data[cid],
SENKEY_HW_ERR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
}
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
}
static void write_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_write_reply *writereply;
Scsi_Cmnd *scsicmd;
u32 lba;
u32 cid;
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->host->hostdata;
cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
if (fibptr == NULL)
BUG();
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
(struct scatterlist *)scsicmd->buffer,
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
writereply = (struct aac_write_reply *) fib_data(fibptr);
if (le32_to_cpu(writereply->status) == ST_OK)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
else {
printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
set_sense((u8 *) &sense_data[cid],
SENKEY_HW_ERR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
}
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
}
int aac_read(Scsi_Cmnd * scsicmd, int cid)
{
u32 lba;
u32 count;
int status;
u16 fibsize;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
dev = (struct aac_dev *)scsicmd->host->hostdata;
/*
* Get block address and transfer length
*/
if (scsicmd->cmnd[0] == SS_READ) /* 6 byte command */
{
dprintk((KERN_DEBUG "aachba: received a read(6) command on target %d.\n", cid));
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
} else {
dprintk((KERN_DEBUG "aachba: received a read(10) command on target %d.\n", cid));
lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
/*
 * Allocate and initialize a Fib
*/
if (!(cmd_fibcontext = fib_alloc(dev))) {
scsicmd->result = DID_ERROR << 16;
aac_io_done(scsicmd);
return (-1);
}
fib_init(cmd_fibcontext);
if(dev->pae_support == 1){
struct aac_read64 *readcmd;
readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(cid);
readcmd->sector_count = cpu_to_le16(count);
readcmd->block = cpu_to_le32(lba);
readcmd->pad = cpu_to_le16(0);
readcmd->flags = cpu_to_le16(0);
aac_build_sg64(scsicmd, &readcmd->sg);
if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerCommand64,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) read_callback,
(void *) scsicmd);
} else {
struct aac_read *readcmd;
readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(cid);
readcmd->block = cpu_to_le32(lba);
readcmd->count = cpu_to_le32(count * 512);
if (count * 512 > (64 * 1024))
BUG();
aac_build_sg(scsicmd, &readcmd->sg);
if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerCommand,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) read_callback,
(void *) scsicmd);
}
/*
* Check that the command queued to the controller
*/
if (status == -EINPROGRESS)
return 0;
printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
aac_io_done(scsicmd);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
return -1;
}
static int aac_write(Scsi_Cmnd * scsicmd, int cid)
{
u32 lba;
u32 count;
int status;
u16 fibsize;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
dev = (struct aac_dev *)scsicmd->host->hostdata;
/*
* Get block address and transfer length
*/
if (scsicmd->cmnd[0] == SS_WRITE) /* 6 byte command */
{
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on target %d.\n", cid));
lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %lu, t = %ld.\n", smp_processor_id(), lba, jiffies));
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
if (!(cmd_fibcontext = fib_alloc(dev))) {
scsicmd->result = DID_ERROR << 16;
aac_io_done(scsicmd);
return -1;
}
fib_init(cmd_fibcontext);
if(dev->pae_support == 1)
{
struct aac_write64 *writecmd;
writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(cid);
writecmd->sector_count = cpu_to_le16(count);
writecmd->block = cpu_to_le32(lba);
writecmd->pad = cpu_to_le16(0);
writecmd->flags = cpu_to_le16(0);
aac_build_sg64(scsicmd, &writecmd->sg);
if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerCommand64,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) write_callback,
(void *) scsicmd);
}
else
{
struct aac_write *writecmd;
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(cid);
writecmd->block = cpu_to_le32(lba);
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
if (count * 512 > (64 * 1024))
BUG();
aac_build_sg(scsicmd, &writecmd->sg);
if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
BUG();
fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerCommand,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) write_callback,
(void *) scsicmd);
}
/*
* Check that the command queued to the controller
*/
if (status == -EINPROGRESS)
return 0;
printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
aac_io_done(scsicmd);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
return -1;
}
/**
* aac_scsi_cmd() - Process SCSI command
* @scsicmd: SCSI command block
*
* Emulate a SCSI command and queue the required request for the
* aacraid firmware.
*/
int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
{
u32 cid = 0;
struct fsa_scsi_hba *fsa_dev_ptr;
int cardtype;
int ret;
struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
cardtype = dev->cardtype;
fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
/*
* If the bus, target or lun is out of range, return fail
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
if (scsicmd->target != scsicmd->host->this_id) {
if ((scsicmd->channel == 0) ){
if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
/*
* If the target container doesn't exist, it may have
* been newly created
*/
if (fsa_dev_ptr->valid[cid] == 0) {
switch (scsicmd->cmnd[0]) {
case SS_INQUIR:
case SS_RDCAP:
case SS_TEST:
spin_unlock_irq(scsicmd->host->host_lock);
probe_container(dev, cid);
spin_lock_irq(scsicmd->host->host_lock);
if (fsa_dev_ptr->valid[cid] == 0) {
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
default:
break;
}
}
/*
* If the target container still doesn't exist,
* return failure
*/
if (fsa_dev_ptr->valid[cid] == 0) {
scsicmd->result = DID_BAD_TARGET << 16;
__aac_io_done(scsicmd);
return -1;
}
} else { /* check for physical non-dasd devices */
if(dev->nondasd_support == 1){
return aac_send_srb_fib(scsicmd);
} else {
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
}
}
/*
* else Command for the controller itself
*/
else if ((scsicmd->cmnd[0] != SS_INQUIR) && /* only INQUIRY & TUR cmnd supported for controller */
(scsicmd->cmnd[0] != SS_TEST))
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
set_sense((u8 *) &sense_data[cid],
SENKEY_ILLEGAL,
SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
__aac_io_done(scsicmd);
return -1;
}
/* Handle commands here that don't really require going out to the adapter */
switch (scsicmd->cmnd[0]) {
case SS_INQUIR:
{
struct inquiry_data *inq_data_ptr;
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data_ptr->inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
/*
* Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c
*/
setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
if (scsicmd->target == scsicmd->host->this_id)
inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
else
inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return 0;
}
case SS_RDCAP:
{
int capacity;
char *cp;
dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
capacity = fsa_dev_ptr->size[cid] - 1;
cp = scsicmd->request_buffer;
cp[0] = (capacity >> 24) & 0xff;
cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff;
cp[3] = (capacity >> 0) & 0xff;
cp[4] = 0;
cp[5] = 0;
cp[6] = 2;
cp[7] = 0;
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return 0;
}
case SS_MODESEN:
{
char *mode_buf;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
mode_buf = scsicmd->request_buffer;
mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */
mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return 0;
}
case SS_REQSEN:
dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
memset(&sense_data[cid], 0, sizeof (struct sense_data));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return (0);
case SS_LOCK:
dprintk((KERN_DEBUG "LOCK command.\n"));
if (scsicmd->cmnd[4])
fsa_dev_ptr->locked[cid] = 1;
else
fsa_dev_ptr->locked[cid] = 0;
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return 0;
/*
* These commands are all No-Ops
*/
case SS_TEST:
case SS_RESERV:
case SS_RELES:
case SS_REZERO:
case SS_REASGN:
case SS_SEEK:
case SS_ST_SP:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
__aac_io_done(scsicmd);
return (0);
}
switch (scsicmd->cmnd[0])
{
case SS_READ:
case SM_READ:
/*
* Hack to keep track of ordinal number of the device that
* corresponds to a container. Needed to convert
* containers to /dev/sd device names
*/
spin_unlock_irq(scsicmd->host->host_lock);
fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->sc_request->sr_request->rq_dev);
ret = aac_read(scsicmd, cid);
spin_lock_irq(scsicmd->host->host_lock);
return ret;
case SS_WRITE:
case SM_WRITE:
spin_unlock_irq(scsicmd->host->host_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(scsicmd->host->host_lock);
return ret;
default:
/*
* Unhandled commands
*/
printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
set_sense((u8 *) &sense_data[cid],
SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
__aac_io_done(scsicmd);
return -1;
}
}
static int query_disk(struct aac_dev *dev, void *arg)
{
struct aac_query_disk qd;
struct fsa_scsi_hba *fsa_dev_ptr;
fsa_dev_ptr = &(dev->fsa_dev);
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT;
if (qd.cnum == -1)
qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1))
{
if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
qd.instance = dev->scsi_host_ptr->host_no;
qd.bus = 0;
qd.target = CONTAINER_TO_TARGET(qd.cnum);
qd.lun = CONTAINER_TO_LUN(qd.cnum);
}
else return -EINVAL;
qd.valid = fsa_dev_ptr->valid[qd.cnum];
qd.locked = fsa_dev_ptr->locked[qd.cnum];
qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
if (fsa_dev_ptr->devno[qd.cnum] == -1)
qd.unmapped = 1;
else
qd.unmapped = 0;
get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
return -EFAULT;
return 0;
}
static void get_sd_devname(int disknum, char *buffer)
{
if (disknum < 0) {
sprintf(buffer, "%s", "");
return;
}
if (disknum < 26)
sprintf(buffer, "sd%c", 'a' + disknum);
else {
unsigned int min1;
unsigned int min2;
/*
* For larger numbers of disks, we need to go to a new
* naming scheme.
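 * For example, disknum 26 maps to "sdaa" and 27 to "sdab".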
*/
min1 = disknum / 26;
min2 = disknum % 26;
sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
}
}
static int force_delete_disk(struct aac_dev *dev, void *arg)
{
struct aac_delete_disk dd;
struct fsa_scsi_hba *fsa_dev_ptr;
fsa_dev_ptr = &(dev->fsa_dev);
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
/*
* Mark this container as being deleted.
*/
fsa_dev_ptr->deleted[dd.cnum] = 1;
/*
* Mark the container as no longer valid
*/
fsa_dev_ptr->valid[dd.cnum] = 0;
return 0;
}
static int delete_disk(struct aac_dev *dev, void *arg)
{
struct aac_delete_disk dd;
struct fsa_scsi_hba *fsa_dev_ptr;
fsa_dev_ptr = &(dev->fsa_dev);
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
return -EINVAL;
/*
* If the container is locked, it can not be deleted by the API.
*/
if (fsa_dev_ptr->locked[dd.cnum])
return -EBUSY;
else {
/*
* Mark the container as no longer being valid.
*/
fsa_dev_ptr->valid[dd.cnum] = 0;
fsa_dev_ptr->devno[dd.cnum] = -1;
return 0;
}
}
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
{
switch (cmd) {
case FSACTL_QUERY_DISK:
return query_disk(dev, arg);
case FSACTL_DELETE_DISK:
return delete_disk(dev, arg);
case FSACTL_FORCE_DELETE_DISK:
return force_delete_disk(dev, arg);
case 2131:
return aac_get_containers(dev);
default:
return -ENOTTY;
}
}
/**
*
* aac_srb_callback
* @context: the context set in the fib - here it is scsi cmd
* @fibptr: pointer to the fib
*
 * Handles the completion of a scsi command to a non-DASD device
*
*/
static void aac_srb_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_srb_reply *srbreply;
Scsi_Cmnd *scsicmd;
scsicmd = (Scsi_Cmnd *) context;
dev = (struct aac_dev *)scsicmd->host->hostdata;
if (fibptr == NULL)
BUG();
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
// calculate resid for sg
scsicmd->resid = scsicmd->request_bufflen - le32_to_cpu(srbreply->data_xfer_length);
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
(struct scatterlist *)scsicmd->buffer,
scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
/*
* First check the fib status
*/
if (le32_to_cpu(srbreply->status) != ST_OK){
int len;
printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
}
/*
* Next check the srb status
*/
switch(le32_to_cpu(srbreply->srb_status)){
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
if(scsicmd->cmnd[0] == INQUIRY ){
u8 b;
/* We can't expose disk devices because we can't tell whether they
* are the raw container drives or stand alone drives
*/
b = *(u8*)scsicmd->buffer;
if( (b & 0x0f) == TYPE_DISK ){
scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
}
} else {
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
}
break;
case SRB_STATUS_DATA_OVERRUN:
switch(scsicmd->cmnd[0]){
case READ_6:
case WRITE_6:
case READ_10:
case WRITE_10:
case READ_12:
case WRITE_12:
if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {
printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
}
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
default:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
}
break;
case SRB_STATUS_ABORTED:
scsicmd->result = DID_ABORT << 16 | ABORT << 8;
break;
case SRB_STATUS_ABORT_FAILED:
// Not sure about this one - but assuming the hba was trying to abort for some reason
scsicmd->result = DID_ERROR << 16 | ABORT << 8;
break;
case SRB_STATUS_PARITY_ERROR:
scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
break;
case SRB_STATUS_NO_DEVICE:
case SRB_STATUS_INVALID_PATH_ID:
case SRB_STATUS_INVALID_TARGET_ID:
case SRB_STATUS_INVALID_LUN:
case SRB_STATUS_SELECTION_TIMEOUT:
scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_COMMAND_TIMEOUT:
case SRB_STATUS_TIMEOUT:
scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUSY:
scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUS_RESET:
scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_MESSAGE_REJECTED:
scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
break;
case SRB_STATUS_REQUEST_FLUSHED:
case SRB_STATUS_ERROR:
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_REQUEST_SENSE_FAILED:
case SRB_STATUS_NO_HBA:
case SRB_STATUS_UNEXPECTED_BUS_FREE:
case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
case SRB_STATUS_DELAYED_RETRY:
case SRB_STATUS_BAD_FUNCTION:
case SRB_STATUS_NOT_STARTED:
case SRB_STATUS_NOT_IN_USE:
case SRB_STATUS_FORCE_ABORT:
case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
default:
#ifdef AAC_DETAILED_STATUS_INFO
printk("aacraid: SRB ERROR (%s)\n",aac_get_status_string(le32_to_cpu(srbreply->srb_status)));
#endif
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
}
if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
int len;
len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
}
/*
* OR in the scsi status (already shifted up a bit)
*/
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
}
/**
*
 * aac_send_srb_fib
* @scsicmd: the scsi command block
*
* This routine will form a FIB and fill in the aac_srb from the
* scsicmd passed in.
*/
static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
{
struct fib* cmd_fibcontext;
struct aac_dev* dev;
int status;
struct aac_srb *srbcmd;
u16 fibsize;
u32 flag;
if( scsicmd->target > 15 || scsicmd->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
__aac_io_done(scsicmd);
return 0;
}
dev = (struct aac_dev *)scsicmd->host->hostdata;
switch(scsicmd->sc_data_direction){
case SCSI_DATA_WRITE:
flag = SRB_DataOut;
break;
case SCSI_DATA_UNKNOWN:
flag = SRB_DataIn | SRB_DataOut;
break;
case SCSI_DATA_READ:
flag = SRB_DataIn;
break;
case SCSI_DATA_NONE:
default:
flag = SRB_NoDataXfer;
break;
}
/*
 * Allocate and initialize a Fib then setup a SRB command
*/
if (!(cmd_fibcontext = fib_alloc(dev))) {
scsicmd->result = DID_ERROR << 16;
__aac_io_done(scsicmd);
return -1;
}
fib_init(cmd_fibcontext);
srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
srbcmd->target = cpu_to_le32(scsicmd->target);
srbcmd->lun = cpu_to_le32(scsicmd->lun);
srbcmd->flags = cpu_to_le32(flag);
srbcmd->timeout = cpu_to_le32(0); // timeout not used
srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
if( dev->pae_support ==1 ) {
aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback, (void *) scsicmd);
} else {
aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback, (void *) scsicmd);
}
/*
* Check that the command queued to the controller
*/
if (status == -EINPROGRESS){
return 0;
}
printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
__aac_io_done(scsicmd);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
return -1;
}
static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
dev = (struct aac_dev *)scsicmd->host->hostdata;
// Get rid of old data
psg->count = cpu_to_le32(0);
psg->sg[0].addr = cpu_to_le32(0);
psg->sg[0].count = cpu_to_le32(0);
if (scsicmd->use_sg) {
struct scatterlist *sg;
int i;
int sg_count;
sg = (struct scatterlist *) scsicmd->request_buffer;
sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
psg->count = cpu_to_le32(sg_count);
byte_count = 0;
for (i = 0; i < sg_count; i++) {
psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
sg++;
}
/* hba wants the size to be exact */
if(byte_count > scsicmd->request_bufflen){
psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
byte_count = scsicmd->request_bufflen;
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
byte_count, scsicmd->underflow);
}
}
else if(scsicmd->request_bufflen) {
dma_addr_t addr;
addr = pci_map_single(dev->pdev,
scsicmd->request_buffer,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
psg->count = cpu_to_le32(1);
psg->sg[0].addr = cpu_to_le32(addr);
psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
scsicmd->SCp.ptr = (void *)addr;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
}
static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
u64 le_addr;
dev = (struct aac_dev *)scsicmd->host->hostdata;
// Get rid of old data
psg->count = cpu_to_le32(0);
psg->sg[0].addr[0] = cpu_to_le32(0);
psg->sg[0].addr[1] = cpu_to_le32(0);
psg->sg[0].count = cpu_to_le32(0);
if (scsicmd->use_sg) {
struct scatterlist *sg;
int i;
int sg_count;
sg = (struct scatterlist *) scsicmd->request_buffer;
sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
psg->count = cpu_to_le32(sg_count);
byte_count = 0;
for (i = 0; i < sg_count; i++) {
le_addr = cpu_to_le64(sg_dma_address(sg));
psg->sg[i].addr[1] = (u32)(le_addr>>32);
psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
sg++;
}
/* hba wants the size to be exact */
if(byte_count > scsicmd->request_bufflen){
psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
byte_count = scsicmd->request_bufflen;
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
byte_count, scsicmd->underflow);
}
}
else if(scsicmd->request_bufflen) {
dma_addr_t addr;
addr = pci_map_single(dev->pdev,
scsicmd->request_buffer,
scsicmd->request_bufflen,
scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
psg->count = cpu_to_le32(1);
le_addr = cpu_to_le64(addr);
psg->sg[0].addr[1] = (u32)(le_addr>>32);
psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
scsicmd->SCp.ptr = (void *)addr;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
}
#ifdef AAC_DETAILED_STATUS_INFO
struct aac_srb_status_info {
u32 status;
char *str;
};
static struct aac_srb_status_info srb_status_info[] = {
{ SRB_STATUS_PENDING, "Pending Status"},
{ SRB_STATUS_SUCCESS, "Success"},
{ SRB_STATUS_ABORTED, "Aborted Command"},
{ SRB_STATUS_ABORT_FAILED, "Abort Failed"},
{ SRB_STATUS_ERROR, "Error Event"},
{ SRB_STATUS_BUSY, "Device Busy"},
{ SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
{ SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
{ SRB_STATUS_NO_DEVICE, "No Device"},
{ SRB_STATUS_TIMEOUT, "Timeout"},
{ SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
{ SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
{ SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
{ SRB_STATUS_BUS_RESET, "Bus Reset"},
{ SRB_STATUS_PARITY_ERROR, "Parity Error"},
{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
{ SRB_STATUS_NO_HBA, "No HBA"},
{ SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
{ SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
{ SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
{ SRB_STATUS_INVALID_LUN, "Invalid LUN"},
{ SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
{ SRB_STATUS_BAD_FUNCTION, "Bad Function"},
{ SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
{ SRB_STATUS_NOT_STARTED, "Not Started"},
{ SRB_STATUS_NOT_IN_USE, "Not In Use"},
{ SRB_STATUS_FORCE_ABORT, "Force Abort"},
{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
{ 0xff, "Unknown Error"}
};
char *aac_get_status_string(u32 status)
{
int i;
for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
if(srb_status_info[i].status == status){
return srb_status_info[i].str;
}
}
return "Bad Status Code";
}
#endif
#define dprintk(x)
/*#define dprintk(x) printk x */
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
#define MAXIMUM_NUM_CONTAINERS 31
#define MAXIMUM_NUM_ADAPTERS 8
#define AAC_NUM_FIB 578
#define AAC_NUM_IO_FIB 512
#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
//#define AAC_MAX_TARGET (16)
#define AAC_MAX_LUN (8)
/*
* These macros convert from physical channels to virtual channels
*/
#define CONTAINER_CHANNEL (0)
#define aac_phys_to_logical(x) (x+1)
#define aac_logical_to_phys(x) (x?x-1:0)
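/* e.g. physical channel 0 becomes logical channel 1; logical channel 0
 * (CONTAINER_CHANNEL) is reserved for containers and maps back to
 * physical channel 0. */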
#define AAC_DETAILED_STATUS_INFO
struct diskparm
{
int heads;
int sectors;
int cylinders;
};
/*
* DON'T CHANGE THE ORDER, this is set by the firmware
*/
#define CT_NONE 0
#define CT_VOLUME 1
#define CT_MIRROR 2
#define CT_STRIPE 3
#define CT_RAID5 4
#define CT_SSRW 5
#define CT_SSRO 6
#define CT_MORPH 7
#define CT_PASSTHRU 8
#define CT_RAID4 9
#define CT_RAID10 10 /* stripe of mirror */
#define CT_RAID00 11 /* stripe of stripe */
#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
#define CT_PSEUDO_RAID 13 /* really raid4 */
#define CT_LAST_VOLUME_TYPE 14
/*
* Types of objects addressable in some fashion by the client.
* This is a superset of those objects handled just by the filesystem
* and includes "raw" objects that an administrator would use to
* configure containers and filesystems.
*/
#define FT_REG 1 /* regular file */
#define FT_DIR 2 /* directory */
#define FT_BLK 3 /* "block" device - reserved */
#define FT_CHR 4 /* "character special" device - reserved */
#define FT_LNK 5 /* symbolic link */
#define FT_SOCK 6 /* socket */
#define FT_FIFO 7 /* fifo */
#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/target/lun */
#define FT_SLICE 10 /* virtual disk - raw volume - slice */
#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
#define FT_VOLUME 12 /* Container - Volume Set */
#define FT_STRIPE 13 /* Container - Stripe Set */
#define FT_MIRROR 14 /* Container - Mirror Set */
#define FT_RAID5 15 /* Container - Raid 5 Set */
#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
/*
* Host side memory scatter gather list
* Used by the adapter for read, write, and readdirplus operations
 * We have separate 32 and 64 bit versions because even
* on 64 bit systems not all cards support the 64 bit version
*/
struct sgentry {
u32 addr; /* 32-bit address. */
u32 count; /* Length. */
};
struct sgentry64 {
u32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */
u32 count; /* Length. */
};
/*
* SGMAP
*
* This is the SGMAP structure for all commands that use
* 32-bit addressing.
*/
struct sgmap {
u32 count;
struct sgentry sg[1];
};
struct sgmap64 {
u32 count;
struct sgentry64 sg[1];
};
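/*
 * Note: sg[1] is really a variable length trailing array. A command
 * embedding an sgmap with N entries is sized as
 *	sizeof(command) + (N - 1) * sizeof(struct sgentry)
 * which is exactly how aac_read() and aac_write() compute fibsize.
 */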
struct creation_info
{
u8 buildnum; /* e.g., 588 */
u8 usec; /* e.g., 588 */
u8 via; /* e.g., 1 = FSU,
* 2 = API
*/
u8 year; /* e.g., 1997 = 97 */
u32 date; /*
* unsigned Month :4; // 1 - 12
* unsigned Day :6; // 1 - 32
* unsigned Hour :6; // 0 - 23
* unsigned Minute :6; // 0 - 60
* unsigned Second :6; // 0 - 60
*/
u32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */
};
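/*
 * Illustrative only, not part of this patch: unpacking the date field,
 * assuming the layout in the comment above with Month in the most
 * significant bits of the low 28 bits.
 */
static inline void example_decode_creation_date(u32 date)
{
	unsigned int month  = (date >> 24) & 0x0f;	/* 1 - 12 */
	unsigned int day    = (date >> 18) & 0x3f;	/* 1 - 32 */
	unsigned int hour   = (date >> 12) & 0x3f;	/* 0 - 23 */
	unsigned int minute = (date >>  6) & 0x3f;	/* 0 - 60 */
	unsigned int second = date & 0x3f;		/* 0 - 60 */

	printk(KERN_INFO "aacraid: built %u/%u %u:%02u:%02u\n",
			month, day, hour, minute, second);
}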
/*
* Define all the constants needed for the communication interface
*/
/*
* Define how many queue entries each queue will have and the total
* number of entries for the entire communication interface. Also define
* how many queues we support.
*
* This has to match the controller
*/
#define NUMBER_OF_COMM_QUEUES 8 // 4 command; 4 response
#define HOST_HIGH_CMD_ENTRIES 4
#define HOST_NORM_CMD_ENTRIES 8
#define ADAP_HIGH_CMD_ENTRIES 4
#define ADAP_NORM_CMD_ENTRIES 512
#define HOST_HIGH_RESP_ENTRIES 4
#define HOST_NORM_RESP_ENTRIES 512
#define ADAP_HIGH_RESP_ENTRIES 4
#define ADAP_NORM_RESP_ENTRIES 8
#define TOTAL_QUEUE_ENTRIES \
(HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
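/* With the counts above this works out to 1056 queue entries in total. */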
/*
* Set the queues on a 16 byte alignment
*/
#define QUEUE_ALIGNMENT 16
/*
* The queue headers define the Communication Region queues. These
* are physically contiguous and accessible by both the adapter and the
* host. Even though all queue headers are in the same contiguous block
* they will be represented as individual units in the data structures.
*/
struct aac_entry {
u32 size; /* Size in bytes of Fib which this QE points to */
u32 addr; /* Receiver address of the FIB */
};
/*
* The adapter assumes the ProducerIndex and ConsumerIndex are grouped
* adjacently and in that order.
*/
struct aac_qhdr {
u64 header_addr; /* Address to hand the adapter to access to this queue head */
u32 *producer; /* The producer index for this queue (host address) */
u32 *consumer; /* The consumer index for this queue (host address) */
};
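/*
 * Illustrative sketch only, not part of this patch: adding an entry
 * under the producer/consumer scheme above. 'entries' and 'qsize' are
 * hypothetical parameters; the real driver keeps this state in its
 * per-queue bookkeeping.
 */
static inline int example_queue_entry(struct aac_qhdr *q,
			struct aac_entry *entries, u32 qsize,
			u32 size, u32 addr)
{
	u32 prod = *q->producer;

	if ((prod + 1) % qsize == *q->consumer)
		return -1;	/* queue full */
	entries[prod].size = size;
	entries[prod].addr = addr;
	*q->producer = (prod + 1) % qsize;	/* hand the entry to the adapter */
	return 0;
}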
/*
* Define all the events which the adapter would like to notify
* the host of.
*/
#define HostNormCmdQue 1 /* Change in host normal priority command queue */
#define HostHighCmdQue 2 /* Change in host high priority command queue */
#define HostNormRespQue 3 /* Change in host normal priority response queue */
#define HostHighRespQue 4 /* Change in host high priority response queue */
#define AdapNormRespNotFull 5
#define AdapHighRespNotFull 6
#define AdapNormCmdNotFull 7
#define AdapHighCmdNotFull 8
#define SynchCommandComplete 9
#define AdapInternalError 0xfe /* The adapter detected an internal error shutting down */
/*
* Define all the events the host wishes to notify the
 * adapter of. The first four values must match the Qid of the
 * corresponding queue.
*/
#define AdapNormCmdQue 2
#define AdapHighCmdQue 3
#define AdapNormRespQue 6
#define AdapHighRespQue 7
#define HostShutdown 8
#define HostPowerFail 9
#define FatalCommError 10
#define HostNormRespNotFull 11
#define HostHighRespNotFull 12
#define HostNormCmdNotFull 13
#define HostHighCmdNotFull 14
#define FastIo 15
#define AdapPrintfDone 16
/*
* Define all the queues that the adapter and host use to communicate
* Number them to match the physical queue layout.
*/
enum aac_queue_types {
HostNormCmdQueue = 0, /* Adapter to host normal priority command traffic */
HostHighCmdQueue, /* Adapter to host high priority command traffic */
AdapNormCmdQueue, /* Host to adapter normal priority command traffic */
AdapHighCmdQueue, /* Host to adapter high priority command traffic */
HostNormRespQueue, /* Adapter to host normal priority response traffic */
HostHighRespQueue, /* Adapter to host high priority response traffic */
AdapNormRespQueue, /* Host to adapter normal priority response traffic */
AdapHighRespQueue /* Host to adapter high priority response traffic */
};
/*
* Assign type values to the FSA communication data structures
*/
#define FIB_MAGIC 0x0001
/*
* Define the priority levels the FSA communication routines support.
*/
#define FsaNormal 1
#define FsaHigh 2
/*
 * Define the FIB. The FIB is where all the requested data and
 * command information is put for the application on the FSA adapter.
*/
struct aac_fibhdr {
u32 XferState; // Current transfer state for this CCB
u16 Command; // Routing information for the destination
u8 StructType; // Type FIB
u8 Flags; // Flags for FIB
u16 Size; // Size of this FIB in bytes
u16 SenderSize; // Size of the FIB in the sender (for response sizing)
u32 SenderFibAddress; // Host defined data in the FIB
u32 ReceiverFibAddress; // Logical address of this FIB for the adapter
u32 SenderData; // Place holder for the sender to store data
union {
struct {
u32 _ReceiverTimeStart; // Timestamp for receipt of fib
u32 _ReceiverTimeDone; // Timestamp for completion of fib
} _s;
struct list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
} _u;
};
#define FibLinks _u._FibLinks
#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
struct hw_fib {
struct aac_fibhdr header;
u8 data[FIB_DATA_SIZE_IN_BYTES]; // Command specific data
};
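/*
 * The header plus FIB_DATA_SIZE_IN_BYTES of payload make a FIB a fixed
 * 512 byte unit. An illustrative compile-time check (not part of this
 * patch, and assuming the compiler adds no padding) would be:
 *
 *	static char hw_fib_is_512[(sizeof(struct hw_fib) == 512) ? 1 : -1];
 */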
/*
* FIB commands
*/
#define TestCommandResponse 1
#define TestAdapterCommand 2
/*
* Lowlevel and comm commands
*/
#define LastTestCommand 100
#define ReinitHostNormCommandQueue 101
#define ReinitHostHighCommandQueue 102
#define ReinitHostHighRespQueue 103
#define ReinitHostNormRespQueue 104
#define ReinitAdapNormCommandQueue 105
#define ReinitAdapHighCommandQueue 107
#define ReinitAdapHighRespQueue 108
#define ReinitAdapNormRespQueue 109
#define InterfaceShutdown 110
#define DmaCommandFib 120
#define StartProfile 121
#define TermProfile 122
#define SpeedTest 123
#define TakeABreakPt 124
#define RequestPerfData 125
#define SetInterruptDefTimer 126
#define SetInterruptDefCount 127
#define GetInterruptDefStatus 128
#define LastCommCommand 129
/*
* Filesystem commands
*/
#define NuFileSystem 300
#define UFS 301
#define HostFileSystem 302
#define LastFileSystemCommand 303
/*
* Container Commands
*/
#define ContainerCommand 500
#define ContainerCommand64 501
/*
* Cluster Commands
*/
#define ClusterCommand 550
/*
* Scsi Port commands (scsi passthrough)
*/
#define ScsiPortCommand 600
#define ScsiPortCommand64 601
/*
* Misc house keeping and generic adapter initiated commands
*/
#define AifRequest 700
#define CheckRevision 701
#define FsaHostShutdown 702
#define RequestAdapterInfo 703
#define IsAdapterPaused 704
#define SendHostTime 705
#define LastMiscCommand 706
//
// Commands that will target the failover level on the FSA adapter
//
enum fib_xfer_state {
HostOwned = (1<<0),
AdapterOwned = (1<<1),
FibInitialized = (1<<2),
FibEmpty = (1<<3),
AllocatedFromPool = (1<<4),
SentFromHost = (1<<5),
SentFromAdapter = (1<<6),
ResponseExpected = (1<<7),
NoResponseExpected = (1<<8),
AdapterProcessed = (1<<9),
HostProcessed = (1<<10),
HighPriority = (1<<11),
NormalPriority = (1<<12),
Async = (1<<13),
AsyncIo = (1<<13), // rpbfix: remove with new regime
PageFileIo = (1<<14), // rpbfix: remove with new regime
ShutdownRequest = (1<<15),
LazyWrite = (1<<16), // rpbfix: remove with new regime
AdapterMicroFib = (1<<17),
BIOSFibPath = (1<<18),
FastResponseCapable = (1<<19),
ApiFib = (1<<20) // Its an API Fib.
};
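/*
 * Illustrative sketch (not part of the driver): XferState is a bitmask of
 * the flags above. A freshly initialised, host owned FIB carries the
 * combination that fib_init() builds later in this patch:
 *
 *	fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized |
 *					    FibEmpty | FastResponseCapable);
 */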
/*
 * The following define needs to be updated any time there is an
* incompatible change made to the aac_init structure.
*/
#define ADAPTER_INIT_STRUCT_REVISION 3
struct aac_init
{
u32 InitStructRevision;
u32 MiniPortRevision;
u32 fsrev;
u32 CommHeaderAddress;
u32 FastIoCommAreaAddress;
u32 AdapterFibsPhysicalAddress;
u32 AdapterFibsVirtualAddress;
u32 AdapterFibsSize;
u32 AdapterFibAlign;
u32 printfbuf;
u32 printfbufsiz;
u32 HostPhysMemPages; // number of 4k pages of host physical memory
u32 HostElapsedSeconds; // number of seconds since 1970.
};
enum aac_log_level {
LOG_INIT = 10,
LOG_INFORMATIONAL = 20,
LOG_WARNING = 30,
LOG_LOW_ERROR = 40,
LOG_MEDIUM_ERROR = 50,
LOG_HIGH_ERROR = 60,
LOG_PANIC = 70,
LOG_DEBUG = 80,
LOG_WINDBG_PRINT = 90
};
#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT 0x030b
#define FSAFS_NTC_FIB_CONTEXT 0x030c
struct aac_dev;
struct adapter_ops
{
void (*adapter_interrupt)(struct aac_dev *dev);
void (*adapter_notify)(struct aac_dev *dev, u32 event);
void (*adapter_enable_int)(struct aac_dev *dev, u32 event);
void (*adapter_disable_int)(struct aac_dev *dev, u32 event);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 *status);
};
/*
* Define which interrupt handler needs to be installed
*/
struct aac_driver_ident
{
u16 vendor;
u16 device;
u16 subsystem_vendor;
u16 subsystem_device;
int (*init)(struct aac_dev *dev, unsigned long num);
char * name;
char * vname;
char * model;
u16 channels;
};
/*
* The adapter interface specs all queues to be located in the same
 * physically contiguous block. The host structure that defines the
 * communication queues will assume they are each a separate physically
 * contiguous memory region that will support them all being one big
 * contiguous block.
 * There is a command and response queue for each level and direction of
 * communication. These regions are accessed by both the host and adapter.
*/
struct aac_queue {
u64 logical; /* This is the address we give the adapter */
struct aac_entry *base; /* This is the system virtual address */
struct aac_qhdr headers; /* A pointer to the producer and consumer queue headers for this queue */
u32 entries; /* Number of queue entries on this queue */
wait_queue_head_t qfull; /* Event to wait on if the queue is full */
wait_queue_head_t cmdready; /* Indicates there is a Command ready from the adapter on this queue. */
/* This is only valid for adapter to host command queues. */
	spinlock_t *lock;	/* Spinlock for this queue; must be held before accessing the queue */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
u32 padding; /* Padding - FIXME - can remove I believe */
	struct list_head cmdq;	/* A queue of FIBs which need to be processed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
unsigned long numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
};
/*
* Message queues. The order here is important, see also the
* queue type ordering
*/
struct aac_queue_block
{
struct aac_queue queue[8];
};
/*
* SaP1 Message Unit Registers
*/
struct sa_drawbridge_CSR {
// Offset | Name
u32 reserved[10]; // 00h-27h | Reserved
	u8 LUT_Offset;	// 28h | Lookup Table Offset
	u8 reserved1[3];	// 29h-2bh | Reserved
	u32 LUT_Data;	// 2ch | Lookup Table Data
u32 reserved2[26]; // 30h-97h | Reserved
u16 PRICLEARIRQ; // 98h | Primary Clear Irq
u16 SECCLEARIRQ; // 9ah | Secondary Clear Irq
u16 PRISETIRQ; // 9ch | Primary Set Irq
u16 SECSETIRQ; // 9eh | Secondary Set Irq
u16 PRICLEARIRQMASK; // a0h | Primary Clear Irq Mask
u16 SECCLEARIRQMASK; // a2h | Secondary Clear Irq Mask
u16 PRISETIRQMASK; // a4h | Primary Set Irq Mask
u16 SECSETIRQMASK; // a6h | Secondary Set Irq Mask
u32 MAILBOX0; // a8h | Scratchpad 0
u32 MAILBOX1; // ach | Scratchpad 1
u32 MAILBOX2; // b0h | Scratchpad 2
u32 MAILBOX3; // b4h | Scratchpad 3
u32 MAILBOX4; // b8h | Scratchpad 4
u32 MAILBOX5; // bch | Scratchpad 5
u32 MAILBOX6; // c0h | Scratchpad 6
u32 MAILBOX7; // c4h | Scratchpad 7
u32 ROM_Setup_Data; // c8h | Rom Setup and Data
u32 ROM_Control_Addr; // cch | Rom Control and Address
u32 reserved3[12]; // d0h-ffh | reserved
u32 LUT[64]; // 100h-1ffh| Lookup Table Entries
//
// TO DO
// need to add DMA, I2O, UART, etc registers from 80h to 364h
//
};
#define Mailbox0 SaDbCSR.MAILBOX0
#define Mailbox1 SaDbCSR.MAILBOX1
#define Mailbox2 SaDbCSR.MAILBOX2
#define Mailbox3 SaDbCSR.MAILBOX3
#define Mailbox4 SaDbCSR.MAILBOX4
#define Mailbox5 SaDbCSR.MAILBOX5
#define Mailbox7 SaDbCSR.MAILBOX7
#define DoorbellReg_p SaDbCSR.PRISETIRQ
#define DoorbellReg_s SaDbCSR.SECSETIRQ
#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
#define DOORBELL_0 cpu_to_le16(0x0001)
#define DOORBELL_1 cpu_to_le16(0x0002)
#define DOORBELL_2 cpu_to_le16(0x0004)
#define DOORBELL_3 cpu_to_le16(0x0008)
#define DOORBELL_4 cpu_to_le16(0x0010)
#define DOORBELL_5 cpu_to_le16(0x0020)
#define DOORBELL_6 cpu_to_le16(0x0040)
#define PrintfReady DOORBELL_5
#define PrintfDone DOORBELL_5
struct sa_registers {
struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */
};
#define Sa_MINIPORT_REVISION 1
#define sa_readw(AEP, CSR) readw(&((AEP)->regs.sa->CSR))
#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR))
#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR))
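/*
 * Illustrative use of the accessors above (not part of the driver),
 * assuming dev is a struct aac_dev * with regs.sa mapped and value is
 * a placeholder for whatever is being passed:
 *
 *	sa_writel(dev, Mailbox1, value);
 *	status = sa_readl(dev, Mailbox0);
 *
 * The Mailbox* defines expand to fields of SaDbCSR, so these resolve to
 * MMIO accesses on the drawbridge CSR block.
 */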
/*
* Rx Message Unit Registers
*/
struct rx_mu_registers {
// Local | PCI* | Name
// | |
u32 ARSR; // 1300h | 00h | APIC Register Select Register
u32 reserved0; // 1304h | 04h | Reserved
u32 AWR; // 1308h | 08h | APIC Window Register
u32 reserved1; // 130Ch | 0Ch | Reserved
u32 IMRx[2]; // 1310h | 10h | Inbound Message Registers
u32 OMRx[2]; // 1318h | 18h | Outbound Message Registers
u32 IDR; // 1320h | 20h | Inbound Doorbell Register
u32 IISR; // 1324h | 24h | Inbound Interrupt Status Register
u32 IIMR; // 1328h | 28h | Inbound Interrupt Mask Register
u32 ODR; // 132Ch | 2Ch | Outbound Doorbell Register
u32 OISR; // 1330h | 30h | Outbound Interrupt Status Register
u32 OIMR; // 1334h | 34h | Outbound Interrupt Mask Register
// * Must access through ATU Inbound Translation Window
};
struct rx_inbound {
u32 Mailbox[8];
};
#define InboundMailbox0 IndexRegs.Mailbox[0]
#define InboundMailbox1 IndexRegs.Mailbox[1]
#define InboundMailbox2 IndexRegs.Mailbox[2]
#define InboundMailbox3 IndexRegs.Mailbox[3]
#define InboundMailbox4 IndexRegs.Mailbox[4]
#define INBOUNDDOORBELL_0 cpu_to_le32(0x00000001)
#define INBOUNDDOORBELL_1 cpu_to_le32(0x00000002)
#define INBOUNDDOORBELL_2 cpu_to_le32(0x00000004)
#define INBOUNDDOORBELL_3 cpu_to_le32(0x00000008)
#define INBOUNDDOORBELL_4 cpu_to_le32(0x00000010)
#define INBOUNDDOORBELL_5 cpu_to_le32(0x00000020)
#define INBOUNDDOORBELL_6 cpu_to_le32(0x00000040)
#define OUTBOUNDDOORBELL_0 cpu_to_le32(0x00000001)
#define OUTBOUNDDOORBELL_1 cpu_to_le32(0x00000002)
#define OUTBOUNDDOORBELL_2 cpu_to_le32(0x00000004)
#define OUTBOUNDDOORBELL_3 cpu_to_le32(0x00000008)
#define OUTBOUNDDOORBELL_4 cpu_to_le32(0x00000010)
#define InboundDoorbellReg MUnit.IDR
#define OutboundDoorbellReg MUnit.ODR
struct rx_registers {
struct rx_mu_registers MUnit; // 1300h - 1334h
u32 reserved1[6]; // 1338h - 134ch
struct rx_inbound IndexRegs;
};
#define rx_readb(AEP, CSR) readb(&((AEP)->regs.rx->CSR))
#define rx_readl(AEP, CSR) readl(&((AEP)->regs.rx->CSR))
#define rx_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rx->CSR))
#define rx_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rx->CSR))
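/*
 * Illustrative use (not part of the driver): ringing the first inbound
 * doorbell on an Rx board, assuming dev has regs.rx mapped:
 *
 *	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
 *
 * InboundDoorbellReg expands to MUnit.IDR, so this is a 32 bit write to
 * the Inbound Doorbell Register.
 */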
struct fib;
typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
struct aac_fib_context {
s16 type; // used for verification of structure
s16 size;
ulong jiffies; // used for cleanup - dmb changed to ulong
	struct list_head next;	// used to link contexts into a linked list
struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct list_head fibs;
};
struct fsa_scsi_hba {
u32 size[MAXIMUM_NUM_CONTAINERS];
u32 type[MAXIMUM_NUM_CONTAINERS];
u8 valid[MAXIMUM_NUM_CONTAINERS];
u8 ro[MAXIMUM_NUM_CONTAINERS];
u8 locked[MAXIMUM_NUM_CONTAINERS];
u8 deleted[MAXIMUM_NUM_CONTAINERS];
u32 devno[MAXIMUM_NUM_CONTAINERS];
};
struct fib {
void *next; /* this is used by the allocator */
s16 type;
s16 size;
/*
* The Adapter that this I/O is destined for.
*/
struct aac_dev *dev;
u64 logicaladdr; /* 64 bit */
/*
* This is the event the sendfib routine will wait on if the
* caller did not pass one and this is synch io.
*/
struct semaphore event_wait;
spinlock_t event_lock;
u32 done; /* gets set to 1 when fib is complete */
fib_callback callback;
void *callback_data;
u32 flags; // u32 dmb was ulong
/*
* The following is used to put this fib context onto the
* Outstanding I/O queue.
*/
struct list_head queue;
void *data;
struct hw_fib *fib; /* Actual shared object */
};
/*
* Adapter Information Block
*
* This is returned by the RequestAdapterInfo block
*/
struct aac_adapter_info
{
u32 platform;
u32 cpu;
u32 subcpu;
u32 clock;
u32 execmem;
u32 buffermem;
u32 totalmem;
u32 kernelrev;
u32 kernelbuild;
u32 monitorrev;
u32 monitorbuild;
u32 hwrev;
u32 hwbuild;
u32 biosrev;
u32 biosbuild;
u32 cluster;
u32 serial[2];
u32 battery;
u32 options;
u32 OEM;
};
/*
* Battery platforms
*/
#define AAC_BAT_REQ_PRESENT (1)
#define AAC_BAT_REQ_NOTPRESENT (2)
#define AAC_BAT_OPT_PRESENT (3)
#define AAC_BAT_OPT_NOTPRESENT (4)
#define AAC_BAT_NOT_SUPPORTED (5)
/*
* cpu types
*/
#define AAC_CPU_SIMULATOR (1)
#define AAC_CPU_I960 (2)
#define AAC_CPU_STRONGARM (3)
/*
* Supported Options
*/
#define AAC_OPT_SNAPSHOT cpu_to_le32(1)
#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1)
#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2)
#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3)
#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
#define AAC_OPT_ALARM cpu_to_le32(1<<11)
#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
struct aac_dev
{
struct aac_dev *next;
const char *name;
int id;
u16 irq_mask;
/*
* Map for 128 fib objects (64k)
*/
dma_addr_t hw_fib_pa;
struct hw_fib *hw_fib_va;
#if BITS_PER_LONG >= 64
ulong fib_base_va;
#endif
/*
* Fib Headers
*/
struct fib fibs[AAC_NUM_FIB];
struct fib *free_fib;
struct fib *timeout_fib;
spinlock_t fib_lock;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
* FIBs from the adapter. The following list is used to keep
* track of all the threads that have requested these FIBs. The
* mutex is used to synchronize access to all data associated
* with the adapter fibs.
*/
struct list_head fib_list;
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
dma_addr_t init_pa; /* Holds physical address of the init struct */
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
dma_addr_t comm_phys; /* Physical Address of Comm area */
size_t comm_size;
struct Scsi_Host *scsi_host_ptr;
struct fsa_scsi_hba fsa_dev;
int thread_pid;
int cardtype;
/*
* The following is the device specific extension.
*/
union
{
struct sa_registers *sa;
struct rx_registers *rx;
} regs;
/*
* The following is the number of the individual adapter
*/
u32 devnum;
u32 aif_thread;
struct completion aif_completion;
struct aac_adapter_info adapter_info;
	/* These are in adapter info but they are in the I/O flow so
	 * let's break them out so we don't have to do an AND to check them
*/
u8 nondasd_support;
u8 pae_support;
};
#define AllocateAndMapFibSpace(dev, MapFibContext) \
dev->a_ops.AllocateAndMapFibSpace(dev, MapFibContext)
#define UnmapAndFreeFibSpace(dev, MapFibContext) \
dev->a_ops.UnmapAndFreeFibSpace(dev, MapFibContext)
#define aac_adapter_interrupt(dev) \
dev->a_ops.adapter_interrupt(dev)
#define aac_adapter_notify(dev, event) \
dev->a_ops.adapter_notify(dev, event)
#define aac_adapter_enable_int(dev, event) \
dev->a_ops.adapter_enable_int(dev, event)
#define aac_adapter_disable_int(dev, event) \
dev->a_ops.adapter_disable_int(dev, event)
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
/*
* Define the command values
*/
#define Null 0
#define GetAttributes 1
#define SetAttributes 2
#define Lookup 3
#define ReadLink 4
#define Read 5
#define Write 6
#define Create 7
#define MakeDirectory 8
#define SymbolicLink 9
#define MakeNode 10
#define Removex 11
#define RemoveDirectoryx 12
#define Rename 13
#define Link 14
#define ReadDirectory 15
#define ReadDirectoryPlus 16
#define FileSystemStatus 17
#define FileSystemInfo 18
#define PathConfigure 19
#define Commit 20
#define Mount 21
#define UnMount 22
#define Newfs 23
#define FsCheck 24
#define FsSync 25
#define SimReadWrite 26
#define SetFileSystemStatus 27
#define BlockRead 28
#define BlockWrite 29
#define NvramIoctl 30
#define FsSyncWait 31
#define ClearArchiveBit 32
#define SetAcl 33
#define GetAcl 34
#define AssignAcl 35
#define FaultInsertion 36 /* Fault Insertion Command */
#define CrazyCache 37 /* Crazycache */
#define MAX_FSACOMMAND_NUM 38
/*
 * Define the status returns. These are very Unix-like although
* most are not in fact used
*/
#define ST_OK 0
#define ST_PERM 1
#define ST_NOENT 2
#define ST_IO 5
#define ST_NXIO 6
#define ST_E2BIG 7
#define ST_ACCES 13
#define ST_EXIST 17
#define ST_XDEV 18
#define ST_NODEV 19
#define ST_NOTDIR 20
#define ST_ISDIR 21
#define ST_INVAL 22
#define ST_FBIG 27
#define ST_NOSPC 28
#define ST_ROFS 30
#define ST_MLINK 31
#define ST_WOULDBLOCK 35
#define ST_NAMETOOLONG 63
#define ST_NOTEMPTY 66
#define ST_DQUOT 69
#define ST_STALE 70
#define ST_REMOTE 71
#define ST_BADHANDLE 10001
#define ST_NOT_SYNC 10002
#define ST_BAD_COOKIE 10003
#define ST_NOTSUPP 10004
#define ST_TOOSMALL 10005
#define ST_SERVERFAULT 10006
#define ST_BADTYPE 10007
#define ST_JUKEBOX 10008
#define ST_NOTMOUNTED 10009
#define ST_MAINTMODE 10010
#define ST_STALEACL 10011
/*
* On writes how does the client want the data written.
*/
#define CACHE_CSTABLE 1
#define CACHE_UNSTABLE 2
/*
 * Lets the client know at which level the data was committed on
* a write request
*/
#define CMFILE_SYNCH_NVRAM 1
#define CMDATA_SYNCH_NVRAM 2
#define CMFILE_SYNCH 3
#define CMDATA_SYNCH 4
#define CMUNSTABLE 5
struct aac_read
{
u32 command;
u32 cid;
u32 block;
u32 count;
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_read64
{
u32 command;
u16 cid;
u16 sector_count;
u32 block;
u16 pad;
u16 flags;
struct sgmap64 sg; // Must be last in struct because it is variable
};
struct aac_read_reply
{
u32 status;
u32 count;
};
struct aac_write
{
u32 command;
u32 cid;
u32 block;
u32 count;
u32 stable; // Not used
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_write64
{
u32 command;
u16 cid;
u16 sector_count;
u32 block;
u16 pad;
u16 flags;
struct sgmap64 sg; // Must be last in struct because it is variable
};
struct aac_write_reply
{
u32 status;
u32 count;
u32 committed;
};
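/*
 * Illustrative sketch (not part of the driver): a caller issuing a
 * 32 bit container read fills the FIB data area roughly as follows,
 * assuming fibptr came from fib_alloc()/fib_init() and cid, lba and
 * count are the container, start block and transfer size:
 *
 *	struct aac_read *readcmd = (struct aac_read *) fib_data(fibptr);
 *	readcmd->command = cpu_to_le32(VM_CtBlockRead);
 *	readcmd->cid = cpu_to_le32(cid);
 *	readcmd->block = cpu_to_le32(lba);
 *	readcmd->count = cpu_to_le32(count);
 *	... then build readcmd->sg and hand the FIB to fib_send()
 *	    with ContainerCommand ...
 */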
struct aac_srb
{
u32 function;
u32 channel;
u32 target;
u32 lun;
u32 timeout;
u32 flags;
u32 count; // Data xfer size
u32 retry_limit;
u32 cdb_size;
u8 cdb[16];
struct sgmap sg;
};
#define AAC_SENSE_BUFFERSIZE 30
struct aac_srb_reply
{
u32 status;
u32 srb_status;
u32 scsi_status;
u32 data_xfer_length;
u32 sense_data_size;
	u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE?
};
/*
* SRB Flags
*/
#define SRB_NoDataXfer 0x0000
#define SRB_DisableDisconnect 0x0004
#define SRB_DisableSynchTransfer 0x0008
#define SRB_BypassFrozenQueue 0x0010
#define SRB_DisableAutosense 0x0020
#define SRB_DataIn 0x0040
#define SRB_DataOut 0x0080
/*
* SRB Functions - set in aac_srb->function
*/
#define SRBF_ExecuteScsi 0x0000
#define SRBF_ClaimDevice 0x0001
#define SRBF_IO_Control 0x0002
#define SRBF_ReceiveEvent 0x0003
#define SRBF_ReleaseQueue 0x0004
#define SRBF_AttachDevice 0x0005
#define SRBF_ReleaseDevice 0x0006
#define SRBF_Shutdown 0x0007
#define SRBF_Flush 0x0008
#define SRBF_AbortCommand 0x0010
#define SRBF_ReleaseRecovery 0x0011
#define SRBF_ResetBus 0x0012
#define SRBF_ResetDevice 0x0013
#define SRBF_TerminateIO 0x0014
#define SRBF_FlushQueue 0x0015
#define SRBF_RemoveDevice 0x0016
#define SRBF_DomainValidation 0x0017
/*
* SRB SCSI Status - set in aac_srb->scsi_status
*/
#define SRB_STATUS_PENDING 0x00
#define SRB_STATUS_SUCCESS 0x01
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ABORT_FAILED 0x03
#define SRB_STATUS_ERROR 0x04
#define SRB_STATUS_BUSY 0x05
#define SRB_STATUS_INVALID_REQUEST 0x06
#define SRB_STATUS_INVALID_PATH_ID 0x07
#define SRB_STATUS_NO_DEVICE 0x08
#define SRB_STATUS_TIMEOUT 0x09
#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
#define SRB_STATUS_COMMAND_TIMEOUT 0x0B
#define SRB_STATUS_MESSAGE_REJECTED 0x0D
#define SRB_STATUS_BUS_RESET 0x0E
#define SRB_STATUS_PARITY_ERROR 0x0F
#define SRB_STATUS_REQUEST_SENSE_FAILED 0x10
#define SRB_STATUS_NO_HBA 0x11
#define SRB_STATUS_DATA_OVERRUN 0x12
#define SRB_STATUS_UNEXPECTED_BUS_FREE 0x13
#define SRB_STATUS_PHASE_SEQUENCE_FAILURE 0x14
#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH 0x15
#define SRB_STATUS_REQUEST_FLUSHED 0x16
#define SRB_STATUS_DELAYED_RETRY 0x17
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_INVALID_TARGET_ID 0x21
#define SRB_STATUS_BAD_FUNCTION 0x22
#define SRB_STATUS_ERROR_RECOVERY 0x23
#define SRB_STATUS_NOT_STARTED 0x24
#define SRB_STATUS_NOT_IN_USE 0x30
#define SRB_STATUS_FORCE_ABORT 0x31
#define SRB_STATUS_DOMAIN_VALIDATION_FAIL 0x32
/*
* Object-Server / Volume-Manager Dispatch Classes
*/
#define VM_Null 0
#define VM_NameServe 1
#define VM_ContainerConfig 2
#define VM_Ioctl 3
#define VM_FilesystemIoctl 4
#define VM_CloseAll 5
#define VM_CtBlockRead 6
#define VM_CtBlockWrite 7
#define VM_SliceBlockRead 8 /* raw access to configured "storage objects" */
#define VM_SliceBlockWrite 9
#define VM_DriveBlockRead 10 /* raw access to physical devices */
#define VM_DriveBlockWrite 11
#define VM_EnclosureMgt 12 /* enclosure management */
#define VM_Unused 13 /* used to be diskset management */
#define VM_CtBlockVerify 14
#define VM_CtPerf 15 /* performance test */
#define VM_CtBlockRead64 16
#define VM_CtBlockWrite64 17
#define VM_CtBlockVerify64 18
#define VM_CtHostRead64 19
#define VM_CtHostWrite64 20
#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
/*
* Descriptive information (eg, vital stats)
* that a content manager might report. The
* FileArray filesystem component is one example
* of a content manager. Raw mode might be
* another.
*/
struct aac_fsinfo {
u32 fsTotalSize; /* Consumed by fs, incl. metadata */
u32 fsBlockSize;
u32 fsFragSize;
u32 fsMaxExtendSize;
u32 fsSpaceUnits;
u32 fsMaxNumFiles;
u32 fsNumFreeFiles;
u32 fsInodeDensity;
}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
union aac_contentinfo {
struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
};
/*
* Query for "mountable" objects, ie, objects that are typically
* associated with a drive letter on the client (host) side.
*/
struct aac_mntent {
u32 oid;
u8 name[16]; // if applicable
struct creation_info create_info; // if applicable
u32 capacity;
u32 vol; // substrate structure
u32 obj; // FT_FILESYS, FT_DATABASE, etc.
u32 state; // unready for mounting, readonly, etc.
union aac_contentinfo fileinfo; // Info specific to content manager (eg, filesystem)
u32 altoid; // != oid <==> snapshot or broken mirror exists
};
#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
struct aac_query_mount {
u32 command;
u32 type;
u32 count;
};
struct aac_mount {
u32 status;
u32 type; /* should be same as that requested */
u32 count;
struct aac_mntent mnt[1];
};
/*
* The following command is sent to shut down each container.
*/
struct aac_close {
u32 command;
u32 cid;
};
struct aac_query_disk
{
s32 cnum;
s32 bus;
s32 target;
s32 lun;
u32 valid;
u32 locked;
u32 deleted;
s32 instance;
s8 name[10];
u32 unmapped;
};
struct aac_delete_disk {
u32 disknum;
u32 cnum;
};
struct fib_ioctl
{
char *fibctx;
int wait;
char *fib;
};
struct revision
{
u32 compat;
u32 version;
u32 build;
};
/*
 * Ugly - non-Linux-like ioctl coding for backwards compatibility.
*/
#define CTL_CODE(function, method) ( \
	(4 << 16) | ((function) << 2) | (method) \
)
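/*
 * Worked example: FSACTL_SENDFIB below is CTL_CODE(2050, METHOD_BUFFERED),
 * i.e. (4 << 16) | (2050 << 2) | 0 = 0x40000 | 0x2008 = 0x42008.
 */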
/*
* Define the method codes for how buffers are passed for I/O and FS
* controls
*/
#define METHOD_BUFFERED 0
#define METHOD_NEITHER 3
/*
* Filesystem ioctls
*/
#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
#define FSACTL_DELETE_DISK 0x163
#define FSACTL_QUERY_DISK 0x173
#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED)
#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED)
#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED)
#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED)
#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
struct aac_common
{
/*
* If this value is set to 1 then interrupt moderation will occur
 * in the base communication support.
*/
u32 irq_mod;
u32 peak_fibs;
u32 zero_fibs;
u32 fib_timeouts;
/*
* Statistical counters in debug mode
*/
#ifdef DBG
u32 FibsSent;
u32 FibRecved;
u32 NoResponseSent;
u32 NoResponseRecved;
u32 AsyncSent;
u32 AsyncRecved;
u32 NormalSent;
u32 NormalRecved;
#endif
};
extern struct aac_common aac_config;
/*
* The following macro is used when sending and receiving FIBs. It is
* only used for debugging.
*/
#if DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
#define FIB_COUNTER_INCREMENT(counter)
#endif
/*
* Adapter direct commands
* Monitor/Kernel API
*/
#define BREAKPOINT_REQUEST cpu_to_le32(0x00000004)
#define INIT_STRUCT_BASE_ADDRESS cpu_to_le32(0x00000005)
#define READ_PERMANENT_PARAMETERS cpu_to_le32(0x0000000a)
#define WRITE_PERMANENT_PARAMETERS cpu_to_le32(0x0000000b)
#define HOST_CRASHING cpu_to_le32(0x0000000d)
#define SEND_SYNCHRONOUS_FIB cpu_to_le32(0x0000000c)
#define GET_ADAPTER_PROPERTIES cpu_to_le32(0x00000019)
#define RE_INIT_ADAPTER cpu_to_le32(0x000000ee)
/*
* Adapter Status Register
*
 * Phase Status mailbox is 32 bits:
* <31:16> = Phase Status
* <15:0> = Phase
*
 * The adapter reports its present state through the phase. Only
 * a single phase should ever be set. Each phase can have multiple
* phase status bits to provide more detailed information about the
* state of the board. Care should be taken to ensure that any phase
* status bits that are set when changing the phase are also valid
* for the new phase or be cleared out. Adapter software (monitor,
 * iflash, kernel) is responsible for properly maintaining the phase
* status mailbox when it is running.
*
* MONKER_API Phases
*
* Phases are bit oriented. It is NOT valid to have multiple bits set
*/
#define SELF_TEST_FAILED cpu_to_le32(0x00000004)
#define KERNEL_UP_AND_RUNNING cpu_to_le32(0x00000080)
#define KERNEL_PANIC cpu_to_le32(0x00000100)
/*
* Doorbell bit defines
*/
#define DoorBellPrintfDone cpu_to_le32(1<<5) // Host -> Adapter
#define DoorBellAdapterNormCmdReady cpu_to_le32(1<<1) // Adapter -> Host
#define DoorBellAdapterNormRespReady cpu_to_le32(1<<2) // Adapter -> Host
#define DoorBellAdapterNormCmdNotFull cpu_to_le32(1<<3) // Adapter -> Host
#define DoorBellAdapterNormRespNotFull cpu_to_le32(1<<4) // Adapter -> Host
#define DoorBellPrintfReady cpu_to_le32(1<<5) // Adapter -> Host
/*
* For FIB communication, we need all of the following things
* to send back to the user.
*/
#define AifCmdEventNotify 1 /* Notify of event */
#define AifCmdJobProgress 2 /* Progress report */
#define AifCmdAPIReport 3 /* Report from other user of API */
#define AifCmdDriverNotify 4 /* Notify host driver of event */
#define AifReqJobList 100 /* Gets back complete job list */
#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */
#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
#define AifReqTerminateJob 104 /* Terminates job */
#define AifReqSuspendJob 105 /* Suspends a job */
#define AifReqResumeJob 106 /* Resumes a job */
#define AifReqSendAPIReport 107 /* API generic report requests */
#define AifReqAPIJobStart 108 /* Start a job from the API */
#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
#define AifReqAPIJobFinish 110 /* Finish a job from the API */
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
* to by the host.
*/
struct aac_aifcmd {
u32 command; /* Tell host what type of notify this is */
u32 seqnum; /* To allow ordering of reports (if necessary) */
u8 data[1]; /* Undefined length (from kernel viewpoint) */
};
static inline u32 fib2addr(struct hw_fib *hw)
{
return (u32)hw;
}
static inline struct hw_fib *addr2fib(u32 addr)
{
return (struct hw_fib *)addr;
}
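/*
 * Note: these helpers assume a FIB address fits in 32 bits; the upper
 * half of the virtual mapping on 64 bit boxes is cached separately via
 * fib_base_va in struct aac_dev.
 */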
const char *aac_driverinfo(struct Scsi_Host *);
struct fib *fib_alloc(struct aac_dev *dev);
int fib_setup(struct aac_dev *dev);
void fib_map_free(struct aac_dev *dev);
void fib_free(struct fib * context);
void fib_init(struct fib * context);
void fib_dealloc(struct fib * context);
void aac_printf(struct aac_dev *dev, u32 val);
int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int fib_complete(struct fib * context);
#define fib_data(fibctx) ((void *)(fibctx)->fib->data)
int aac_detach(struct aac_dev *dev);
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(Scsi_Cmnd *scsi_cmnd_ptr);
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg);
int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg);
int aac_rx_init(struct aac_dev *dev, unsigned long devNumber);
int aac_sa_init(struct aac_dev *dev, unsigned long devNumber);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
int aac_command_thread(struct aac_dev * dev);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
int fib_adapter_complete(struct fib * fibptr, unsigned short size);
struct aac_driver_ident* aac_get_driver_ident(int devtype);
int aac_get_adapter_info(struct aac_dev* dev);
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* commctrl.c
*
* Abstract: Contains all routines for control of the AFA comm layer
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
/**
* ioctl_send_fib - send a FIB from userspace
 *	@dev: adapter being processed
* @arg: arguments to the ioctl call
*
* This routine sends a fib to the adapter on behalf of a user level
* program.
*/
static int ioctl_send_fib(struct aac_dev * dev, void *arg)
{
struct hw_fib * kfib;
struct fib *fibptr;
fibptr = fib_alloc(dev);
if(fibptr == NULL)
return -ENOMEM;
kfib = fibptr->fib;
/*
* First copy in the header so that we can check the size field.
*/
if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
fib_free(fibptr);
return -EFAULT;
}
/*
* Since we copy based on the fib header size, make sure that we
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
	if(le16_to_cpu(kfib->header.Size) > sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
fib_free(fibptr);
return -EINVAL;
}
	if (copy_from_user((void *) kfib, arg, le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr))) {
fib_free(fibptr);
return -EFAULT;
}
	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
* Since we didn't really send a fib, zero out the state to allow
* cleanup code not to assert.
*/
kfib->header.XferState = 0;
} else {
		if (fib_send(kfib->header.Command, fibptr, le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL) != 0)
{
fib_free(fibptr);
return -EINVAL;
}
if (fib_complete(fibptr) != 0) {
fib_free(fibptr);
return -EINVAL;
}
}
/*
* Make sure that the size returned by the adapter (which includes
* the header) is less than or equal to the size of a fib, so we
* don't corrupt application data. Then copy that size to the user
* buffer. (Don't try to add the header information again, since it
* was already included by the adapter.)
*/
	if (copy_to_user(arg, (void *)kfib, le16_to_cpu(kfib->header.Size))) {
fib_free(fibptr);
return -EFAULT;
}
fib_free(fibptr);
return 0;
}
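/*
 * Illustrative sketch (not part of the driver): a user level program
 * exercises this path roughly as follows, assuming fd refers to the
 * adapter's ioctl interface:
 *
 *	struct hw_fib fib;
 *	memset(&fib, 0, sizeof(fib));
 *	... fill in fib.header.Command, fib.header.Size and fib.data ...
 *	if (ioctl(fd, FSACTL_SENDFIB, &fib) < 0)
 *		perror("FSACTL_SENDFIB");
 *
 * The adapter's response is copied back over the same buffer.
 */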
/**
 *	open_getadapter_fib	-	open an adapter fib context
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine creates and registers a new AdapterFibContext with the
 *	adapter, so that the caller can later retrieve adapter initiated
 *	FIBs from it.
*/
static int open_getadapter_fib(struct aac_dev * dev, void *arg)
{
struct aac_fib_context * fibctx;
int status;
unsigned long flags;
fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
if (fibctx == NULL) {
status = -ENOMEM;
} else {
fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
fibctx->size = sizeof(struct aac_fib_context);
/*
* Initialize the mutex used to wait for the next AIF.
*/
init_MUTEX_LOCKED(&fibctx->wait_sem);
fibctx->wait = 0;
/*
* Initialize the fibs and set the count of fibs on
* the list to 0.
*/
fibctx->count = 0;
INIT_LIST_HEAD(&fibctx->fibs);
fibctx->jiffies = jiffies/HZ;
/*
* Now add this context onto the adapter's
* AdapterFibContext list.
*/
spin_lock_irqsave(&dev->fib_lock, flags);
list_add_tail(&fibctx->next, &dev->fib_list);
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(arg, &fibctx, sizeof(struct aac_fib_context *))) {
status = -EFAULT;
} else {
status = 0;
}
}
return status;
}
/**
* next_getadapter_fib - get the next fib
* @dev: adapter to use
* @arg: ioctl argument
*
* This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
static int next_getadapter_fib(struct aac_dev * dev, void *arg)
{
struct fib_ioctl f;
struct aac_fib_context *fibctx, *aifcp;
struct hw_fib * fib;
int status;
struct list_head * entry;
int found;
unsigned long flags;
if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
return -EFAULT;
/*
* Extract the AdapterFibContext from the Input parameters.
*/
fibctx = (struct aac_fib_context *) f.fibctx;
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
found = 0;
entry = dev->fib_list.next;
while(entry != &dev->fib_list) {
aifcp = list_entry(entry, struct aac_fib_context, next);
if(fibctx == aifcp) { /* We found a winner */
found = 1;
break;
}
entry = entry->next;
}
if (found == 0)
return -EINVAL;
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context)))
return -EINVAL;
status = 0;
spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
*/
return_fib:
if (!list_empty(&fibctx->fibs)) {
struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fibs.next;
list_del(entry);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
kfree(fib);
return -EFAULT;
}
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib);
status = 0;
fibctx->jiffies = jiffies/HZ;
} else {
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (f.wait) {
if(down_interruptible(&fibctx->wait_sem) < 0) {
status = -EINTR;
} else {
/* Lock again and retry */
spin_lock_irqsave(&dev->fib_lock, flags);
goto return_fib;
}
} else {
status = -EAGAIN;
}
}
return status;
}
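/*
 * Illustrative sketch (not part of the driver): the three adapter fib
 * ioctls above and below are used together from user space roughly as
 * follows:
 *
 *	struct aac_fib_context *ctx;
 *	struct fib_ioctl f;
 *	struct hw_fib fib;
 *
 *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx);
 *	f.fibctx = (char *) ctx;
 *	f.wait = 1;
 *	f.fib = (char *) &fib;
 *	while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0) {
 *		... process the AIF now in fib ...
 *	}
 *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, ctx);
 */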
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
struct hw_fib *fib;
/*
* First free any FIBs that have not been consumed.
*/
while (!list_empty(&fibctx->fibs)) {
struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fibs.next;
list_del(entry);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
fibctx->count--;
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib);
}
/*
* Remove the Context from the AdapterFibContext List
*/
list_del(&fibctx->next);
/*
* Invalidate context
*/
fibctx->type = 0;
/*
* Free the space occupied by the Context
*/
kfree(fibctx);
return 0;
}
/**
* close_getadapter_fib - close down user fib context
* @dev: adapter
* @arg: ioctl arguments
*
* This routine will close down the fibctx passed in from the user.
*/
static int close_getadapter_fib(struct aac_dev * dev, void *arg)
{
struct aac_fib_context *fibctx, *aifcp;
int status;
unsigned long flags;
struct list_head * entry;
int found;
/*
* Extract the fibctx from the input parameters
*/
fibctx = arg;
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
found = 0;
entry = dev->fib_list.next;
while(entry != &dev->fib_list) {
aifcp = list_entry(entry, struct aac_fib_context, next);
if(fibctx == aifcp) { /* We found a winner */
found = 1;
break;
}
entry = entry->next;
}
if(found == 0)
return 0; /* Already gone */
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context)))
return -EINVAL;
spin_lock_irqsave(&dev->fib_lock, flags);
status = aac_close_fib_context(dev, fibctx);
spin_unlock_irqrestore(&dev->fib_lock, flags);
return status;
}
/**
 *	check_revision	-	report firmware revision
* @dev: adapter
* @arg: ioctl arguments
*
* This routine returns the firmware version.
* Under Linux, there have been no version incompatibilities, so this is simple!
*/
static int check_revision(struct aac_dev *dev, void *arg)
{
struct revision response;
response.compat = 1;
response.version = dev->adapter_info.kernelrev;
response.build = dev->adapter_info.kernelbuild;
if (copy_to_user(arg, &response, sizeof(response)))
return -EFAULT;
return 0;
}
struct aac_pci_info {
u32 bus;
u32 slot;
};
int aac_get_pci_info(struct aac_dev* dev, void* arg)
{
struct aac_pci_info pci_info;
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
if(copy_to_user( arg, (void*)&pci_info, sizeof(struct aac_pci_info)))
return -EFAULT;
return 0;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg)
{
int status;
/*
* HBA gets first crack
*/
status = aac_dev_ioctl(dev, cmd, arg);
if(status != -ENOTTY)
return status;
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
status = check_revision(dev, arg);
break;
case FSACTL_SENDFIB:
status = ioctl_send_fib(dev, arg);
break;
case FSACTL_OPEN_GET_ADAPTER_FIB:
status = open_getadapter_fib(dev, arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB:
status = next_getadapter_fib(dev, arg);
break;
case FSACTL_CLOSE_GET_ADAPTER_FIB:
status = close_getadapter_fib(dev, arg);
break;
case FSACTL_GET_PCI_INFO:
status = aac_get_pci_info(dev,arg);
break;
default:
status = -ENOTTY;
break;
}
return status;
}
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* comminit.c
*
 * Abstract: This supports the initialization of the host adapter communication interface.
* This is a platform dependent module for the pci cyclone board.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
struct aac_common aac_config;
static struct aac_dev *devices;
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
unsigned char *base;
unsigned long size, align;
unsigned long fibsize = 4096;
unsigned long printfbufsiz = 256;
struct aac_init *init;
dma_addr_t phys;
/* FIXME: Adaptec add 128 bytes to this value - WHY ?? */
size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
base = pci_alloc_consistent(dev->pdev, size, &phys);
if(base == NULL)
{
printk(KERN_ERR "aacraid: unable to create mapping.\n");
return 0;
}
dev->comm_addr = (void *)base;
dev->comm_phys = phys;
dev->comm_size = size;
dev->init = (struct aac_init *)(base + fibsize);
dev->init_pa = phys + fibsize;
/*
* Cache the upper bits of the virtual mapping for 64bit boxes
* FIXME: this crap should be rewritten
*/
#if BITS_PER_LONG >= 64
dev->fib_base_va = ((ulong)base & 0xffffffff00000000);
#endif
init = dev->init;
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
init->fsrev = cpu_to_le32(dev->fsrev);
/*
* Adapter Fibs are the first thing allocated so that they
* start page aligned
*/
init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base);
init->AdapterFibsPhysicalAddress = cpu_to_le32(phys);
init->AdapterFibsSize = cpu_to_le32(fibsize);
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
/*
* Increment the base address by the amount already used
*/
base = base + fibsize + sizeof(struct aac_init);
phys = phys + fibsize + sizeof(struct aac_init);
/*
* Align the beginning of Headers to commalign
*/
align = (commalign - ((unsigned long)(base) & (commalign - 1)));
base = base + align;
phys = phys + align;
/*
* Fill in addresses of the Comm Area Headers and Queues
*/
*commaddr = (unsigned long *)base;
init->CommHeaderAddress = cpu_to_le32(phys);
/*
* Increment the base address by the size of the CommArea
*/
base = base + commsize;
phys = phys + commsize;
/*
* Place the Printf buffer area after the Fast I/O comm area.
*/
dev->printfbuf = (void *)base;
init->printfbuf = cpu_to_le32(phys);
init->printfbufsiz = cpu_to_le32(printfbufsiz);
memset(base, 0, printfbufsiz);
return 1;
}
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
q->numpending = 0;
q->dev = dev;
INIT_LIST_HEAD(&q->pendingq);
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
init_waitqueue_head(&q->qfull);
spin_lock_init(&q->lockdata);
q->lock = &q->lockdata;
q->headers.producer = mem;
q->headers.consumer = mem+1;
*q->headers.producer = cpu_to_le32(qsize);
*q->headers.consumer = cpu_to_le32(qsize);
q->entries = qsize;
}
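/*
 * Illustrative layout (not part of the driver): mem points at two
 * adjacent u32s inside the comm area, so after this routine runs the
 * header memory for each queue looks like:
 *
 *	mem[0] = producer index (initially qsize)
 *	mem[1] = consumer index (initially qsize)
 *
 * matching the adapter's assumption (see aac_qhdr) that the indices are
 * adjacent and in producer, consumer order.
 */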
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
*
* This routine will send a VM_CloseAll (shutdown) request to the adapter.
*/
static int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
int status;
fibctx = fib_alloc(dev);
fib_init(fibctx);
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xffffffff);
status = fib_send(ContainerCommand,
fibctx,
sizeof(struct aac_close),
FsaNormal,
1, 1,
NULL, NULL);
if (status == 0)
fib_complete(fibctx);
fib_free(fibctx);
return status;
}
/**
* aac_detach - detach adapter
* @detach: adapter to disconnect
*
* Disconnect and shutdown an AAC based adapter, freeing resources
* as we go.
*/
int aac_detach(struct aac_dev *detach)
{
struct aac_dev **dev = &devices;
while(*dev)
{
if(*dev == detach)
{
*dev = detach->next;
aac_send_shutdown(detach);
fib_map_free(detach);
pci_free_consistent(detach->pdev, detach->comm_size, detach->comm_addr, detach->comm_phys);
kfree(detach->queues);
return 1;
}
dev=&((*dev)->next);
}
BUG();
return 0;
}
/**
* aac_comm_init - Initialise FSA data structures
 *	@dev: Adapter to initialise
 *
 *	Initializes the data structures that are required for the FSA communication
 *	interface to operate.
 *	Returns
 *	0 - if we were able to init the communication interface.
 *	-ENOMEM - if there were errors initing. This is a fatal error.
*/
int aac_comm_init(struct aac_dev * dev)
{
unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
u32 *headers;
struct aac_entry * queues;
unsigned long size;
struct aac_queue_block * comm = dev->queues;
/*
* Now allocate and initialize the zone structures used as our
* pool of FIB context records. The size of the zone is based
* on the system memory size. We also initialize the mutex used
* to protect the zone.
*/
spin_lock_init(&dev->fib_lock);
/*
	 * Allocate the physically contiguous space for the communication
* queue headers.
*/
size = hdrsize + queuesize;
if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
return -ENOMEM;
queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
	/* Adapter to Host normal priority Command queue */
comm->queue[HostNormCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
queues += HOST_NORM_CMD_ENTRIES;
headers += 2;
/* Adapter to Host high priority command queue */
comm->queue[HostHighCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
queues += HOST_HIGH_CMD_ENTRIES;
headers +=2;
/* Host to adapter normal priority command queue */
comm->queue[AdapNormCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
queues += ADAP_NORM_CMD_ENTRIES;
headers += 2;
/* host to adapter high priority command queue */
comm->queue[AdapHighCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
queues += ADAP_HIGH_CMD_ENTRIES;
headers += 2;
/* adapter to host normal priority response queue */
comm->queue[HostNormRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
queues += HOST_NORM_RESP_ENTRIES;
headers += 2;
/* adapter to host high priority response queue */
comm->queue[HostHighRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
queues += HOST_HIGH_RESP_ENTRIES;
headers += 2;
/* host to adapter normal priority response queue */
comm->queue[AdapNormRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
queues += ADAP_NORM_RESP_ENTRIES;
headers += 2;
/* host to adapter high priority response queue */
comm->queue[AdapHighRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
return 0;
}
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
/*
* Ok now init the communication subsystem
*/
dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
if (dev->queues == NULL) {
printk(KERN_ERR "Error could not allocate comm region.\n");
return NULL;
}
memset(dev->queues, 0, sizeof(struct aac_queue_block));
if (aac_comm_init(dev)<0)
return NULL;
/*
* Initialize the list of fibs
*/
if(fib_setup(dev)<0)
return NULL;
INIT_LIST_HEAD(&dev->fib_list);
init_completion(&dev->aif_completion);
/*
* Add this adapter in to our dev List.
*/
dev->next = devices;
devices = dev;
return dev;
}
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* commsup.c
*
 * Abstract: Contains all routines that are required for FSA host/adapter
 * communication.
*
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
/**
* fib_map_alloc - allocate the fib objects
* @dev: Adapter to allocate for
*
* Allocate and map the shared PCI space for the FIB blocks used to
* talk to the Adaptec firmware.
*/
static int fib_map_alloc(struct aac_dev *dev)
{
if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
return -ENOMEM;
return 0;
}
/**
* fib_map_free - free the fib objects
* @dev: Adapter to free
*
* Free the PCI mappings and the memory allocated for FIB blocks
* on this adapter.
*/
void fib_map_free(struct aac_dev *dev)
{
pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
}
/**
* fib_setup - setup the fibs
* @dev: Adapter to set up
*
 *	Allocate the PCI space for the fibs, map it and then initialise the
* fib area, the unmapped fib data and also the free list
*/
int fib_setup(struct aac_dev * dev)
{
struct fib *fibptr;
struct hw_fib *fib;
dma_addr_t fibpa;
int i;
if(fib_map_alloc(dev)<0)
return -ENOMEM;
fib = dev->hw_fib_va;
fibpa = dev->hw_fib_pa;
memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
/*
* Initialise the fibs
*/
for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
{
fibptr->dev = dev;
fibptr->fib = fib;
fibptr->data = (void *) fibptr->fib->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
init_MUTEX_LOCKED(&fibptr->event_wait);
spin_lock_init(&fibptr->event_lock);
fib->header.XferState = cpu_to_le32(0xffffffff);
fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
fibptr->logicaladdr = (unsigned long) fibpa;
fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
fibpa = fibpa + sizeof(struct hw_fib);
}
/*
* Add the fib chain to the free list
*/
dev->fibs[AAC_NUM_FIB-1].next = NULL;
/*
* Enable this to debug out of queue space
*/
dev->free_fib = &dev->fibs[0];
return 0;
}
/**
* fib_alloc - allocate a fib
* @dev: Adapter to allocate the fib for
*
* Allocate a fib from the adapter fib pool. If the pool is empty we
* wait for fibs to become free.
*/
struct fib * fib_alloc(struct aac_dev *dev)
{
struct fib * fibptr;
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
if(!fibptr)
BUG();
dev->free_fib = fibptr->next;
spin_unlock_irqrestore(&dev->fib_lock, flags);
/*
* Set the proper node type code and node byte size
*/
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->size = sizeof(struct fib);
/*
* Null out fields that depend on being zero at the start of
* each I/O
*/
fibptr->fib->header.XferState = cpu_to_le32(0);
fibptr->callback = NULL;
fibptr->callback_data = NULL;
return fibptr;
}
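/*
 * Illustrative lifecycle (not part of the driver): a typical synchronous
 * caller pairs these primitives as follows; aac_send_shutdown() earlier
 * in this patch is a concrete instance:
 *
 *	fibptr = fib_alloc(dev);
 *	fib_init(fibptr);
 *	... build the request in fib_data(fibptr) ...
 *	if (fib_send(command, fibptr, size, FsaNormal, 1, 1,
 *		     NULL, NULL) == 0)
 *		fib_complete(fibptr);
 *	fib_free(fibptr);
 */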
/**
* fib_free - free a fib
* @fibptr: fib to free up
*
* Frees up a fib and places it on the appropriate queue
* (either free or timed out)
*/
void fib_free(struct fib * fibptr)
{
unsigned long flags;
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
aac_config.fib_timeouts++;
fibptr->next = fibptr->dev->timeout_fib;
fibptr->dev->timeout_fib = fibptr;
} else {
if (fibptr->fib->header.XferState != 0) {
printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
(void *)fibptr, fibptr->fib->header.XferState);
}
fibptr->next = fibptr->dev->free_fib;
fibptr->dev->free_fib = fibptr;
}
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
* fib_init - initialise a fib
* @fibptr: The fib to initialize
*
* Set up the generic fib fields ready for use
*/
void fib_init(struct fib *fibptr)
{
struct hw_fib *fib = fibptr->fib;
fib->header.StructType = FIB_MAGIC;
fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
fib->header.SenderFibAddress = cpu_to_le32(0);
fib->header.ReceiverFibAddress = cpu_to_le32(0);
fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
}
/**
* fib_deallocate - deallocate a fib
* @fibptr: fib to deallocate
*
* Will deallocate and return to the free pool the FIB pointed to by the
* caller.
*/
void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *fib = fibptr->fib;
if(fib->header.StructType != FIB_MAGIC)
BUG();
fib->header.XferState = cpu_to_le32(0);
}
/*
 * Communication primitives define and support the queuing method we use to
 * support host to adapter communication. All queue accesses happen through
 * these routines, and they are the only routines which have knowledge of
 * how these queues are implemented.
*/
/**
* aac_get_entry - get a queue entry
* @dev: Adapter
* @qid: Queue Number
* @entry: Entry return
* @index: Index return
* @nonotify: notification control
*
 *	For a given priority queue the routine returns a queue entry if the
 *	queue has free entries. If the queue is full (no free entries) then no
 *	entry is returned and the function returns 0, otherwise 1 is returned.
*/
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
struct aac_queue * q;
/*
* All of the queues wrap when they reach the end, so we check
* to see if they have reached the end and if they have we just
* set the index back to zero. This is a wrap. You could or off
* the high bits in all updates but this is a bit faster I think.
*/
q = &dev->queues->queue[qid];
*index = le32_to_cpu(*(q->headers.producer));
if (*index - 2 == le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
if (qid == AdapHighCmdQueue) {
if (*index >= ADAP_HIGH_CMD_ENTRIES)
*index = 0;
} else if (qid == AdapNormCmdQueue) {
if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
else if (qid == AdapHighRespQueue)
{
if (*index >= ADAP_HIGH_RESP_ENTRIES)
*index = 0;
}
else if (qid == AdapNormRespQueue)
{
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
else BUG();
if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", qid, q->numpending);
return 0;
} else {
*entry = q->base + *index;
return 1;
}
}
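/*
 * Worked example (illustrative): for the adapter normal command queue
 * the wrap and full tests above behave like a classic ring buffer:
 *
 *	if (producer >= ADAP_NORM_CMD_ENTRIES)
 *		producer = 0;
 *	full = (producer + 1 == consumer);
 *
 * One slot is kept unused so that producer == consumer always means
 * empty rather than full.
 */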
/**
* aac_queue_get - get the next free QE
* @dev: Adapter
* @index: Returned index
* @priority: Priority of fib
* @fib: Fib to associate with the queue entry
* @wait: Wait if queue full
* @fibptr: Driver fib object to go with fib
* @nonotify: Don't notify the adapter
*
 *	Gets the next free QE off the requested priority adapter command
* queue and associates the Fib with the QE. The QE represented by
* index is ready to insert on the queue when this routine returns
* success.
*/
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
struct aac_entry * entry = NULL;
int map = 0;
struct aac_queue * q = &dev->queues->queue[qid];
spin_lock_irqsave(q->lock, q->SavedIrql);
if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
{
/* if no entries wait for some if caller wants to */
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
{
printk(KERN_ERR "GetEntries failed\n");
}
/*
* Setup queue entry with a command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
map = 1;
}
else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
{
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
{
/* if no entries wait for some if caller wants to */
}
/*
* Setup queue entry with command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
		entry->addr = cpu_to_le32(fib->header.SenderFibAddress);	/* Restore the adapter's pointer to the FIB */
		fib->header.ReceiverFibAddress = fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
map = 0;
}
/*
 * If MapFib is true then we need to map the Fib and put pointers
* in the queue entry.
*/
if (map)
entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr));
return 0;
}
/**
* aac_insert_entry - insert a queue entry
* @dev: Adapter
* @index: Index of entry to insert
* @qid: Queue number
* @nonotify: Suppress adapter notification
*
 * Inserts the queue entry at @index onto the requested adapter queue by
 * advancing the producer index past it, and notifies the adapter that a
 * new entry is available unless notification has been suppressed.
*/
static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
{
struct aac_queue * q = &dev->queues->queue[qid];
if(q == NULL)
BUG();
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, q->SavedIrql);
if (qid == AdapHighCmdQueue ||
qid == AdapNormCmdQueue ||
qid == AdapHighRespQueue ||
qid == AdapNormRespQueue)
{
if (!nonotify)
aac_adapter_notify(dev, qid);
}
else
printk("Suprise insert!\n");
return 0;
}
/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
 */
/**
* fib_send - send a fib to the adapter
* @command: Command to send
* @fibptr: The fib
* @size: Size of fib data area
* @priority: Priority of Fib
* @wait: Async/sync select
* @reply: True if a reply is wanted
* @callback: Called with reply
* @callback_data: Passed to callback
*
 * Sends the requested FIB to the adapter and optionally will wait for a
 * response FIB. If the caller wants a response but does not wish to wait,
 * a callback must be supplied; it will be invoked when the response FIB
 * is received from the adapter. An illustrative usage sketch follows the
 * function body.
*/
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
u32 index;
u32 qid;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * fib = fibptr->fib;
struct aac_queue * q;
unsigned long flags = 0;
if (!(le32_to_cpu(fib->header.XferState) & HostOwned))
return -EBUSY;
/*
 * There are 5 cases with the wait and response requested flags.
 * The only invalid cases are if the caller requests to wait and
 * does not request a response and if the caller does not want a
 * response and the Fib is not allocated from pool. If a response
 * is not requested the Fib will just be deallocated by the DPC
 * routine when the response comes back from the adapter. No
 * further processing will be done besides deleting the Fib. We
 * will have a debug mode where the adapter can notify the host
 * it had a problem and the host can log that fact.
 */
if (wait && !reply) {
return -EINVAL;
} else if (!wait && reply) {
fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
} else if (!wait && !reply) {
fib->header.XferState |= cpu_to_le32(NoResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
} else if (wait && reply) {
fib->header.XferState |= cpu_to_le32(ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
}
/*
* Map the fib into 32bits by using the fib number
*/
fib->header.SenderData = fibptr - &dev->fibs[0]; /* for callback */
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
* caller.
*
* Map the hw fib pointer as a 32bit value
*/
fib->header.SenderFibAddress = fib2addr(fib);
fib->header.Command = cpu_to_le16(command);
fib->header.XferState |= cpu_to_le32(SentFromHost);
fibptr->fib->header.Flags = 0; /* Zero the flags field - its internal only... */
/*
* Set the size of the Fib we want to send to the adapter
*/
fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) {
return -EMSGSIZE;
}
/*
 * Get a queue entry, connect the FIB to it, and notify
 * the adapter that a command is ready.
*/
if (priority == FsaHigh) {
fib->header.XferState |= cpu_to_le32(HighPriority);
qid = AdapHighCmdQueue;
} else {
fib->header.XferState |= cpu_to_le32(NormalPriority);
qid = AdapNormCmdQueue;
}
q = &dev->queues->queue[qid];
if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
return -EWOULDBLOCK;
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", fib->header.XferState));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
*/
if (!wait) {
fibptr->callback = callback;
fibptr->callback_data = callback_data;
}
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;
fibptr->done = 0;
if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
return -EWOULDBLOCK;
/*
* If the caller wanted us to wait for response wait now.
*/
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
down(&fibptr->event_wait);
if(fibptr->done == 0)
BUG();
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
return -ETIMEDOUT;
else
return 0;
}
/*
 * If the caller did not want a response return success, otherwise
 * return pending
*/
if (reply)
return -EINPROGRESS;
else
return 0;
}
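/*
 * Usage sketch (illustrative only): a typical synchronous send, in the
 * style used by the command handlers elsewhere in this driver. The
 * command value and data area size are placeholders; fib_alloc(),
 * fib_init() and fib_free() are assumed to be the FIB lifetime helpers
 * provided alongside this file.
 */
#if 0
static int example_sync_send(struct aac_dev *dev)
{
	struct fib *fibptr;
	int status;
	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;
	fib_init(fibptr);
	/* ... fill in the command specific data area of fibptr here ... */
	status = fib_send(ContainerCommand, fibptr, sizeof(u32),
			  FsaNormal, 1, 1, NULL, NULL);
	fib_complete(fibptr);
	fib_free(fibptr);
	return status;
}
#endif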
/**
* aac_consumer_get - get the top of the queue
* @dev: Adapter
* @q: Queue
* @entry: Return entry
*
* Will return a pointer to the entry on the top of the queue requested that
* we are a consumer of, and return the address of the queue entry. It does
* not change the state of the queue.
*/
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
u32 index;
int status;
if (*q->headers.producer == *q->headers.consumer) {
status = 0;
} else {
/*
* The consumer index must be wrapped if we have reached
* the end of the queue, else we just use the entry
* pointed to by the header index
*/
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
index = 0;
else
index = le32_to_cpu(*q->headers.consumer);
*entry = q->base + index;
status = 1;
}
return(status);
}
int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
{
return (*q->headers.producer != *q->headers.consumer);
}
/**
* aac_consumer_free - free consumer entry
* @dev: Adapter
* @q: Queue
* @qid: Queue ident
*
* Frees up the current top of the queue we are a consumer of. If the
* queue was full notify the producer that the queue is no longer full.
*/
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
int wasfull = 0;
u32 notify;
if (le32_to_cpu(*q->headers.producer)+1 == le32_to_cpu(*q->headers.consumer))
wasfull = 1;
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
*q->headers.consumer = cpu_to_le32(1);
else
*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
if (wasfull) {
switch (qid) {
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
case HostHighCmdQueue:
notify = HostHighCmdNotFull;
break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
case HostHighRespQueue:
notify = HostHighRespNotFull;
break;
default:
BUG();
return;
}
aac_adapter_notify(dev, notify);
}
}
/**
* fib_adapter_complete - complete adapter issued fib
* @fibptr: fib to complete
* @size: size of fib
*
* Will do all necessary work to complete a FIB that was sent from
* the adapter.
*/
int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
struct hw_fib * fib = fibptr->fib;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
if (le32_to_cpu(fib->header.XferState) == 0)
return 0;
/*
* If we plan to do anything check the structure type first.
*/
if ( fib->header.StructType != FIB_MAGIC ) {
return -EINVAL;
}
/*
* This block handles the case where the adapter had sent us a
* command and we have finished processing the command. We
* call completeFib when we are done processing the command
* and want to send a response back to the adapter. This will
* send the completed cdb to the adapter.
*/
if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
fib->header.XferState |= cpu_to_le32(HostProcessed);
if (fib->header.XferState & cpu_to_le32(HighPriority)) {
u32 index;
if (size)
{
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(fib->header.SenderSize))
return -EMSGSIZE;
fib->header.Size = cpu_to_le16(size);
}
if(aac_queue_get(dev, &index, AdapHighRespQueue, fib, 1, NULL, &nointr) < 0) {
return -EWOULDBLOCK;
}
if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
}
}
	else if (fib->header.XferState & cpu_to_le32(NormalPriority))
{
u32 index;
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(fib->header.SenderSize))
return -EMSGSIZE;
fib->header.Size = cpu_to_le16(size);
}
if (aac_queue_get(dev, &index, AdapNormRespQueue, fib, 1, NULL, &nointr) < 0)
return -EWOULDBLOCK;
if (aac_insert_entry(dev, index, AdapNormRespQueue,
(nointr & (int)aac_config.irq_mod)) != 0)
{
}
}
}
else
{
printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
BUG();
}
return 0;
}
/**
* fib_complete - fib completion handler
* @fib: FIB to complete
*
* Will do all necessary work to complete a FIB.
*/
int fib_complete(struct fib * fibptr)
{
struct hw_fib * fib = fibptr->fib;
/*
* Check for a fib which has already been completed
*/
if (fib->header.XferState == cpu_to_le32(0))
return 0;
/*
* If we plan to do anything check the structure type first.
*/
if (fib->header.StructType != FIB_MAGIC)
return -EINVAL;
/*
 * This block completes a cdb which originated on the host and we
* just need to deallocate the cdb or reinit it. At this point the
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
if((fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(fib->header.XferState & cpu_to_le32(AdapterProcessed)))
{
fib_dealloc(fibptr);
}
else if(fib->header.XferState & cpu_to_le32(SentFromHost))
{
/*
* This handles the case when the host has aborted the I/O
* to the adapter because the adapter is not responding
*/
fib_dealloc(fibptr);
} else if(fib->header.XferState & cpu_to_le32(HostOwned)) {
fib_dealloc(fibptr);
} else {
BUG();
}
return 0;
}
/**
* aac_printf - handle printf from firmware
* @dev: Adapter
* @val: Message info
*
* Print a message passed to us by the controller firmware on the
* Adaptec board
*/
void aac_printf(struct aac_dev *dev, u32 val)
{
int length = val & 0xffff;
int level = (val >> 16) & 0xffff;
char *cp = dev->printfbuf;
/*
* The size of the printfbuf is set in port.c
* There is no variable or define for it
*/
if (length > 255)
length = 255;
if (cp[length] != 0)
cp[length] = 0;
if (level == LOG_HIGH_ERROR)
printk(KERN_WARNING "aacraid:%s", cp);
else
printk(KERN_INFO "aacraid:%s", cp);
memset(cp, 0, 256);
}
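/*
 * Illustrative note: the firmware packs the message length into the low
 * 16 bits of val and the log level into the high 16 bits. A hypothetical
 * encoder for that layout would look like this.
 */
#if 0
static u32 example_pack_printf(u16 level, u16 length)
{
	return ((u32)level << 16) | length;
}
#endif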
/**
* aac_handle_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling.
*/
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * fib = fibptr->fib;
/*
* Set the status of this FIB to be Invalid parameter.
*
* *(u32 *)fib->data = ST_INVAL;
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fibptr, sizeof(u32));
}
/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor
*
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
* until the queue is empty. When the queue is empty it will wait for
* more FIBs.
*/
int aac_command_thread(struct aac_dev * dev)
{
struct hw_fib *fib, *newfib;
struct fib fibptr; /* for error logging */
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
/*
* We can only have one thread per adapter for AIF's.
*/
if (dev->aif_thread)
return -EINVAL;
/*
* Set up the name that will appear in 'ps'
* stored in task_struct.comm[16].
*/
sprintf(current->comm, "aacraid");
daemonize();
/*
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
memset(&fibptr, 0, sizeof(struct fib));
add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while(1)
{
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
entry = queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct hw_fib, header.FibLinks);
/*
* We will process the FIB here or pass it to a
 * worker thread that is TBD. We really can't
* do anything at this point since we don't have
* anything defined for this thread to do.
*/
memset(&fibptr, 0, sizeof(struct fib));
fibptr.type = FSAFS_NTC_FIB_CONTEXT;
fibptr.size = sizeof( struct fib );
fibptr.fib = fib;
fibptr.data = fib->data;
fibptr.dev = dev;
/*
* We only handle AifRequest fibs from the adapter.
*/
aifcmd = (struct aac_aifcmd *) fib->data;
if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
aac_handle_aif(dev, &fibptr);
} else {
/* The u32 here is important and intended. We are using
32bit wrapping time to fit the adapter field */
u32 time_now, time_last;
unsigned long flagv;
time_now = jiffies/HZ;
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
* For each Context that is on the
* fibctxList, make a copy of the
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
while (entry != &dev->fib_list) {
/*
* Extract the fibctx
*/
fibctx = list_entry(entry, struct aac_fib_context, next);
/*
* Check if the queue is getting
* backlogged
*/
if (fibctx->count > 20)
{
time_last = fibctx->jiffies;
/*
* Has it been > 2 minutes
* since the last read off
* the queue?
*/
if ((time_now - time_last) > 120) {
entry = entry->next;
aac_close_fib_context(dev, fibctx);
continue;
}
}
/*
* Warning: no sleep allowed while
* holding spinlock
*/
newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
if (newfib) {
/*
* Make the copy of the FIB
*/
memcpy(newfib, fib, sizeof(struct hw_fib));
/*
* Put the FIB onto the
* fibctx's fibs
*/
list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
fibctx->count++;
/*
* Set the event to wake up the
 * thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
}
entry = entry->next;
}
/*
* Set the status of this FIB
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibptr, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
schedule();
if(signal_pending(current))
break;
set_current_state(TASK_INTERRUPTIBLE);
}
remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0;
complete_and_exit(&dev->aif_completion, 0);
}
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* dpcsup.c
*
* Abstract: All DPC processing routines for the cyclone board occur here.
*
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blk.h>
#include <asm/semaphore.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
/**
* aac_response_normal - Handle command replies
* @q: Queue to read from
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
 * all the QEs there are and wake up all the waiters before exiting. We will
* take a spinlock out on the queue before operating on it.
*/
unsigned int aac_response_normal(struct aac_queue * q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
struct hw_fib * hwfib;
struct fib * fib;
int consumed = 0;
unsigned long flags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
 * back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
{
int fast;
fast = (int) (entry->addr & 0x01);
hwfib = addr2fib(entry->addr & ~0x01);
aac_consumer_free(dev, q, HostNormRespQueue);
fib = &dev->fibs[hwfib->header.SenderData];
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
list_del(&fib->queue);
dev->queues->queue[AdapNormCmdQueue].numpending--;
} else {
printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
continue;
}
spin_unlock_irqrestore(q->lock, flags);
if (fast) {
/*
* Doctor the fib
*/
*(u32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
u32 *pstatus = (u32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 1;
up(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
}
consumed++;
spin_lock_irqsave(q->lock, flags);
}
if (consumed > aac_config.peak_fibs)
aac_config.peak_fibs = consumed;
if (consumed == 0)
aac_config.zero_fibs++;
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
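/*
 * Illustrative sketch only: the adapter borrows bit 0 of a response queue
 * entry address as a "fast response" flag, which is why the handler above
 * masks it off before converting the address back into a FIB pointer. The
 * helper name is hypothetical.
 */
#if 0
static struct hw_fib *example_decode_entry(u32 addr, int *fast)
{
	*fast = (int)(addr & 0x01);	/* fast-path completion flag */
	return addr2fib(addr & ~0x01);	/* strip the flag bit */
}
#endif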
/**
* aac_command_normal - handle commands
* @q: queue to process
*
* This DPC routine will be queued when the adapter interrupts us to
* let us know there is a command on our normal priority queue. We will
 * pull off all the QEs there are and wake up all the waiters before exiting.
* We will take a spinlock out on the queue before operating on it.
*/
unsigned int aac_command_normal(struct aac_queue *q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
unsigned long flags;
spin_lock_irqsave(q->lock, flags);
/*
 * Keep pulling command QEs off the command queue and either
 * queueing them for the AIF thread or completing them until
 * there are no more QEs. We then return back to the system.
*/
while(aac_consumer_get(dev, q, &entry))
{
struct hw_fib * fib;
fib = addr2fib(entry->addr);
if (dev->aif_thread) {
list_add_tail(&fib->header.FibLinks, &q->cmdq);
aac_consumer_free(dev, q, HostNormCmdQueue);
wake_up_interruptible(&q->cmdready);
} else {
struct fib fibctx;
aac_consumer_free(dev, q, HostNormCmdQueue);
spin_unlock_irqrestore(q->lock, flags);
memset(&fibctx, 0, sizeof(struct fib));
fibctx.type = FSAFS_NTC_FIB_CONTEXT;
fibctx.size = sizeof(struct fib);
fibctx.fib = fib;
fibctx.data = fib->data;
fibctx.dev = dev;
/*
* Set the status of this FIB
*/
*(u32 *)fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibctx, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
}
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* linit.c
*
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*
* Provides the following driver entry points:
* aac_detect()
* aac_release()
* aac_queuecommand()
* aac_resetcommand()
* aac_biosparm()
*
*/
#define AAC_DRIVER_VERSION "0.9.9ac6-TEST"
#define AAC_DRIVER_BUILD_DATE __DATE__
#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include <scsi/scsicam.h>
#include "aacraid.h"
#include "sd.h"
#define AAC_DRIVERNAME "aacraid"
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com");
MODULE_LICENSE("GPL");
MODULE_PARM(nondasd, "i");
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
static int nondasd=-1;
struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
static unsigned aac_count = 0;
static int aac_cfg_major = -1;
/*
* Because of the way Linux names scsi devices, the order in this table has
* become important. Check for on-board Raid first, add-in cards second.
*
* dmb - For now we add the number of channels to this structure.
* In the future we should add a fib that reports the number of channels
* for the card. At that time we can remove the channels from here
*/
static struct aac_driver_ident aac_drivers[] = {
{ 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 2/Si */
{ 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Si */
{ 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Si */
{ 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */
{ 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2 }, /* catapult*/
{ 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2 }, /* tomcat*/
{ 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1 }, /* Adaptec 2120S (Crusader)*/
{ 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan)*/
{ 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/
{ 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4 }, /* Dell PERC2 "Quad Channel" */
{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID-4M ", 4 } /* HP NetRAID-4M */
};
#define NUM_AACTYPES (sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
static int num_aacdrivers = NUM_AACTYPES;
static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg);
static int aac_cfg_open(struct inode * inode, struct file * file);
static int aac_cfg_release(struct inode * inode,struct file * file);
static struct file_operations aac_cfg_fops = {
owner: THIS_MODULE,
ioctl: aac_cfg_ioctl,
open: aac_cfg_open,
release: aac_cfg_release
};
static int aac_detect(Scsi_Host_Template *);
static int aac_release(struct Scsi_Host *);
static int aac_queuecommand(Scsi_Cmnd *, void (*CompletionRoutine)(Scsi_Cmnd *));
static int aac_biosparm(Scsi_Disk *, struct block_device *, int *);
static int aac_procinfo(char *, char **, off_t, int, int, int);
static int aac_ioctl(Scsi_Device *, int, void *);
static int aac_eh_abort(Scsi_Cmnd * cmd);
static int aac_eh_device_reset(Scsi_Cmnd* cmd);
static int aac_eh_bus_reset(Scsi_Cmnd* cmd);
static int aac_eh_reset(Scsi_Cmnd* cmd);
static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
/**
* aac_detect - Probe for aacraid cards
* @template: SCSI driver template
*
 * Probe for AAC Host Adapters; initialize, register, and report the
* configuration of each AAC Host Adapter found.
* Returns the number of adapters successfully initialized and
* registered.
* Initializes all data necessary for this particular SCSI driver.
* Notes:
* The detect routine must not call any of the mid level functions
* to queue commands because things are not guaranteed to be set
* up yet. The detect routine can send commands to the host adapter
* as long as the program control will not be passed to scsi.c in
* the processing of the command. Note especially that
* scsi_malloc/scsi_free must not be called.
*
*/
static int aac_detect(Scsi_Host_Template *template)
{
int index;
int container;
u16 vendor_id, device_id;
struct Scsi_Host *host_ptr;
struct pci_dev *dev = NULL;
struct aac_dev *aac;
struct fsa_scsi_hba *fsa_dev_ptr;
char *name = NULL;
printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", AAC_DRIVER_BUILD_DATE);
/* setting up the proc directory structure */
template->proc_name = "aacraid";
for( index = 0; index != num_aacdrivers; index++ )
{
device_id = aac_drivers[index].device;
vendor_id = aac_drivers[index].vendor;
name = aac_drivers[index].name;
dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n",
name, vendor_id, device_id,
aac_drivers[index].subsystem_vendor,
aac_drivers[index].subsystem_device));
dev = NULL;
while((dev = pci_find_device(vendor_id, device_id, dev))) {
if (pci_enable_device(dev))
continue;
pci_set_master(dev);
pci_set_dma_mask(dev, 0xFFFFFFFFULL);
if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) ||
(dev->subsystem_device != aac_drivers[index].subsystem_device))
continue;
dprintk((KERN_DEBUG "%s device detected.\n", name));
dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id,
aac_drivers[index].subsystem_vendor, aac_drivers[index].subsystem_device));
/* Increment the host adapter count */
aac_count++;
/*
* scsi_register() allocates memory for a Scsi_Hosts structure and
* links it into the linked list of host adapters. This linked list
* contains the data for all possible <supported> scsi hosts.
* This is similar to the Scsi_Host_Template, except that we have
* one entry for each actual physical host adapter on the system,
* stored as a linked list. If there are two AAC boards, then we
* will need to make two Scsi_Host entries, but there will be only
* one Scsi_Host_Template entry. The second argument to scsi_register()
* specifies the size of the extra memory we want to hold any device
* specific information.
*/
host_ptr = scsi_register( template, sizeof(struct aac_dev) );
/*
* These three parameters can be used to allow for wide SCSI
* and for host adapters that support multiple buses.
*/
host_ptr->max_id = 17;
host_ptr->max_lun = 8;
host_ptr->max_channel = 1;
host_ptr->irq = dev->irq; /* Adapter IRQ number */
/* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
host_ptr->base = dev->resource[0].start;
scsi_set_pci_device(host_ptr, dev);
dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", host_ptr->base, dev->resource[0].start));
dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
/*
* The unique_id field is a unique identifier that must
* be assigned so that we have some way of identifying
* each host adapter properly and uniquely. For hosts
* that do not support more than one card in the
* system, this does not need to be set. It is
* initialized to zero in scsi_register(). This is the
* value returned as aac->id.
*/
host_ptr->unique_id = aac_count - 1;
/*
* This function is called after the device list has
* been built to find the tagged queueing depth
* supported for each device.
*/
host_ptr->select_queue_depths = aac_queuedepth;
aac = (struct aac_dev *)host_ptr->hostdata;
/* attach a pointer back to Scsi_Host */
aac->scsi_host_ptr = host_ptr;
aac->pdev = dev;
aac->cardtype = index;
aac->name = aac->scsi_host_ptr->hostt->name;
aac->id = aac->scsi_host_ptr->unique_id;
/* Initialize the ordinal number of the device to -1 */
fsa_dev_ptr = &(aac->fsa_dev);
for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ )
fsa_dev_ptr->devno[container] = -1;
dprintk((KERN_DEBUG "Initializing Hardware...\n"));
if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
{
/* device initialization failed */
printk(KERN_WARNING "aacraid: device initialization failed.\n");
scsi_unregister(host_ptr);
aac_count--;
continue;
}
dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id));
aac_get_adapter_info(aac);
if(nondasd != -1)
{
/* someone told us how to set this on the cmdline */
aac->nondasd_support = (nondasd!=0);
}
if(aac->nondasd_support != 0){
printk(KERN_INFO "%s%d: Non-DASD support enabled\n", aac->name, aac->id);
}
dprintk((KERN_DEBUG "%s:%d options flag %04x.\n",name, host_ptr->unique_id,aac->adapter_info.options));
if(aac->nondasd_support == 1)
{
/*
* max channel will be the physical channels plus 1 virtual channel
* all containers are on the virtual channel 0
 * physical channels are addressed by their actual physical number+1
*/
host_ptr->max_channel = aac_drivers[index].channels+1;
} else {
host_ptr->max_channel = 1;
}
dprintk((KERN_DEBUG "Device has %d logical channels\n", host_ptr->max_channel));
aac_get_containers(aac);
aac_devices[aac_count-1] = aac;
/*
* dmb - we may need to move these 3 parms somewhere else once
* we get a fib that can report the actual numbers
*/
host_ptr->max_id = AAC_MAX_TARGET;
host_ptr->max_lun = AAC_MAX_LUN;
/*
* If we are PAE capable then our future DMA mappings
* (for read/write commands) are 64bit clean and don't
* need bouncing. This assumes we do no other 32bit only
* allocations (eg fib table expands) after this point.
*/
if(aac->pae_support)
pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFULL);
}
}
if( aac_count ){
if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n");
}
template->present = aac_count; /* # of cards of this type found */
return aac_count;
}
/**
* aac_release - release SCSI host resources
* @host_ptr: SCSI host to clean up
*
* Release all resources previously acquired to support a specific Host
* Adapter and unregister the AAC Host Adapter.
*
* BUGS: Does not wait for the thread it kills to die.
*/
static int aac_release(struct Scsi_Host *host_ptr)
{
struct aac_dev *dev;
dprintk((KERN_DEBUG "aac_release.\n"));
dev = (struct aac_dev *)host_ptr->hostdata;
/*
* kill any threads we started
*/
kill_proc(dev->thread_pid, SIGKILL, 0);
wait_for_completion(&dev->aif_completion);
/*
* Call the comm layer to detach from this adapter
*/
aac_detach(dev);
/* Check free orderings... */
/* remove interrupt binding */
free_irq(host_ptr->irq, dev);
iounmap((void * )dev->regs.sa);
/* unregister adapter */
scsi_unregister(host_ptr);
/*
* FIXME: This assumes no hot plugging is going on...
*/
if( aac_cfg_major >= 0 )
{
unregister_chrdev(aac_cfg_major, "aac");
aac_cfg_major = -1;
}
return 0;
}
/**
* aac_queuecommand - queue a SCSI command
* @scsi_cmnd_ptr: SCSI command to queue
* @CompletionRoutine: Function to call on command completion
*
* Queues a command for execution by the associated Host Adapter.
*/
static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd *))
{
int ret;
scsi_cmnd_ptr->scsi_done = complete;
/*
* aac_scsi_cmd() handles command processing, setting the
 * result code and calling the completion routine.
*/
if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
return ret;
}
/**
* aac_driverinfo - Returns the host adapter name
* @host_ptr: Scsi host to report on
*
* Returns a static string describing the device in question
*/
const char *aac_driverinfo(struct Scsi_Host *host_ptr)
{
struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
return aac_drivers[dev->cardtype].name;
}
/**
* aac_get_driver_ident
* @devtype: index into lookup table
*
* Returns a pointer to the entry in the driver lookup table.
*/
struct aac_driver_ident* aac_get_driver_ident(int devtype)
{
return &aac_drivers[devtype];
}
/**
* aac_biosparm - return BIOS parameters for disk
* @disk: SCSI disk object to process
* @device: Disk in question
* @geom: geometry block to fill in
*
* Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
* The default disk geometry is 64 heads, 32 sectors, and the appropriate
* number of cylinders so as not to exceed drive capacity. In order for
* disks equal to or larger than 1 GB to be addressable by the BIOS
* without exceeding the BIOS limitation of 1024 cylinders, Extended
* Translation should be enabled. With Extended Translation enabled,
* drives between 1 GB inclusive and 2 GB exclusive are given a disk
* geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
* are given a disk geometry of 255 heads and 63 sectors. However, if
* the BIOS detects that the Extended Translation setting does not match
* the geometry in the partition table, then the translation inferred
* from the partition table will be used by the BIOS, and a warning may
* be displayed.
*/
static int aac_biosparm(Scsi_Disk *disk, struct block_device *bdev, int *geom)
{
struct diskparm *param = (struct diskparm *)geom;
u8 *buf;
dprintk((KERN_DEBUG "aac_biosparm.\n"));
/*
* Assuming extended translation is enabled - #REVISIT#
*/
if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
{
if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
{
param->heads = 255;
param->sectors = 63;
}
else
{
param->heads = 128;
param->sectors = 32;
}
}
else
{
param->heads = 64;
param->sectors = 32;
}
param->cylinders = disk->capacity/(param->heads * param->sectors);
/*
* Read the partition table block
*/
buf = scsi_bios_ptable(bdev);
/*
* If the boot sector partition table is valid, search for a partition
* table entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
if(*(unsigned short *)(buf + 0x40) == cpu_to_le16(0xaa55))
{
struct partition *first = (struct partition *)buf;
struct partition *entry = first;
int saved_cylinders = param->cylinders;
int num;
unsigned char end_head, end_sec;
for(num = 0; num < 4; num++)
{
end_head = entry->end_head;
end_sec = entry->end_sector & 0x3f;
if(end_head == 63)
{
param->heads = 64;
param->sectors = 32;
break;
}
else if(end_head == 127)
{
param->heads = 128;
param->sectors = 32;
break;
}
else if(end_head == 254)
{
param->heads = 255;
param->sectors = 63;
break;
}
entry++;
}
if(num == 4)
{
end_head = first->end_head;
end_sec = first->end_sector & 0x3f;
}
param->cylinders = disk->capacity / (param->heads * param->sectors);
if(num < 4 && end_sec == param->sectors)
{
if(param->cylinders != saved_cylinders)
dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
param->heads, param->sectors, num));
}
else if(end_head > 0 || end_sec > 0)
{
dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
end_head + 1, end_sec, num));
dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
param->heads, param->sectors));
}
}
kfree(buf);
return 0;
}
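/*
 * Worked example (illustrative only): a 4 GB disk has 8388608 512-byte
 * sectors, which is above the 2 GB threshold, so the translated geometry
 * becomes 255 heads and 63 sectors, giving 8388608 / (255 * 63) = 522
 * cylinders, comfortably inside the BIOS limit of 1024.
 */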
/**
* aac_queuedepth - compute queue depths
* @host: SCSI host in question
* @dev: SCSI device we are considering
*
* Selects queue depths for each target device based on the host adapter's
* total capacity and the queue depth supported by the target device.
* A queue depth of one automatically disables tagged queueing.
*/
static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
{
Scsi_Device * dptr;
dprintk((KERN_DEBUG "aac_queuedepth.\n"));
dprintk((KERN_DEBUG "Device # Q Depth Online\n"));
dprintk((KERN_DEBUG "---------------------------\n"));
for(dptr = dev; dptr != NULL; dptr = dptr->next)
{
if(dptr->host == host)
{
dptr->queue_depth = 10;
dprintk((KERN_DEBUG " %2d %d %d\n",
dptr->id, dptr->queue_depth, dptr->online));
}
}
}
/**
* aac_eh_abort - Abort command if possible.
* @cmd: SCSI command block to abort
*
* Called when the midlayer wishes to abort a command. We don't support
* this facility, and our firmware looks after life for us. We just
* report this as failing
*/
static int aac_eh_abort(Scsi_Cmnd *cmd)
{
return FAILED;
}
/**
* aac_eh_device_reset - Reset command handling
* @cmd: SCSI command block causing the reset
*
 * Issue a reset of a SCSI device. We are ourselves not truly a SCSI
* controller and our firmware will do the work for us anyway. Thus this
* is a no-op. We just return FAILED.
*/
static int aac_eh_device_reset(Scsi_Cmnd *cmd)
{
return FAILED;
}
/**
* aac_eh_bus_reset - Reset command handling
* @scsi_cmd: SCSI command block causing the reset
*
 * Issue a reset of a SCSI bus. We are ourselves not truly a SCSI
* controller and our firmware will do the work for us anyway. Thus this
* is a no-op. We just return FAILED.
*/
static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
{
return FAILED;
}
/**
* aac_eh_hba_reset - Reset command handling
* @scsi_cmd: SCSI command block causing the reset
*
* Issue a reset of a SCSI host. If things get this bad then arguably we should
* go take a look at what the host adapter is doing and see if something really
* broke (as can occur at least on my Dell QC card if a drive keeps failing spinup)
*/
static int aac_eh_reset(Scsi_Cmnd* cmd)
{
printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
return FAILED;
}
/**
* aac_ioctl - Handle SCSI ioctls
* @scsi_dev_ptr: scsi device to operate upon
* @cmd: ioctl command to use issue
* @arg: ioctl data pointer
*
* Issue an ioctl on an aacraid device. Returns a standard unix error code or
* zero for success
*/
static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
{
struct aac_dev *dev;
dprintk((KERN_DEBUG "aac_ioctl.\n"));
dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
return aac_do_ioctl(dev, cmd, arg);
}
/**
* aac_cfg_open - open a configuration file
* @inode: inode being opened
* @file: file handle attached
*
* Called when the configuration device is opened. Does the needed
* set up on the handle and then returns
*
* Bugs: This needs extending to check a given adapter is present
* so we can support hot plugging, and to ref count adapters.
*/
static int aac_cfg_open(struct inode * inode, struct file * file )
{
unsigned minor_number = minor(inode->i_rdev);
if(minor_number >= aac_count)
return -ENODEV;
return 0;
}
/**
* aac_cfg_release - close down an AAC config device
* @inode: inode of configuration file
* @file: file handle of configuration file
*
* Called when the last close of the configuration file handle
* is performed.
*/
static int aac_cfg_release(struct inode * inode, struct file * file )
{
return 0;
}
/**
* aac_cfg_ioctl - AAC configuration request
* @inode: inode of device
* @file: file handle
* @cmd: ioctl command code
* @arg: argument
*
* Handles a configuration ioctl. Currently this involves wrapping it
 * up and feeding it into the nasty Windows-alike glue layer.
*
* Bugs: Needs locking against parallel ioctls lower down
* Bugs: Needs to handle hot plugging
*/
static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg )
{
struct aac_dev *dev = aac_devices[minor(inode->i_rdev)];
return aac_do_ioctl(dev, cmd, (void *)arg);
}
/*
* To use the low level SCSI driver support using the linux kernel loadable
* module interface we should initialize the global variable driver_interface
* (datatype Scsi_Host_Template) and then include the file scsi_module.c.
*/
static Scsi_Host_Template driver_template = {
module: THIS_MODULE,
name: "AAC",
proc_info: aac_procinfo,
detect: aac_detect,
release: aac_release,
info: aac_driverinfo,
ioctl: aac_ioctl,
queuecommand: aac_queuecommand,
bios_param: aac_biosparm,
can_queue: AAC_NUM_IO_FIB,
this_id: 16,
sg_tablesize: 16,
max_sectors: 128,
cmd_per_lun: AAC_NUM_IO_FIB,
eh_abort_handler: aac_eh_abort,
eh_device_reset_handler:aac_eh_device_reset,
eh_bus_reset_handler: aac_eh_bus_reset,
eh_host_reset_handler: aac_eh_reset,
use_clustering: ENABLE_CLUSTERING,
};
#include "scsi_module.c"
/**
* aac_procinfo - Implement /proc/scsi/<drivername>/<n>
* @proc_buffer: memory buffer for I/O
* @start_ptr: pointer to first valid data
* @offset: offset into file
* @bytes_available: space left
* @host_no: scsi host ident
* @write: direction of I/O
*
 * Used to export driver statistics and other information to the world outside
* the kernel using the proc file system. Also provides an interface to
* feed the driver with information.
*
* For reads
* - if offset > 0 return 0
* - if offset == 0 write data to proc_buffer and set the start_ptr to
* beginning of proc_buffer, return the number of characters written.
* For writes
* - writes currently not supported, return 0
*
* Bugs: Only offset zero is handled
*/
static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
int bytes_available, int host_no, int write)
{
if(write || offset > 0)
return 0;
*start_ptr = proc_buffer;
return sprintf(proc_buffer, "%s %d\n", "Raid Controller, scsi hba number", host_no);
}
EXPORT_NO_SYMBOLS;
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* rx.c
*
* Abstract: Hardware miniport for Drawbridge specific hardware functions.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct aac_dev *dev = dev_id;
unsigned long bellbits;
u8 intstat, mask;
intstat = rx_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
*/
mask = ~(rx_readb(dev, MUnit.OIMR));
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask)
{
bellbits = rx_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
}
}
/**
* aac_rx_enable_interrupt - Enable event reporting
* @dev: Adapter
* @event: Event to enable
*
* Enable event reporting from the i960 for a given event.
*/
static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
{
switch (event) {
case HostNormCmdQue:
dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
break;
case HostNormRespQue:
dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
break;
case AdapNormCmdNotFull:
dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
break;
case AdapNormRespNotFull:
dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
break;
}
}
/**
* aac_rx_disable_interrupt - Disable event reporting
* @dev: Adapter
* @event: Event to enable
*
* Disable event reporting from the i960 for a given event.
*/
static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
{
switch (event) {
case HostNormCmdQue:
dev->irq_mask |= (OUTBOUNDDOORBELL_1);
break;
case HostNormRespQue:
dev->irq_mask |= (OUTBOUNDDOORBELL_2);
break;
case AdapNormCmdNotFull:
dev->irq_mask |= (OUTBOUNDDOORBELL_3);
break;
case AdapNormRespNotFull:
dev->irq_mask |= (OUTBOUNDDOORBELL_4);
break;
}
}
/**
* rx_sync_cmd - send a command and wait
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
* @ret: adapter status
*
 * This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
{
unsigned long start;
int ok;
/*
* Write the command into Mailbox 0
*/
rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
/*
* Write the parameters into Mailboxes 1 - 4
*/
rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
rx_writel(dev, InboundMailbox2, 0);
rx_writel(dev, InboundMailbox3, 0);
rx_writel(dev, InboundMailbox4, 0);
/*
* Clear the synch command doorbell to start on a clean slate.
*/
rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
/*
* Disable doorbell interrupts
*/
rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
/*
* Force the completion of the mask register write before issuing
* the interrupt.
*/
rx_readb (dev, MUnit.OIMR);
/*
* Signal that there is a new synch command
*/
rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
ok = 0;
start = jiffies;
/*
* Wait up to 30 seconds
*/
while (time_before(jiffies, start+30*HZ))
{
udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
/*
* Mon960 will set doorbell0 bit when it has completed the command.
*/
if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
/*
* Clear the doorbell.
*/
rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
ok = 1;
break;
}
/*
* Yield the processor in case we are slow
*/
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
if (ok != 1) {
/*
* Restore interrupt mask even though we timed out
*/
rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) & 0xfb);
return -ETIMEDOUT;
}
/*
* Pull the synch status from Mailbox 0.
*/
*status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
/*
* Clear the synch command doorbell.
*/
rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
/*
* Restore interrupt mask
*/
rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) & 0xfb);
return 0;
}
/**
* aac_rx_interrupt_adapter - interrupt adapter
* @dev: Adapter
*
* Send an interrupt to the i960 and breakpoint it.
*/
static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
u32 ret;
rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
}
/**
* aac_rx_notify_adapter - send an event to the adapter
* @dev: Adapter
* @event: Event to send
*
* Notify the i960 that something it probably cares about has
* happened.
*/
static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
switch (event) {
case AdapNormCmdQue:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
break;
case HostNormRespNotFull:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
break;
case AdapNormRespQue:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
break;
case HostNormCmdNotFull:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
break;
case HostShutdown:
// rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
break;
case FastIo:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
break;
case AdapPrintfDone:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
break;
default:
BUG();
break;
}
}
/**
* aac_rx_start_adapter - activate adapter
* @dev: Adapter
*
* Start up processing on an i960 based AAC adapter
*/
static void aac_rx_start_adapter(struct aac_dev *dev)
{
u32 status;
struct aac_init *init;
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
/*
* Tell the adapter we are back and up and running so it will scan
* its command queues and enable our interrupts
*/
dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
/*
 * First clear out all interrupts. Then enable the ones that we
* can handle.
*/
rx_writeb(dev, MUnit.OIMR, 0xff);
rx_writel(dev, MUnit.ODR, 0xffffffff);
// rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
rx_writeb(dev, MUnit.OIMR, 0xfb);
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
}
/**
* aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
* @devnum: adapter number
*
* Allocate and set up resources for the i960 based AAC variants. The
* device_interface in the commregion will be allocated and linked
* to the comm region.
*/
int aac_rx_init(struct aac_dev *dev, unsigned long num)
{
unsigned long start;
unsigned long status;
int instance;
const char * name;
dev->devnum = num;
instance = dev->id;
name = dev->name;
/*
* Map in the registers from the adapter.
*/
if((dev->regs.rx = (struct rx_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map i960.\n" );
return -1;
}
/*
* Check to see if the board failed any self tests.
*/
if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
return -1;
}
/*
* Check to see if the board panic'd while booting.
*/
if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
return -1;
}
start = jiffies;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING))
{
if(time_after(jiffies, start+180*HZ))
{
status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %ld.\n", dev->name, instance, status);
return -1;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
{
printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
return -1;
}
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
dev->a_ops.adapter_notify = aac_rx_notify_adapter;
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
if (aac_init_adapter(dev) == NULL)
return -1;
/*
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
*/
aac_rx_start_adapter(dev);
return 0;
}
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* sa.c
*
* Abstract: Drawbridge specific support functions
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
#include "scsi.h"
#include "hosts.h"
#include "aacraid.h"
static void aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct aac_dev *dev = dev_id;
unsigned short intstat, mask;
intstat = sa_readw(dev, DoorbellReg_p);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have been enabled.
*/
mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask) {
if (intstat & PrintfReady) {
aac_printf(dev, le32_to_cpu(sa_readl(dev, Mailbox5)));
sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
sa_writew(dev, DoorbellReg_s, PrintfDone);
} else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
} else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
} else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
} else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
}
}
}
/**
* aac_sa_enable_interrupt - enable an interrupt event
* @dev: Which adapter to enable.
* @event: Which adapter event.
*
* This routine will enable the corresponding adapter event to cause an interrupt on
* the host.
*/
void aac_sa_enable_interrupt(struct aac_dev *dev, u32 event)
{
switch (event) {
case HostNormCmdQue:
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_1);
break;
case HostNormRespQue:
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_2);
break;
case AdapNormCmdNotFull:
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_3);
break;
case AdapNormRespNotFull:
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_4);
break;
}
}
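/*
 * Writing a doorbell bit to PRICLEARIRQMASK clears that bit in the IRQ
 * mask, and since aac_sa_intr() inverts the mask before testing, a
 * cleared bit means the event may interrupt. A hypothetical caller
 * (the context is illustrative, the constant is real) would do:
 *
 *	aac_sa_enable_interrupt(dev, HostNormCmdQue);
 */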
/**
* aac_sa_disable_interrupt - disable an interrupt event
 * @dev: Which adapter to disable.
* @event: Which adapter event.
*
 * This routine will disable the corresponding adapter event from causing an interrupt on
 * the host.
*/
void aac_sa_disable_interrupt (struct aac_dev *dev, u32 event)
{
switch (event) {
case HostNormCmdQue:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_1);
break;
case HostNormRespQue:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_2);
break;
case AdapNormCmdNotFull:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_3);
break;
case AdapNormRespNotFull:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_4);
break;
}
}
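/*
 * The converse of enable: writing the bit to PRISETIRQMASK sets it in
 * the mask, so aac_sa_intr() ignores that doorbell. The same register
 * pair is used by aac_sa_start_adapter() below to mask everything and
 * then unmask only the doorbells we handle.
 */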
/**
* aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for
 * @event: Event to notify
*
* Notify the adapter of an event
*/
void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
{
switch (event) {
case AdapNormCmdQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_1);
break;
case HostNormRespNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_4);
break;
case AdapNormRespQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_2);
break;
case HostNormCmdNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_3);
break;
case HostShutdown:
//sa_sync_cmd(dev, HOST_CRASHING, 0, &ret);
break;
case FastIo:
sa_writew(dev, DoorbellReg_s,DOORBELL_6);
break;
case AdapPrintfDone:
sa_writew(dev, DoorbellReg_s,DOORBELL_5);
break;
default:
BUG();
break;
}
}
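/*
 * Host to adapter doorbell map, as read from the switch above:
 * DOORBELL_1 = command queued, DOORBELL_2 = response queued,
 * DOORBELL_3/DOORBELL_4 = queue not full, DOORBELL_5 = printf done,
 * DOORBELL_6 = fast I/O. HostShutdown is currently a no-op; the
 * sa_sync_cmd(HOST_CRASHING, ...) call is commented out.
 */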
/**
* sa_sync_cmd - send a command and wait
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
* @ret: adapter status
*
 * This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
{
unsigned long start;
int ok;
/*
* Write the Command into Mailbox 0
*/
sa_writel(dev, Mailbox0, cpu_to_le32(command));
/*
* Write the parameters into Mailboxes 1 - 4
*/
sa_writel(dev, Mailbox1, cpu_to_le32(p1));
sa_writel(dev, Mailbox2, 0);
sa_writel(dev, Mailbox3, 0);
sa_writel(dev, Mailbox4, 0);
/*
* Clear the synch command doorbell to start on a clean slate.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Signal that there is a new synch command
*/
sa_writew(dev, DoorbellReg_s, DOORBELL_0);
ok = 0;
start = jiffies;
while(time_before(jiffies, start+30*HZ))
{
/*
* Delay 5uS so that the monitor gets access
*/
udelay(5);
/*
* Mon110 will set doorbell0 bit when it has
* completed the command.
*/
if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
ok = 1;
break;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
if (ok != 1)
return -ETIMEDOUT;
/*
* Clear the synch command doorbell.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Pull the synch status from Mailbox 0.
*/
*ret = le32_to_cpu(sa_readl(dev, Mailbox0));
return 0;
}
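/*
 * A minimal caller sketch (SOME_REQUEST is a hypothetical command
 * word; real callers such as aac_sa_interrupt_adapter() below follow
 * the same pattern):
 *
 *	u32 status;
 *	if (sa_sync_cmd(dev, SOME_REQUEST, 0, &status))
 *		return;		(timed out waiting for DOORBELL_0)
 *	status now holds the adapter's result from Mailbox 0.
 */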
/**
* aac_sa_interrupt_adapter - interrupt an adapter
* @dev: Which adapter to enable.
*
* Breakpoint an adapter.
*/
static void aac_sa_interrupt_adapter (struct aac_dev *dev)
{
u32 ret;
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
}
/**
* aac_sa_start_adapter - activate adapter
* @dev: Adapter
*
* Start up processing on an ARM based AAC adapter
*/
static void aac_sa_start_adapter(struct aac_dev *dev)
{
u32 ret;
struct aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
dprintk(("INIT\n"));
/*
* Tell the adapter we are back and up and running so it will scan its command
* queues and enable our interrupts
*/
dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
/*
 * First mask off all interrupts, then unmask only the ones that
 * we can handle.
*/
dprintk(("MASK\n"));
sa_writew(dev, SaDbCSR.PRISETIRQMASK, cpu_to_le16(0xffff));
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
dprintk(("SYNCCMD\n"));
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &ret);
}
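/*
 * The (u32)(ulong) cast above deliberately passes only the low 32 bits
 * of the init structure's physical address; as the comment notes, the
 * synchronous command interface is 32 bit only, so dev->init_pa is
 * presumably allocated low enough by the common init code.
 */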
/**
* aac_sa_init - initialize an ARM based AAC card
* @dev: device to configure
* @devnum: adapter number
*
* Allocate and set up resources for the ARM based AAC variants. The
* device_interface in the commregion will be allocated and linked
* to the comm region.
*/
int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
{
unsigned long start;
unsigned long status;
int instance;
const char *name;
dev->devnum = devnum;
dprintk(("PREINST\n"));
instance = dev->id;
name = dev->name;
/*
* Map in the registers from the adapter.
*/
dprintk(("PREMAP\n"));
if((dev->regs.sa = (struct sa_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
return -1;
}
/*
* Check to see if the board failed any self tests.
*/
if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
return -1;
}
/*
* Check to see if the board panic'd while booting.
*/
if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
return -1;
}
start = jiffies;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes.
*/
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
if (time_after(jiffies, start+180*HZ)) {
status = sa_readl(dev, Mailbox7) >> 16;
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status));
return -1;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
dprintk(("ATIRQ\n"));
if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
return -1;
}
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dprintk(("FUNCDONE\n"));
if(aac_init_adapter(dev) == NULL)
return -1;
dprintk(("NEWADAPTDONE\n"));
/*
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
/*
 * Tell the adapter that all is configured, and it can start
* accepting requests
*/
dprintk(("STARTING\n"));
aac_sa_start_adapter(dev);
dprintk(("STARTED\n"));
return 0;
}
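/*
 * Bring-up sequence above, in brief: map the register window, check
 * the self-test and panic flags in Mailbox 7, wait up to 3 minutes for
 * KERNEL_UP_AND_RUNNING, hook the interrupt, fill in the dispatch
 * table, run the common aac_init_adapter() setup, start the command
 * thread, and finally hand the adapter the init struct address so it
 * can begin accepting requests.
 */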