Commit 84d891d6 authored by James Bottomley

Merge ../scsi-rc-fixes-2.6

Conflicts:

	include/scsi/scsi_devinfo.h

Same number for two BLIST flags:  BLIST_MAX_512 and BLIST_ATTACH_PQ3
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parents 5bb0b55a 7676f83a
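
The conflict itself is mechanical: each branch added a new BLIST flag and both happened to pick the same bit. The resolution keeps both flags but on distinct bits; schematically, in include/scsi/scsi_devinfo.h (the exact values below are illustrative, not copied from the resolved header):

/* Sketch of the resolved conflict: one bit per flag (values illustrative). */
#define BLIST_MAX_512		0x800000	/* cap READ/WRITE CDBs to 512 sectors */
#define BLIST_ATTACH_PQ3	0x1000000	/* attach to devices reporting peripheral qualifier 3 */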
@@ -350,16 +350,51 @@ static int sg_io(struct file *file, request_queue_t *q,
 	return ret;
 }
 
+/**
+ * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @file:	file this ioctl operates on (optional)
+ * @q:		request queue to send scsi commands down
+ * @disk:	gendisk to operate on (option)
+ * @sic:	userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q.  If @file is non-NULL it's used to perform
+ * fine-grained permission checks that allow users to send down
+ * non-destructive SCSI commands.  If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it.  A non-NULL @disk
+ * is only allowed if the caller knows that the low level driver doesn't
+ * need it (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ *   -  This interface is deprecated - users should use the SG_IO
+ *      interface instead, as this is a more flexible approach to
+ *      performing SCSI commands on a device.
+ *   -  The SCSI command length is determined by examining the 1st byte
+ *      of the given command. There is no way to override this.
+ *   -  Data transfers are limited to PAGE_SIZE
+ *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ *      accommodate the sense buffer when an error occurs.
+ *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ *      old code will not be surprised.
+ *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ *      a negative return and the Unix error code in 'errno'.
+ *      If the SCSI command succeeds then 0 is returned.
+ *      Positive numbers returned are the compacted SCSI error codes (4
+ *      bytes in one int) where the lowest byte is the SCSI status.
+ */
 #define OMAX_SB_LEN 16          /* For backward compatibility */
-
-static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
-			 struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+int sg_scsi_ioctl(struct file *file, struct request_queue *q,
+		  struct gendisk *disk, struct scsi_ioctl_command __user *sic)
 {
 	struct request *rq;
 	int err;
 	unsigned int in_len, out_len, bytes, opcode, cmdlen;
 	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
 
+	if (!sic)
+		return -EINVAL;
+
 	/*
 	 * get in an out lengths, verify they don't exceed a page worth of data
 	 */
@@ -393,45 +428,53 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	if (copy_from_user(rq->cmd, sic->data, cmdlen))
 		goto error;
 
-	if (copy_from_user(buffer, sic->data + cmdlen, in_len))
+	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 		goto error;
 
 	err = verify_command(file, rq->cmd);
 	if (err)
 		goto error;
 
+	/* default. possible overriden later */
+	rq->retries = 5;
+
 	switch (opcode) {
 	case SEND_DIAGNOSTIC:
 	case FORMAT_UNIT:
 		rq->timeout = FORMAT_UNIT_TIMEOUT;
+		rq->retries = 1;
 		break;
 	case START_STOP:
 		rq->timeout = START_STOP_TIMEOUT;
 		break;
 	case MOVE_MEDIUM:
 		rq->timeout = MOVE_MEDIUM_TIMEOUT;
 		break;
 	case READ_ELEMENT_STATUS:
 		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
 		break;
 	case READ_DEFECT_DATA:
 		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+		rq->retries = 1;
 		break;
 	default:
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 		break;
 	}
 
+	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+		err = DRIVER_ERROR << 24;
+		goto out;
+	}
+
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->data = buffer;
-	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
-	rq->retries = 0;
 
-	blk_execute_rq(q, bd_disk, rq, 0);
+	blk_execute_rq(q, disk, rq, 0);
 
+out:
 	err = rq->errors & 0xff; /* only 8 bit SCSI status */
 	if (err) {
 		if (rq->sense_len && rq->sense) {
@@ -450,7 +493,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	blk_put_request(rq);
 	return err;
 }
+EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
 
 /* Send basic block requests */
 static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
...
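
For reference, the deprecated interface documented in the kernel-doc above is driven from userspace roughly as sketched below. This example is not part of the commit; the device node is illustrative, the struct layout follows the inlen/outlen/data convention the ioctl handler reads, and, as the comment says, new code should use SG_IO instead.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>		/* SCSI_IOCTL_SEND_COMMAND */

int main(void)
{
	/* Layout expected by sg_scsi_ioctl(): two lengths, then the CDB
	 * followed by any data-out bytes; data-in (and sense data on
	 * error) come back in the same data[] area.
	 */
	struct {
		unsigned int inlen;	/* bytes of data sent to the device */
		unsigned int outlen;	/* bytes of data expected back */
		unsigned char data[512];
	} sic;
	static const unsigned char inquiry[6] = { 0x12, 0, 0, 0, 96, 0 };
	int fd, ret;

	fd = open("/dev/sda", O_RDONLY);	/* illustrative device node */
	if (fd < 0)
		return 1;

	memset(&sic, 0, sizeof(sic));
	sic.inlen  = 0;				/* INQUIRY has no data-out phase */
	sic.outlen = 96;			/* expect up to 96 bytes back */
	memcpy(sic.data, inquiry, sizeof(inquiry));	/* 6-byte CDB first */

	ret = ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic);
	if (ret < 0)
		perror("SCSI_IOCTL_SEND_COMMAND");	/* Unix error is in errno */
	else if (ret > 0)
		printf("SCSI status 0x%x (sense, if any, is in data[])\n", ret & 0xff);
	else
		printf("vendor: %.8s\n", (char *)sic.data + 8);	/* INQUIRY vendor id */

	close(fd);
	return 0;
}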
@@ -366,7 +366,15 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
 static int
 mptsas_slave_configure(struct scsi_device *sdev)
 {
-	sas_read_port_mode_page(sdev);
+	struct Scsi_Host	*host = sdev->host;
+	MPT_SCSI_HOST		*hd = (MPT_SCSI_HOST *)host->hostdata;
+
+	/*
+	 * RAID volumes placed beyond the last expected port.
+	 * Ignore sending sas mode pages in that case..
+	 */
+	if (sdev->channel < hd->ioc->num_ports)
+		sas_read_port_mode_page(sdev);
 
 	return mptscsih_slave_configure(sdev);
 }
...
@@ -65,6 +65,7 @@
    2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
    2.26.02.006 - Fix 9550SX pchip reset timeout.
                  Add big endian support.
+   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
 */
 
 #include <linux/module.h>
@@ -88,7 +89,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.006"
+#define TW_DRIVER_VERSION "2.26.02.007"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -1942,9 +1943,13 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
 		}
 		if (tw_dev->srb[request_id]->use_sg == 1) {
 			struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
-			char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+			char *buf;
+			unsigned long flags = 0;
+			local_irq_save(flags);
+			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
 			memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
 			kunmap_atomic(buf - sg->offset, KM_IRQ0);
+			local_irq_restore(flags);
 		}
 	}
 } /* End twa_scsiop_execute_scsi_complete() */
...
@@ -1079,7 +1079,7 @@ config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
 	  memory using PCI DAC cycles.
 
 config SCSI_SYM53C8XX_DEFAULT_TAGS
-	int "default tagged command queue depth"
+	int "Default tagged command queue depth"
 	depends on SCSI_SYM53C8XX_2
 	default "16"
 	help
@@ -1090,7 +1090,7 @@ config SCSI_SYM53C8XX_DEFAULT_TAGS
 	  exceed CONFIG_SCSI_SYM53C8XX_MAX_TAGS.
 
 config SCSI_SYM53C8XX_MAX_TAGS
-	int "maximum number of queued commands"
+	int "Maximum number of queued commands"
 	depends on SCSI_SYM53C8XX_2
 	default "64"
 	help
@@ -1099,13 +1099,14 @@ config SCSI_SYM53C8XX_MAX_TAGS
 	  possible. The driver supports up to 256 queued commands per device.
 	  This value is used as a compiled-in hard limit.
 
-config SCSI_SYM53C8XX_IOMAPPED
-	bool "use port IO"
+config SCSI_SYM53C8XX_MMIO
+	bool "Use memory mapped IO"
 	depends on SCSI_SYM53C8XX_2
+	default y
 	help
-	  If you say Y here, the driver will use port IO to access
-	  the card.  This is significantly slower then using memory
-	  mapped IO.  Most people should answer N.
+	  Memory mapped IO is faster than Port IO.  Most people should
+	  answer Y here, but some machines may have problems.  If you have
+	  to answer N here, please report the problem to the maintainer.
 
 config SCSI_IPR
 	tristate "IBM Power Linux RAID adapter support"
@@ -1309,15 +1310,6 @@ config SCSI_QLOGIC_FAS
 	  To compile this driver as a module, choose M here: the
 	  module will be called qlogicfas.
 
-config SCSI_QLOGIC_FC
-	tristate "Qlogic ISP FC SCSI support"
-	depends on PCI && SCSI
-	help
-	  This is a driver for the QLogic ISP2100 SCSI-FCP host adapter.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called qlogicfc.
-
 config SCSI_QLOGIC_FC_FIRMWARE
 	bool "Include loadable firmware in driver"
 	depends on SCSI_QLOGIC_FC
...
@@ -78,7 +78,6 @@ obj-$(CONFIG_SCSI_NCR_Q720)	+= NCR_Q720_mod.o
 obj-$(CONFIG_SCSI_SYM53C416)	+= sym53c416.o
 obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o	qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
-obj-$(CONFIG_SCSI_QLOGIC_FC)	+= qlogicfc.o
 obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
 obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
...
@@ -10,6 +10,10 @@
  *              D E F I N E S
  *----------------------------------------------------------------------------*/
 
+#ifndef AAC_DRIVER_BUILD
+# define AAC_DRIVER_BUILD 2409
+# define AAC_DRIVER_BRANCH "-mh1"
+#endif
 #define MAXIMUM_NUM_CONTAINERS	32
 
 #define AAC_NUM_MGT_FIB		8
@@ -25,7 +29,6 @@
  * These macros convert from physical channels to virtual channels
  */
 #define CONTAINER_CHANNEL		(0)
-#define ID_LUN_TO_CONTAINER(id, lun)	(id)
 #define CONTAINER_TO_CHANNEL(cont)	(CONTAINER_CHANNEL)
 #define CONTAINER_TO_ID(cont)		(cont)
 #define CONTAINER_TO_LUN(cont)		(0)
@@ -789,6 +792,7 @@ struct fsa_dev_info {
 	u64		size;
 	u32		type;
 	u32		config_waiting_on;
+	unsigned long	config_waiting_stamp;
 	u16		queue_depth;
 	u8		config_needed;
 	u8		valid;
@@ -1771,6 +1775,11 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
 }
 
 struct scsi_cmnd;
+/* SCp.phase values */
+#define AAC_OWNER_MIDLEVEL	0x101
+#define AAC_OWNER_LOWLEVEL	0x102
+#define AAC_OWNER_ERROR_HANDLER	0x103
+#define AAC_OWNER_FIRMWARE	0x106
 
 const char *aac_driverinfo(struct Scsi_Host *);
 struct fib *aac_fib_alloc(struct aac_dev *dev);
...
@@ -38,6 +38,8 @@
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
 
@@ -293,6 +295,16 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 			status = 0;
 	} else {
 		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		/* If someone killed the AIF aacraid thread, restart it */
+		status = !dev->aif_thread;
+		if (status && dev->queues && dev->fsa_dev) {
+			/* Be paranoid, be very paranoid! */
+			kthread_stop(dev->thread);
+			ssleep(1);
+			dev->aif_thread = 0;
+			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
+			ssleep(1);
+		}
 		if (f.wait) {
 			if(down_interruptible(&fibctx->wait_sem) < 0) {
 				status = -EINTR;
...
...@@ -767,9 +767,9 @@ void aac_printf(struct aac_dev *dev, u32 val) ...@@ -767,9 +767,9 @@ void aac_printf(struct aac_dev *dev, u32 val)
if (cp[length] != 0) if (cp[length] != 0)
cp[length] = 0; cp[length] = 0;
if (level == LOG_AAC_HIGH_ERROR) if (level == LOG_AAC_HIGH_ERROR)
printk(KERN_WARNING "aacraid:%s", cp); printk(KERN_WARNING "%s:%s", dev->name, cp);
else else
printk(KERN_INFO "aacraid:%s", cp); printk(KERN_INFO "%s:%s", dev->name, cp);
} }
memset(cp, 0, 256); memset(cp, 0, 256);
} }
...@@ -784,6 +784,7 @@ void aac_printf(struct aac_dev *dev, u32 val) ...@@ -784,6 +784,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
* dispatches it to the appropriate routine for handling. * dispatches it to the appropriate routine for handling.
*/ */
#define AIF_SNIFF_TIMEOUT (30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{ {
struct hw_fib * hw_fib = fibptr->hw_fib; struct hw_fib * hw_fib = fibptr->hw_fib;
...@@ -837,6 +838,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -837,6 +838,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (device) { if (device) {
dev->fsa_dev[container].config_needed = CHANGE; dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
scsi_device_put(device); scsi_device_put(device);
} }
} }
...@@ -849,13 +851,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -849,13 +851,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container != (u32)-1) { if (container != (u32)-1) {
if (container >= dev->maximum_num_containers) if (container >= dev->maximum_num_containers)
break; break;
if (dev->fsa_dev[container].config_waiting_on == if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0; dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0; } else for (container = 0;
container < dev->maximum_num_containers; ++container) { container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on == if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0; dev->fsa_dev[container].config_waiting_on = 0;
} }
break; break;
...@@ -872,6 +876,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -872,6 +876,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_needed = ADD; dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_on = dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange; AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break; break;
/* /*
...@@ -884,6 +889,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -884,6 +889,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_needed = DELETE; dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_on = dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange; AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break; break;
/* /*
...@@ -894,11 +900,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -894,11 +900,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
container = le32_to_cpu(((u32 *)aifcmd->data)[1]); container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers) if (container >= dev->maximum_num_containers)
break; break;
if (dev->fsa_dev[container].config_waiting_on) if (dev->fsa_dev[container].config_waiting_on &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
break; break;
dev->fsa_dev[container].config_needed = CHANGE; dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on = dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange; AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break; break;
case AifEnConfigChange: case AifEnConfigChange:
...@@ -913,13 +921,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -913,13 +921,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container != (u32)-1) { if (container != (u32)-1) {
if (container >= dev->maximum_num_containers) if (container >= dev->maximum_num_containers)
break; break;
if (dev->fsa_dev[container].config_waiting_on == if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0; dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0; } else for (container = 0;
container < dev->maximum_num_containers; ++container) { container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on == if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0; dev->fsa_dev[container].config_waiting_on = 0;
} }
break; break;
...@@ -946,6 +956,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -946,6 +956,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_waiting_on = dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange; AifEnContainerChange;
dev->fsa_dev[container].config_needed = ADD; dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
} }
} }
if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero)) if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
...@@ -961,6 +973,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -961,6 +973,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_waiting_on = dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange; AifEnContainerChange;
dev->fsa_dev[container].config_needed = DELETE; dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
} }
} }
break; break;
...@@ -969,8 +983,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ...@@ -969,8 +983,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
device_config_needed = NOTHING; device_config_needed = NOTHING;
for (container = 0; container < dev->maximum_num_containers; for (container = 0; container < dev->maximum_num_containers;
++container) { ++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0) if ((dev->fsa_dev[container].config_waiting_on == 0) &&
&& (dev->fsa_dev[container].config_needed != NOTHING)) { (dev->fsa_dev[container].config_needed != NOTHING) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
device_config_needed = device_config_needed =
dev->fsa_dev[container].config_needed; dev->fsa_dev[container].config_needed;
dev->fsa_dev[container].config_needed = NOTHING; dev->fsa_dev[container].config_needed = NOTHING;
...
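
All of the aac_handle_aif() hunks above follow one pattern: stamp the container with jiffies when it starts waiting on an AIF event, and only honour that waiting state while the stamp is younger than AIF_SNIFF_TIMEOUT. Condensed into a single helper it would read roughly as below; the helper itself is hypothetical, but the fields and the timeout macro are the ones added by this diff (kernel context assumed).

/* Hypothetical helper condensing the repeated check added above: a
 * pending config_waiting_on entry is only honoured while it is fresh,
 * i.e. younger than AIF_SNIFF_TIMEOUT (30*HZ, as defined in the hunk).
 */
static inline int aac_aif_wait_fresh(struct fsa_dev_info *fsa)
{
	return fsa->config_waiting_on &&
	       time_before(jiffies,
			   fsa->config_waiting_stamp + AIF_SNIFF_TIMEOUT);
}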
...@@ -27,12 +27,6 @@ ...@@ -27,12 +27,6 @@
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*/ */
#define AAC_DRIVER_VERSION "1.1-4"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid"
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
...@@ -62,6 +56,13 @@ ...@@ -62,6 +56,13 @@
#include "aacraid.h" #include "aacraid.h"
#define AAC_DRIVER_VERSION "1.1-5"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid"
#ifdef AAC_DRIVER_BUILD #ifdef AAC_DRIVER_BUILD
#define _str(x) #x #define _str(x) #x
#define str(x) _str(x) #define str(x) _str(x)
...@@ -73,7 +74,7 @@ ...@@ -73,7 +74,7 @@
MODULE_AUTHOR("Red Hat Inc and Adaptec"); MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
"Adaptec Advanced Raid Products, " "Adaptec Advanced Raid Products, "
"and HP NetRAID-4M SCSI driver"); "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION); MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
...@@ -243,6 +244,7 @@ static struct aac_driver_ident aac_drivers[] = { ...@@ -243,6 +244,7 @@ static struct aac_driver_ident aac_drivers[] = {
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{ {
cmd->scsi_done = done; cmd->scsi_done = done;
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0); return (aac_scsi_cmd(cmd) ? FAILED : 0);
} }
...@@ -471,7 +473,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) ...@@ -471,7 +473,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
__shost_for_each_device(dev, host) { __shost_for_each_device(dev, host) {
spin_lock_irqsave(&dev->list_lock, flags); spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(command, &dev->cmd_list, list) { list_for_each_entry(command, &dev->cmd_list, list) {
if (command->serial_number) { if ((command != cmd) &&
(command->SCp.phase == AAC_OWNER_FIRMWARE)) {
active++; active++;
break; break;
} }
...@@ -569,12 +572,12 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long ...@@ -569,12 +572,12 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
f = compat_alloc_user_space(sizeof(*f)); f = compat_alloc_user_space(sizeof(*f));
ret = 0; ret = 0;
if (clear_user(f, sizeof(*f) != sizeof(*f))) if (clear_user(f, sizeof(*f)) != sizeof(*f))
ret = -EFAULT; ret = -EFAULT;
if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32))) if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
ret = -EFAULT; ret = -EFAULT;
if (!ret) if (!ret)
ret = aac_do_ioctl(dev, cmd, (void __user *)arg); ret = aac_do_ioctl(dev, cmd, f);
break; break;
} }
...@@ -687,6 +690,18 @@ static ssize_t aac_show_serial_number(struct class_device *class_dev, ...@@ -687,6 +690,18 @@ static ssize_t aac_show_serial_number(struct class_device *class_dev,
return len; return len;
} }
static ssize_t aac_show_max_channel(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(class_dev)->max_channel);
}
static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(class_dev)->max_id);
}
static struct class_device_attribute aac_model = { static struct class_device_attribute aac_model = {
.attr = { .attr = {
...@@ -730,6 +745,20 @@ static struct class_device_attribute aac_serial_number = { ...@@ -730,6 +745,20 @@ static struct class_device_attribute aac_serial_number = {
}, },
.show = aac_show_serial_number, .show = aac_show_serial_number,
}; };
static struct class_device_attribute aac_max_channel = {
.attr = {
.name = "max_channel",
.mode = S_IRUGO,
},
.show = aac_show_max_channel,
};
static struct class_device_attribute aac_max_id = {
.attr = {
.name = "max_id",
.mode = S_IRUGO,
},
.show = aac_show_max_id,
};
static struct class_device_attribute *aac_attrs[] = { static struct class_device_attribute *aac_attrs[] = {
&aac_model, &aac_model,
...@@ -738,6 +767,8 @@ static struct class_device_attribute *aac_attrs[] = { ...@@ -738,6 +767,8 @@ static struct class_device_attribute *aac_attrs[] = {
&aac_monitor_version, &aac_monitor_version,
&aac_bios_version, &aac_bios_version,
&aac_serial_number, &aac_serial_number,
&aac_max_channel,
&aac_max_id,
NULL NULL
}; };
...@@ -775,6 +806,7 @@ static struct scsi_host_template aac_driver_template = { ...@@ -775,6 +806,7 @@ static struct scsi_host_template aac_driver_template = {
.cmd_per_lun = AAC_NUM_IO_FIB, .cmd_per_lun = AAC_NUM_IO_FIB,
#endif #endif
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.emulated = 1,
}; };
...@@ -798,10 +830,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, ...@@ -798,10 +830,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
error = pci_enable_device(pdev); error = pci_enable_device(pdev);
if (error) if (error)
goto out; goto out;
error = -ENODEV;
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
goto out; goto out_disable_pdev;
/* /*
* If the quirk31 bit is set, the adapter needs adapter * If the quirk31 bit is set, the adapter needs adapter
* to driver communication memory to be allocated below 2gig * to driver communication memory to be allocated below 2gig
...@@ -809,7 +842,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, ...@@ -809,7 +842,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) || if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK)) pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
goto out; goto out_disable_pdev;
pci_set_master(pdev); pci_set_master(pdev);
...@@ -904,9 +937,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, ...@@ -904,9 +937,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
* physical channels are address by their actual physical number+1 * physical channels are address by their actual physical number+1
*/ */
if (aac->nondasd_support == 1) if (aac->nondasd_support == 1)
shost->max_channel = aac->maximum_num_channels + 1; shost->max_channel = aac->maximum_num_channels;
else else
shost->max_channel = 1; shost->max_channel = 0;
aac_get_config_status(aac); aac_get_config_status(aac);
aac_get_containers(aac); aac_get_containers(aac);
...@@ -1020,7 +1053,8 @@ static int __init aac_init(void) ...@@ -1020,7 +1053,8 @@ static int __init aac_init(void)
static void __exit aac_exit(void) static void __exit aac_exit(void)
{ {
unregister_chrdev(aac_cfg_major, "aac"); if (aac_cfg_major > -1)
unregister_chrdev(aac_cfg_major, "aac");
pci_unregister_driver(&aac_pci_driver); pci_unregister_driver(&aac_pci_driver);
} }
...
@@ -183,7 +183,7 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Yield the processor in case we are slow
 		 */
-		schedule_timeout_uninterruptible(1);
+		msleep(1);
 	}
 	if (ok != 1) {
 		/*
@@ -343,7 +343,7 @@ static int aac_rkt_check_health(struct aac_dev *dev)
 					NULL, NULL, NULL, NULL, NULL);
 		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
 				    post, paddr);
-		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
+		if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
 			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
 			ret <<= 4;
 			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
...
@@ -183,7 +183,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Yield the processor in case we are slow
 		 */
-		schedule_timeout_uninterruptible(1);
+		msleep(1);
 	}
 	if (ok != 1) {
 		/*
@@ -342,7 +342,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
 					NULL, NULL, NULL, NULL, NULL);
 		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
 				    post, paddr);
-		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
+		if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
 			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
 			ret <<= 4;
 			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
...
@@ -189,7 +189,7 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
 			ok = 1;
 			break;
 		}
-		schedule_timeout_uninterruptible(1);
+		msleep(1);
 	}
 	if (ok != 1)
...
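
The check_health hunks in the rkt and rx files above widen the hand-rolled parse of the firmware's "0xNN" BlinkLED status string so that a "0X" prefix is accepted too. The logic being patched is equivalent to the small helper below; the function name is hypothetical, and (as in the original code) only upper-case hex digits above '9' are handled.

/* Equivalent of the open-coded parse in aac_rkt_check_health() and
 * aac_rx_check_health(): turn a "0xNN" / "0XNN" string into a byte.
 */
static int aac_blink_code(const char *buffer)
{
	int hi, lo;

	if (buffer[0] != '0' || (buffer[1] != 'x' && buffer[1] != 'X'))
		return -1;	/* not a hex status string */
	hi = (buffer[2] <= '9') ? buffer[2] - '0' : buffer[2] - 'A' + 10;
	lo = (buffer[3] <= '9') ? buffer[3] - '0' : buffer[3] - 'A' + 10;
	return (hi << 4) + lo;
}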
@@ -372,7 +372,7 @@ typedef enum {
 	AHD_CURRENT_SENSING   = 0x40000,
 	AHD_SCB_CONFIG_USED   = 0x80000,/* No SEEPROM but SCB had info. */
 	AHD_HP_BOARD          = 0x100000,
-	AHD_RESET_POLL_ACTIVE = 0x200000,
+	AHD_BUS_RESET_ACTIVE  = 0x200000,
 	AHD_UPDATE_PEND_CMDS  = 0x400000,
 	AHD_RUNNING_QOUTFIFO  = 0x800000,
 	AHD_HAD_FIRST_SEL     = 0x1000000
@@ -589,7 +589,7 @@ typedef enum {
 	SCB_PACKETIZED		= 0x00800,
 	SCB_EXPECT_PPR_BUSFREE	= 0x01000,
 	SCB_PKT_SENSE		= 0x02000,
-	SCB_CMDPHASE_ABORT	= 0x04000,
+	SCB_EXTERNAL_RESET	= 0x04000,/* Device was reset externally */
 	SCB_ON_COL_LIST		= 0x08000,
 	SCB_SILENT		= 0x10000 /*
 					   * Be quiet about transmission type
...
...@@ -207,7 +207,6 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, ...@@ -207,7 +207,6 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
u_int prev, u_int next, u_int tid); u_int prev, u_int next, u_int tid);
static void ahd_reset_current_bus(struct ahd_softc *ahd); static void ahd_reset_current_bus(struct ahd_softc *ahd);
static ahd_callback_t ahd_reset_poll;
static ahd_callback_t ahd_stat_timer; static ahd_callback_t ahd_stat_timer;
#ifdef AHD_DUMP_SEQ #ifdef AHD_DUMP_SEQ
static void ahd_dumpseq(struct ahd_softc *ahd); static void ahd_dumpseq(struct ahd_softc *ahd);
...@@ -1054,12 +1053,10 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) ...@@ -1054,12 +1053,10 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
* If a target takes us into the command phase * If a target takes us into the command phase
* assume that it has been externally reset and * assume that it has been externally reset and
* has thus lost our previous packetized negotiation * has thus lost our previous packetized negotiation
* agreement. Since we have not sent an identify * agreement.
* message and may not have fully qualified the * Revert to async/narrow transfers until we
* connection, we change our command to TUR, assert * can renegotiate with the device and notify
* ATN and ABORT the task when we go to message in * the OSM about the reset.
* phase. The OSM will see the REQUEUE_REQUEST
* status and retry the command.
*/ */
scbid = ahd_get_scbptr(ahd); scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid); scb = ahd_lookup_scb(ahd, scbid);
...@@ -1086,31 +1083,15 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) ...@@ -1086,31 +1083,15 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd_set_syncrate(ahd, &devinfo, /*period*/0, ahd_set_syncrate(ahd, &devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0, /*offset*/0, /*ppr_options*/0,
AHD_TRANS_ACTIVE, /*paused*/TRUE); AHD_TRANS_ACTIVE, /*paused*/TRUE);
ahd_outb(ahd, SCB_CDB_STORE, 0); scb->flags |= SCB_EXTERNAL_RESET;
ahd_outb(ahd, SCB_CDB_STORE+1, 0);
ahd_outb(ahd, SCB_CDB_STORE+2, 0);
ahd_outb(ahd, SCB_CDB_STORE+3, 0);
ahd_outb(ahd, SCB_CDB_STORE+4, 0);
ahd_outb(ahd, SCB_CDB_STORE+5, 0);
ahd_outb(ahd, SCB_CDB_LEN, 6);
scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
scb->hscb->control |= MK_MESSAGE;
ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
/*
* The lun is 0, regardless of the SCB's lun
* as we have not sent an identify message.
*/
ahd_outb(ahd, SAVED_LUN, 0);
ahd_outb(ahd, SEQ_FLAGS, 0);
ahd_assert_atn(ahd);
scb->flags &= ~SCB_PACKETIZED;
scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
ahd_freeze_devq(ahd, scb); ahd_freeze_devq(ahd, scb);
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
ahd_freeze_scb(scb); ahd_freeze_scb(scb);
/* Notify XPT */
ahd_send_async(ahd, devinfo.channel, devinfo.target,
CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
/* /*
* Allow the sequencer to continue with * Allow the sequencer to continue with
* non-pack processing. * non-pack processing.
...@@ -1534,6 +1515,18 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) ...@@ -1534,6 +1515,18 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
lqistat1 = ahd_inb(ahd, LQISTAT1); lqistat1 = ahd_inb(ahd, LQISTAT1);
lqostat0 = ahd_inb(ahd, LQOSTAT0); lqostat0 = ahd_inb(ahd, LQOSTAT0);
busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
/*
* Ignore external resets after a bus reset.
*/
if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE))
return;
/*
* Clear bus reset flag
*/
ahd->flags &= ~AHD_BUS_RESET_ACTIVE;
if ((status0 & (SELDI|SELDO)) != 0) { if ((status0 & (SELDI|SELDO)) != 0) {
u_int simode0; u_int simode0;
...@@ -2207,22 +2200,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) ...@@ -2207,22 +2200,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
if (sent_msg == MSG_ABORT_TAG) if (sent_msg == MSG_ABORT_TAG)
tag = SCB_GET_TAG(scb); tag = SCB_GET_TAG(scb);
if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
/*
* This abort is in response to an
* unexpected switch to command phase
* for a packetized connection. Since
* the identify message was never sent,
* "saved lun" is 0. We really want to
* abort only the SCB that encountered
* this error, which could have a different
* lun. The SCB will be retried so the OS
* will see the UA after renegotiating to
* packetized.
*/
tag = SCB_GET_TAG(scb);
saved_lun = scb->hscb->lun;
}
found = ahd_abort_scbs(ahd, target, 'A', saved_lun, found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
tag, ROLE_INITIATOR, tag, ROLE_INITIATOR,
CAM_REQ_ABORTED); CAM_REQ_ABORTED);
...@@ -7847,6 +7824,17 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) ...@@ -7847,6 +7824,17 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
int found; int found;
u_int fifo; u_int fifo;
u_int next_fifo; u_int next_fifo;
uint8_t scsiseq;
/*
* Check if the last bus reset is cleared
*/
if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
printf("%s: bus reset still active\n",
ahd_name(ahd));
return 0;
}
ahd->flags |= AHD_BUS_RESET_ACTIVE;
ahd->pending_device = NULL; ahd->pending_device = NULL;
...@@ -7860,6 +7848,12 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) ...@@ -7860,6 +7848,12 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
/* Make sure the sequencer is in a safe location. */ /* Make sure the sequencer is in a safe location. */
ahd_clear_critical_section(ahd); ahd_clear_critical_section(ahd);
/*
* Run our command complete fifos to ensure that we perform
* completion processing on any commands that 'completed'
* before the reset occurred.
*/
ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE #ifdef AHD_TARGET_MODE
if ((ahd->flags & AHD_TARGETROLE) != 0) { if ((ahd->flags & AHD_TARGETROLE) != 0) {
ahd_run_tqinfifo(ahd, /*paused*/TRUE); ahd_run_tqinfifo(ahd, /*paused*/TRUE);
...@@ -7924,30 +7918,14 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) ...@@ -7924,30 +7918,14 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
ahd_clear_fifo(ahd, 1); ahd_clear_fifo(ahd, 1);
/* /*
* Revert to async/narrow transfers until we renegotiate. * Reenable selections
*/ */
max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
for (target = 0; target <= max_scsiid; target++) { scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
if (ahd->enabled_targets[target] == NULL)
continue;
for (initiator = 0; initiator <= max_scsiid; initiator++) {
struct ahd_devinfo devinfo;
ahd_compile_devinfo(&devinfo, target, initiator,
CAM_LUN_WILDCARD,
'A', ROLE_UNKNOWN);
ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_CUR, /*paused*/TRUE);
ahd_set_syncrate(ahd, &devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
AHD_TRANS_CUR, /*paused*/TRUE);
}
}
#ifdef AHD_TARGET_MODE
max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
#ifdef AHD_TARGET_MODE
/* /*
* Send an immediate notify ccb to all target more peripheral * Send an immediate notify ccb to all target more peripheral
* drivers affected by this action. * drivers affected by this action.
...@@ -7975,51 +7953,31 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) ...@@ -7975,51 +7953,31 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
/* Notify the XPT that a bus reset occurred */ /* Notify the XPT that a bus reset occurred */
ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
ahd_restart(ahd);
/* /*
* Freeze the SIMQ until our poller can determine that * Revert to async/narrow transfers until we renegotiate.
* the bus reset has really gone away. We set the initial
* timer to 0 to have the check performed as soon as possible
* from the timer context.
*/ */
if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) { for (target = 0; target <= max_scsiid; target++) {
ahd->flags |= AHD_RESET_POLL_ACTIVE;
ahd_freeze_simq(ahd);
ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
}
return (found);
}
if (ahd->enabled_targets[target] == NULL)
continue;
for (initiator = 0; initiator <= max_scsiid; initiator++) {
struct ahd_devinfo devinfo;
#define AHD_RESET_POLL_US 1000 ahd_compile_devinfo(&devinfo, target, initiator,
static void CAM_LUN_WILDCARD,
ahd_reset_poll(void *arg) 'A', ROLE_UNKNOWN);
{ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
struct ahd_softc *ahd = arg; AHD_TRANS_CUR, /*paused*/TRUE);
u_int scsiseq1; ahd_set_syncrate(ahd, &devinfo, /*period*/0,
u_long s; /*offset*/0, /*ppr_options*/0,
AHD_TRANS_CUR, /*paused*/TRUE);
ahd_lock(ahd, &s); }
ahd_pause(ahd);
ahd_update_modes(ahd);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
ahd_reset_poll, ahd);
ahd_unpause(ahd);
ahd_unlock(ahd, &s);
return;
} }
/* Reset is now low. Complete chip reinitialization. */ ahd_restart(ahd);
ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); return (found);
ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
ahd_unpause(ahd);
ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
ahd_unlock(ahd, &s);
ahd_release_simq(ahd);
} }
/**************************** Statistics Processing ***************************/ /**************************** Statistics Processing ***************************/
...
@@ -782,6 +782,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
 {
 	struct ahd_softc *ahd;
 	int    found;
+	unsigned long flags;
 
 	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
 #ifdef AHD_DEBUG
@@ -789,8 +790,11 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
 		printf("%s: Bus reset called for cmd %p\n",
 		       ahd_name(ahd), cmd);
 #endif
+
+	ahd_lock(ahd, &flags);
 	found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
 				  /*initiate reset*/TRUE);
+	ahd_unlock(ahd, &flags);
 
 	if (bootverbose)
 		printf("%s: SCSI bus reset delivered. "
...
@@ -68,7 +68,7 @@ struct srp_event_struct {
 	void (*cmnd_done) (struct scsi_cmnd *);
 	struct completion comp;
 	union viosrp_iu *sync_srp;
-	struct memory_descriptor *ext_list;
+	struct srp_direct_buf *ext_list;
 	dma_addr_t ext_list_token;
 };
...
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include "ibmvscsi.h"
-#include "srp.h"
 
 static char partition_name[97] = "UNKNOWN";
 static unsigned int partition_number = -1;
...
/*****************************************************************************/
/* srp.h -- SCSI RDMA Protocol definitions */
/* */
/* Written By: Colin Devilbis, IBM Corporation */
/* */
/* Copyright (C) 2003 IBM Corporation */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* */
/* */
/* This file contains structures and definitions for the SCSI RDMA Protocol */
/* (SRP) as defined in the T10 standard available at www.t10.org. This */
/* file was based on the 16a version of the standard */
/* */
/*****************************************************************************/
#ifndef SRP_H
#define SRP_H
#define SRP_VERSION "16.a"
#define PACKED __attribute__((packed))
enum srp_types {
SRP_LOGIN_REQ_TYPE = 0x00,
SRP_LOGIN_RSP_TYPE = 0xC0,
SRP_LOGIN_REJ_TYPE = 0xC2,
SRP_I_LOGOUT_TYPE = 0x03,
SRP_T_LOGOUT_TYPE = 0x80,
SRP_TSK_MGMT_TYPE = 0x01,
SRP_CMD_TYPE = 0x02,
SRP_RSP_TYPE = 0xC1,
SRP_CRED_REQ_TYPE = 0x81,
SRP_CRED_RSP_TYPE = 0x41,
SRP_AER_REQ_TYPE = 0x82,
SRP_AER_RSP_TYPE = 0x42
};
enum srp_descriptor_formats {
SRP_NO_BUFFER = 0x00,
SRP_DIRECT_BUFFER = 0x01,
SRP_INDIRECT_BUFFER = 0x02
};
struct memory_descriptor {
u64 virtual_address;
u32 memory_handle;
u32 length;
};
struct indirect_descriptor {
struct memory_descriptor head;
u32 total_length;
struct memory_descriptor list[1] PACKED;
};
struct srp_generic {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_login_req {
u8 type;
u8 reserved1[7];
u64 tag;
u32 max_requested_initiator_to_target_iulen;
u32 reserved2;
u16 required_buffer_formats;
u8 reserved3:6;
u8 multi_channel_action:2;
u8 reserved4;
u32 reserved5;
u8 initiator_port_identifier[16];
u8 target_port_identifier[16];
};
struct srp_login_rsp {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u32 max_initiator_to_target_iulen;
u32 max_target_to_initiator_iulen;
u16 supported_buffer_formats;
u8 reserved2:6;
u8 multi_channel_result:2;
u8 reserved3;
u8 reserved4[24];
};
struct srp_login_rej {
u8 type;
u8 reserved1[3];
u32 reason;
u64 tag;
u64 reserved2;
u16 supported_buffer_formats;
u8 reserved3[6];
};
struct srp_i_logout {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_t_logout {
u8 type;
u8 reserved1[3];
u32 reason;
u64 tag;
};
struct srp_tsk_mgmt {
u8 type;
u8 reserved1[7];
u64 tag;
u32 reserved2;
u64 lun PACKED;
u8 reserved3;
u8 reserved4;
u8 task_mgmt_flags;
u8 reserved5;
u64 managed_task_tag;
u64 reserved6;
};
struct srp_cmd {
u8 type;
u32 reserved1 PACKED;
u8 data_out_format:4;
u8 data_in_format:4;
u8 data_out_count;
u8 data_in_count;
u64 tag;
u32 reserved2;
u64 lun PACKED;
u8 reserved3;
u8 reserved4:5;
u8 task_attribute:3;
u8 reserved5;
u8 additional_cdb_len;
u8 cdb[16];
u8 additional_data[0x100 - 0x30];
};
struct srp_rsp {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u16 reserved2;
u8 reserved3:2;
u8 diunder:1;
u8 diover:1;
u8 dounder:1;
u8 doover:1;
u8 snsvalid:1;
u8 rspvalid:1;
u8 status;
u32 data_in_residual_count;
u32 data_out_residual_count;
u32 sense_data_list_length;
u32 response_data_list_length;
u8 sense_and_response_data[18];
};
struct srp_cred_req {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
};
struct srp_cred_rsp {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_aer_req {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u32 reserved2;
u64 lun;
u32 sense_data_list_length;
u32 reserved3;
u8 sense_data[20];
};
struct srp_aer_rsp {
u8 type;
u8 reserved1[7];
u64 tag;
};
union srp_iu {
struct srp_generic generic;
struct srp_login_req login_req;
struct srp_login_rsp login_rsp;
struct srp_login_rej login_rej;
struct srp_i_logout i_logout;
struct srp_t_logout t_logout;
struct srp_tsk_mgmt tsk_mgmt;
struct srp_cmd cmd;
struct srp_rsp rsp;
struct srp_cred_req cred_req;
struct srp_cred_rsp cred_rsp;
struct srp_aer_req aer_req;
struct srp_aer_rsp aer_rsp;
};
#endif
@@ -33,7 +33,22 @@
 /*****************************************************************************/
 #ifndef VIOSRP_H
 #define VIOSRP_H
-#include "srp.h"
+#include <scsi/srp.h>
+
+#define SRP_VERSION "16.a"
+#define SRP_MAX_IU_LEN	256
+
+union srp_iu {
+	struct srp_login_req login_req;
+	struct srp_login_rsp login_rsp;
+	struct srp_login_rej login_rej;
+	struct srp_i_logout i_logout;
+	struct srp_t_logout t_logout;
+	struct srp_tsk_mgmt tsk_mgmt;
+	struct srp_cmd cmd;
+	struct srp_rsp rsp;
+	u8 reserved[SRP_MAX_IU_LEN];
+};
 
 enum viosrp_crq_formats {
 	VIOSRP_SRP_FORMAT = 0x01,
...
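
The ibmvscsi changes above drop the driver-private srp.h (shown in full earlier) in favour of the generic <scsi/srp.h> definitions. The removed struct memory_descriptor maps field-for-field onto the generic direct buffer descriptor; roughly, from memory rather than copied from the new header:

/* Correspondence between the removed ibmvscsi-private descriptor and
 * the generic one now taken from <scsi/srp.h> (rough guide only):
 *
 *   struct memory_descriptor {          struct srp_direct_buf {
 *           u64 virtual_address;   <-->         __be64 va;
 *           u32 memory_handle;     <-->         __be32 key;
 *           u32 length;            <-->         __be32 len;
 *   };                                  };
 */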
...@@ -164,29 +164,6 @@ MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when init ...@@ -164,29 +164,6 @@ MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when init
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION); MODULE_VERSION(IPR_DRIVER_VERSION);
static const char *ipr_gpdd_dev_end_states[] = {
"Command complete",
"Terminated by host",
"Terminated by device reset",
"Terminated by bus reset",
"Unknown",
"Command not started"
};
static const char *ipr_gpdd_dev_bus_phases[] = {
"Bus free",
"Arbitration",
"Selection",
"Message out",
"Command",
"Message in",
"Data out",
"Data in",
"Status",
"Reselection",
"Unknown"
};
/* A constant array of IOASCs/URCs/Error Messages */ /* A constant array of IOASCs/URCs/Error Messages */
static const static const
struct ipr_error_table_t ipr_error_table[] = { struct ipr_error_table_t ipr_error_table[] = {
...@@ -869,8 +846,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, ...@@ -869,8 +846,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
if (res->sdev) { if (res->sdev) {
res->sdev->hostdata = NULL;
res->del_from_ml = 1; res->del_from_ml = 1;
res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
if (ioa_cfg->allow_ml_add_del) if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q); schedule_work(&ioa_cfg->work_q);
} else } else
...@@ -1356,8 +1333,8 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, ...@@ -1356,8 +1333,8 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
return; return;
if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr, ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
"%s\n", ipr_error_table[error_index].error); "%s\n", ipr_error_table[error_index].error);
} else { } else {
dev_err(&ioa_cfg->pdev->dev, "%s\n", dev_err(&ioa_cfg->pdev->dev, "%s\n",
ipr_error_table[error_index].error); ipr_error_table[error_index].error);
...@@ -2107,7 +2084,6 @@ static void ipr_worker_thread(void *data) ...@@ -2107,7 +2084,6 @@ static void ipr_worker_thread(void *data)
did_work = 1; did_work = 1;
sdev = res->sdev; sdev = res->sdev;
if (!scsi_device_get(sdev)) { if (!scsi_device_get(sdev)) {
res->sdev = NULL;
list_move_tail(&res->queue, &ioa_cfg->free_res_q); list_move_tail(&res->queue, &ioa_cfg->free_res_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev); scsi_remove_device(sdev);
...@@ -2124,6 +2100,7 @@ static void ipr_worker_thread(void *data) ...@@ -2124,6 +2100,7 @@ static void ipr_worker_thread(void *data)
bus = res->cfgte.res_addr.bus; bus = res->cfgte.res_addr.bus;
target = res->cfgte.res_addr.target; target = res->cfgte.res_addr.target;
lun = res->cfgte.res_addr.lun; lun = res->cfgte.res_addr.lun;
res->add_to_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_add_device(ioa_cfg->host, bus, target, lun); scsi_add_device(ioa_cfg->host, bus, target, lun);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
...@@ -3214,7 +3191,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) ...@@ -3214,7 +3191,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->timeout = IPR_VSET_RW_TIMEOUT; sdev->timeout = IPR_VSET_RW_TIMEOUT;
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
} }
if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)) if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1; sdev->allow_restart = 1;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
} }
...@@ -3303,6 +3280,44 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd) ...@@ -3303,6 +3280,44 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
return rc; return rc;
} }
/**
* ipr_device_reset - Reset the device
* @ioa_cfg: ioa config struct
* @res: resource entry struct
*
* This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset
* will be sent.
*
* Return value:
* 0 on success / non-zero on failure
**/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_resource_entry *res)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc;
ENTER;
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
ioarcb->res_handle = res->cfgte.res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/** /**
* ipr_eh_dev_reset - Reset the device * ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct * @scsi_cmd: scsi command struct
...@@ -3319,8 +3334,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) ...@@ -3319,8 +3334,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
struct ipr_cmnd *ipr_cmd; struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg; struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res; struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt; int rc;
u32 ioasc;
ENTER; ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
...@@ -3347,25 +3361,12 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) ...@@ -3347,25 +3361,12 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
} }
res->resetting_device = 1; res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); rc = ipr_device_reset(ioa_cfg, res);
ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
res->resetting_device = 0; res->resetting_device = 0;
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
LEAVE; LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); return (rc ? FAILED : SUCCESS);
} }
static int ipr_eh_dev_reset(struct scsi_cmnd * cmd) static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
...@@ -3440,7 +3441,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) ...@@ -3440,7 +3441,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
return; return;
} }
ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n"); sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ipr_cmd->sibling = reset_cmd; ipr_cmd->sibling = reset_cmd;
reset_cmd->sibling = ipr_cmd; reset_cmd->sibling = ipr_cmd;
...@@ -3504,7 +3505,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) ...@@ -3504,7 +3505,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
ipr_cmd->u.sdev = scsi_cmd->device; ipr_cmd->u.sdev = scsi_cmd->device;
ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
scsi_cmd->cmnd[0]);
ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
...@@ -3815,8 +3817,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) ...@@ -3815,8 +3817,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
scsi_cmd->result |= (DID_ERROR << 16); scsi_cmd->result |= (DID_ERROR << 16);
ipr_sdev_err(scsi_cmd->device, scmd_printk(KERN_ERR, scsi_cmd,
"Request Sense failed with IOASC: 0x%08X\n", ioasc); "Request Sense failed with IOASC: 0x%08X\n", ioasc);
} else { } else {
memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE); SCSI_SENSE_BUFFERSIZE);
...@@ -3938,6 +3940,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) ...@@ -3938,6 +3940,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
* ipr_dump_ioasa - Dump contents of IOASA * ipr_dump_ioasa - Dump contents of IOASA
* @ioa_cfg: ioa config struct * @ioa_cfg: ioa config struct
* @ipr_cmd: ipr command struct * @ipr_cmd: ipr command struct
* @res: resource entry struct
* *
* This function is invoked by the interrupt handler when ops * This function is invoked by the interrupt handler when ops
* fail. It will log the IOASA if appropriate. Only called * fail. It will log the IOASA if appropriate. Only called
...@@ -3947,7 +3950,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) ...@@ -3947,7 +3950,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
* none * none
**/ **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd) struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{ {
int i; int i;
u16 data_len; u16 data_len;
...@@ -3975,16 +3978,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, ...@@ -3975,16 +3978,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
return; return;
} }
ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n", ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
ipr_error_table[error_index].error);
if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
(ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
ipr_sdev_err(ipr_cmd->scsi_cmd->device,
"Device End state: %s Phase: %s\n",
ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
}
if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
data_len = sizeof(struct ipr_ioasa); data_len = sizeof(struct ipr_ioasa);
...@@ -4141,7 +4135,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, ...@@ -4141,7 +4135,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
} }
if (ipr_is_gscsi(res)) if (ipr_is_gscsi(res))
ipr_dump_ioasa(ioa_cfg, ipr_cmd); ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
else else
ipr_gen_sense(ipr_cmd); ipr_gen_sense(ipr_cmd);
...@@ -4540,7 +4534,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) ...@@ -4540,7 +4534,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
ipr_cmd->job_step = ipr_ioa_reset_done; ipr_cmd->job_step = ipr_ioa_reset_done;
list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)) if (!ipr_is_scsi_disk(res))
continue; continue;
ipr_cmd->u.res = res; ipr_cmd->u.res = res;
...@@ -4980,7 +4974,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) ...@@ -4980,7 +4974,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
list_for_each_entry_safe(res, temp, &old_res, queue) { list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->sdev) { if (res->sdev) {
res->del_from_ml = 1; res->del_from_ml = 1;
res->sdev->hostdata = NULL; res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
list_move_tail(&res->queue, &ioa_cfg->used_res_q); list_move_tail(&res->queue, &ioa_cfg->used_res_q);
} else { } else {
list_move_tail(&res->queue, &ioa_cfg->free_res_q); list_move_tail(&res->queue, &ioa_cfg->free_res_q);
......
...@@ -36,8 +36,8 @@ ...@@ -36,8 +36,8 @@
/* /*
* Literals * Literals
*/ */
#define IPR_DRIVER_VERSION "2.1.2" #define IPR_DRIVER_VERSION "2.1.3"
#define IPR_DRIVER_DATE "(February 8, 2006)" #define IPR_DRIVER_DATE "(March 29, 2006)"
/* /*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
...@@ -133,6 +133,7 @@ ...@@ -133,6 +133,7 @@
#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8)) #define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8))
#define IPR_IOA_RES_HANDLE 0xffffffff #define IPR_IOA_RES_HANDLE 0xffffffff
#define IPR_INVALID_RES_HANDLE 0
#define IPR_IOA_RES_ADDR 0x00ffffff #define IPR_IOA_RES_ADDR 0x00ffffff
/* /*
...@@ -1191,30 +1192,17 @@ struct ipr_ucode_image_header { ...@@ -1191,30 +1192,17 @@ struct ipr_ucode_image_header {
*/ */
#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__) #define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__)
#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) #define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__)
#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__)
#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) #define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
#define ipr_sdev_printk(level, sdev, fmt, args...) \ #define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
sdev_printk(level, sdev, fmt, ## args) printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
(ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
#define ipr_sdev_err(sdev, fmt, ...) \ #define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__) ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
#define ipr_sdev_info(sdev, fmt, ...) \
ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)
#define ipr_sdev_dbg(sdev, fmt, ...) \
IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__))
#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \
printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \
res.bus, res.target, res.lun, ##__VA_ARGS__)
#define ipr_res_err(ioa_cfg, res, fmt, ...) \ #define ipr_res_err(ioa_cfg, res, fmt, ...) \
ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__) ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \
IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__))
#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ #define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
{ \ { \
...@@ -1303,6 +1291,22 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res) ...@@ -1303,6 +1291,22 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
return 0; return 0;
} }
/**
* ipr_is_scsi_disk - Determine if a resource is a SCSI disk
* @res: resource entry struct
*
* Return value:
* 1 if SCSI disk / 0 if not SCSI disk
**/
static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
{
if (ipr_is_af_dasd_device(res) ||
(ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)))
return 1;
else
return 0;
}
/** /**
* ipr_is_naca_model - Determine if a resource is using NACA queueing model * ipr_is_naca_model - Determine if a resource is using NACA queueing model
* @res: resource entry struct * @res: resource entry struct
......
...@@ -565,7 +565,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) ...@@ -565,7 +565,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
/* /*
* If SCSI-2 or lower, store the LUN value in cmnd. * If SCSI-2 or lower, store the LUN value in cmnd.
*/ */
if (cmd->device->scsi_level <= SCSI_2) { if (cmd->device->scsi_level <= SCSI_2 &&
cmd->device->scsi_level != SCSI_UNKNOWN) {
cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
(cmd->device->lun << 5 & 0xe0); (cmd->device->lun << 5 & 0xe0);
} }
...@@ -1243,7 +1244,7 @@ static int __init init_scsi(void) ...@@ -1243,7 +1244,7 @@ static int __init init_scsi(void)
if (error) if (error)
goto cleanup_sysctl; goto cleanup_sysctl;
for_each_cpu(i) for_each_possible_cpu(i)
INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
printk(KERN_NOTICE "SCSI subsystem initialized\n"); printk(KERN_NOTICE "SCSI subsystem initialized\n");
......
...@@ -132,7 +132,9 @@ static struct { ...@@ -132,7 +132,9 @@ static struct {
{"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */
{"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */ {"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */
{"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */ {"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */
{"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN}, {"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */
{"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */
{"COMPAQ", "CR3500", NULL, BLIST_FORCELUN}, {"COMPAQ", "CR3500", NULL, BLIST_FORCELUN},
{"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
{"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
......
...@@ -157,180 +157,6 @@ int scsi_set_medium_removal(struct scsi_device *sdev, char state) ...@@ -157,180 +157,6 @@ int scsi_set_medium_removal(struct scsi_device *sdev, char state)
} }
EXPORT_SYMBOL(scsi_set_medium_removal); EXPORT_SYMBOL(scsi_set_medium_removal);
/*
* This interface is deprecated - users should use the scsi generic (sg)
* interface instead, as this is a more flexible approach to performing
* generic SCSI commands on a device.
*
* The structure that we are passed should look like:
*
* struct sdata {
* unsigned int inlen; [i] Length of data to be written to device
* unsigned int outlen; [i] Length of data to be read from device
* unsigned char cmd[x]; [i] SCSI command (6 <= x <= 12).
* [o] Data read from device starts here.
* [o] On error, sense buffer starts here.
* unsigned char wdata[y]; [i] Data written to device starts here.
* };
* Notes:
* - The SCSI command length is determined by examining the 1st byte
* of the given command. There is no way to override this.
* - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
* - The length (x + y) must be at least OMAX_SB_LEN bytes long to
* accommodate the sense buffer when an error occurs.
* The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
* old code will not be surprised.
* - If a Unix error occurs (e.g. ENOMEM) then the user will receive
* a negative return and the Unix error code in 'errno'.
* If the SCSI command succeeds then 0 is returned.
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
* See the drivers/scsi/scsi.h file for more information on this.
*
*/
#define OMAX_SB_LEN 16 /* Old sense buffer length */
int scsi_ioctl_send_command(struct scsi_device *sdev,
struct scsi_ioctl_command __user *sic)
{
char *buf;
unsigned char cmd[MAX_COMMAND_SIZE];
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
char __user *cmd_in;
unsigned char opcode;
unsigned int inlen, outlen, cmdlen;
unsigned int needed, buf_needed;
int timeout, retries, result;
int data_direction;
gfp_t gfp_mask = GFP_KERNEL;
if (!sic)
return -EINVAL;
if (sdev->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
/*
* Verify that we can read at least this much.
*/
if (!access_ok(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command)))
return -EFAULT;
if(__get_user(inlen, &sic->inlen))
return -EFAULT;
if(__get_user(outlen, &sic->outlen))
return -EFAULT;
/*
* We do not transfer more than MAX_BUF with this interface.
* If the user needs to transfer more data than this, they
* should use scsi_generics (sg) instead.
*/
if (inlen > MAX_BUF)
return -EINVAL;
if (outlen > MAX_BUF)
return -EINVAL;
cmd_in = sic->data;
if(get_user(opcode, cmd_in))
return -EFAULT;
needed = buf_needed = (inlen > outlen ? inlen : outlen);
if (buf_needed) {
buf_needed = (buf_needed + 511) & ~511;
if (buf_needed > MAX_BUF)
buf_needed = MAX_BUF;
buf = kzalloc(buf_needed, gfp_mask);
if (!buf)
return -ENOMEM;
if (inlen == 0) {
data_direction = DMA_FROM_DEVICE;
} else if (outlen == 0 ) {
data_direction = DMA_TO_DEVICE;
} else {
/*
* Can this ever happen?
*/
data_direction = DMA_BIDIRECTIONAL;
}
} else {
buf = NULL;
data_direction = DMA_NONE;
}
/*
* Obtain the command from the user's address space.
*/
cmdlen = COMMAND_SIZE(opcode);
result = -EFAULT;
if (!access_ok(VERIFY_READ, cmd_in, cmdlen + inlen))
goto error;
if(__copy_from_user(cmd, cmd_in, cmdlen))
goto error;
/*
* Obtain the data to be sent to the device (if any).
*/
if(inlen && copy_from_user(buf, cmd_in + cmdlen, inlen))
goto error;
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
timeout = FORMAT_UNIT_TIMEOUT;
retries = 1;
break;
case START_STOP:
timeout = START_STOP_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case MOVE_MEDIUM:
timeout = MOVE_MEDIUM_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case READ_ELEMENT_STATUS:
timeout = READ_ELEMENT_STATUS_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case READ_DEFECT_DATA:
timeout = READ_DEFECT_DATA_TIMEOUT;
retries = 1;
break;
default:
timeout = IOCTL_NORMAL_TIMEOUT;
retries = NORMAL_RETRIES;
break;
}
result = scsi_execute(sdev, cmd, data_direction, buf, needed,
sense, timeout, retries, 0);
/*
* If there was an error condition, pass the info back to the user.
*/
if (result) {
int sb_len = sizeof(*sense);
sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
if (copy_to_user(cmd_in, sense, sb_len))
result = -EFAULT;
} else {
if (outlen && copy_to_user(cmd_in, buf, outlen))
result = -EFAULT;
}
error:
kfree(buf);
return result;
}
EXPORT_SYMBOL(scsi_ioctl_send_command);
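For reference, a minimal userspace sketch of the deprecated SCSI_IOCTL_SEND_COMMAND interface whose in-kernel handler is removed in the hunk above (callers are now routed to sg_scsi_ioctl() instead). The local struct mirrors the inlen/outlen/data layout described in the deleted comment; the INQUIRY CDB and the 36-byte allocation length are illustrative assumptions, not part of this patch.

/*
 * Hedged sketch only: issue an INQUIRY through the deprecated
 * SCSI_IOCTL_SEND_COMMAND ioctl. The anonymous struct mirrors the
 * inlen/outlen/data layout documented in the comment removed above;
 * CDB contents and buffer sizes are illustrative assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>		/* SCSI_IOCTL_SEND_COMMAND */

static int inquiry_example(const char *dev)
{
	struct {
		unsigned int inlen;		/* bytes written to the device */
		unsigned int outlen;		/* bytes expected back from the device */
		unsigned char data[6 + 36];	/* CDB in, response (or sense) out */
	} sic;
	int fd, ret;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;

	memset(&sic, 0, sizeof(sic));
	sic.inlen = 0;			/* INQUIRY sends no data to the device */
	sic.outlen = 36;		/* standard INQUIRY response length */
	sic.data[0] = 0x12;		/* INQUIRY opcode */
	sic.data[4] = 36;		/* allocation length */

	ret = ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic);
	if (ret < 0)
		perror("SCSI_IOCTL_SEND_COMMAND");	/* Unix error in errno */
	else if (ret > 0)
		fprintf(stderr, "SCSI status 0x%x\n", ret & 0xff);
	else
		printf("vendor: %.8s\n", (const char *)sic.data + 8);	/* response starts at data[0] */

	close(fd);
	return ret;
}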
/* /*
* The scsi_ioctl_get_pci() function places into arg the value * The scsi_ioctl_get_pci() function places into arg the value
* pci_dev::slot_name (8 characters) for the PCI device (if any). * pci_dev::slot_name (8 characters) for the PCI device (if any).
...@@ -409,7 +235,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) ...@@ -409,7 +235,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
case SCSI_IOCTL_SEND_COMMAND: case SCSI_IOCTL_SEND_COMMAND:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES; return -EACCES;
return scsi_ioctl_send_command(sdev, arg); return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg);
case SCSI_IOCTL_DOORLOCK: case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK: case SCSI_IOCTL_DOORUNLOCK:
......
...@@ -1479,6 +1479,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q, ...@@ -1479,6 +1479,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
static void scsi_kill_request(struct request *req, request_queue_t *q) static void scsi_kill_request(struct request *req, request_queue_t *q)
{ {
struct scsi_cmnd *cmd = req->special; struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
...@@ -1491,6 +1493,19 @@ static void scsi_kill_request(struct request *req, request_queue_t *q) ...@@ -1491,6 +1493,19 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
scsi_init_cmd_errh(cmd); scsi_init_cmd_errh(cmd);
cmd->result = DID_NO_CONNECT << 16; cmd->result = DID_NO_CONNECT << 16;
atomic_inc(&cmd->device->iorequest_cnt); atomic_inc(&cmd->device->iorequest_cnt);
/*
* SCSI request completion path will do scsi_device_unbusy(),
* bump busy counts. To bump the counters, we need to dance
* with the locks as normal issue path does.
*/
sdev->device_busy++;
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
shost->host_busy++;
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
__scsi_done(cmd); __scsi_done(cmd);
} }
......
#ifndef _SCSI_SAS_INTERNAL_H
#define _SCSI_SAS_INTERNAL_H
#define SAS_HOST_ATTRS 0
#define SAS_PORT_ATTRS 17
#define SAS_RPORT_ATTRS 7
#define SAS_END_DEV_ATTRS 3
#define SAS_EXPANDER_ATTRS 7
struct sas_internal {
struct scsi_transport_template t;
struct sas_function_template *f;
struct sas_domain_function_template *dft;
struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
struct transport_container phy_attr_cont;
struct transport_container rphy_attr_cont;
struct transport_container end_dev_attr_cont;
struct transport_container expander_attr_cont;
/*
* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c
*/
struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
};
#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
#endif
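The to_sas_internal() helper that moves into this new header is a standard container_of() accessor; a hedged sketch of how transport code typically recovers the wrapper from a host follows (the function name is illustrative, not part of this patch).

/*
 * Hedged sketch, not part of this patch: recover the sas_internal
 * wrapper from a Scsi_Host via the embedded scsi_transport_template.
 * The helper name is illustrative.
 */
#include <scsi/scsi_host.h>
#include "scsi_sas_internal.h"

static struct sas_internal *shost_to_sas_internal(struct Scsi_Host *shost)
{
	/* shost->transportt points at the template embedded in sas_internal */
	return to_sas_internal(shost->transportt);
}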
...@@ -673,6 +673,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) ...@@ -673,6 +673,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
case TYPE_MEDIUM_CHANGER: case TYPE_MEDIUM_CHANGER:
case TYPE_ENCLOSURE: case TYPE_ENCLOSURE:
case TYPE_COMM: case TYPE_COMM:
case TYPE_RAID:
case TYPE_RBC: case TYPE_RBC:
sdev->writeable = 1; sdev->writeable = 1;
break; break;
...@@ -737,6 +738,13 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) ...@@ -737,6 +738,13 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
if (*bflags & BLIST_SELECT_NO_ATN) if (*bflags & BLIST_SELECT_NO_ATN)
sdev->select_no_atn = 1; sdev->select_no_atn = 1;
/*
* Maximum 512 sector transfer length
* broken RA4x00 Compaq Disk Array
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_sectors(sdev->request_queue, 512);
/* /*
* Some devices may not want to have a start command automatically * Some devices may not want to have a start command automatically
* issued when a device is added. * issued when a device is added.
...@@ -1146,10 +1154,13 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, ...@@ -1146,10 +1154,13 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
* Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
* support more than 8 LUNs. * support more than 8 LUNs.
*/ */
if ((bflags & BLIST_NOREPORTLUN) || if (bflags & BLIST_NOREPORTLUN)
starget->scsi_level < SCSI_2 || return 1;
(starget->scsi_level < SCSI_3 && if (starget->scsi_level < SCSI_2 &&
(!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) ) starget->scsi_level != SCSI_UNKNOWN)
return 1;
if (starget->scsi_level < SCSI_3 &&
(!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
return 1; return 1;
if (bflags & BLIST_NOLUN) if (bflags & BLIST_NOLUN)
return 0; return 0;
......
...@@ -35,40 +35,7 @@ ...@@ -35,40 +35,7 @@
#include <scsi/scsi_transport.h> #include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h> #include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
#define SAS_HOST_ATTRS 0
#define SAS_PORT_ATTRS 17
#define SAS_RPORT_ATTRS 7
#define SAS_END_DEV_ATTRS 3
#define SAS_EXPANDER_ATTRS 7
struct sas_internal {
struct scsi_transport_template t;
struct sas_function_template *f;
struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
struct transport_container phy_attr_cont;
struct transport_container rphy_attr_cont;
struct transport_container end_dev_attr_cont;
struct transport_container expander_attr_cont;
/*
* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c
*/
struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
};
#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
struct sas_host_attrs { struct sas_host_attrs {
struct list_head rphy_list; struct list_head rphy_list;
struct mutex lock; struct mutex lock;
...@@ -406,8 +373,6 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number) ...@@ -406,8 +373,6 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
if (!phy) if (!phy)
return NULL; return NULL;
get_device(parent);
phy->number = number; phy->number = number;
device_initialize(&phy->dev); device_initialize(&phy->dev);
...@@ -459,10 +424,7 @@ EXPORT_SYMBOL(sas_phy_add); ...@@ -459,10 +424,7 @@ EXPORT_SYMBOL(sas_phy_add);
void sas_phy_free(struct sas_phy *phy) void sas_phy_free(struct sas_phy *phy)
{ {
transport_destroy_device(&phy->dev); transport_destroy_device(&phy->dev);
put_device(phy->dev.parent); put_device(&phy->dev);
put_device(phy->dev.parent);
put_device(phy->dev.parent);
kfree(phy);
} }
EXPORT_SYMBOL(sas_phy_free); EXPORT_SYMBOL(sas_phy_free);
...@@ -484,7 +446,7 @@ sas_phy_delete(struct sas_phy *phy) ...@@ -484,7 +446,7 @@ sas_phy_delete(struct sas_phy *phy)
transport_remove_device(dev); transport_remove_device(dev);
device_del(dev); device_del(dev);
transport_destroy_device(dev); transport_destroy_device(dev);
put_device(dev->parent); put_device(dev);
} }
EXPORT_SYMBOL(sas_phy_delete); EXPORT_SYMBOL(sas_phy_delete);
...@@ -800,7 +762,6 @@ struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent) ...@@ -800,7 +762,6 @@ struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent)
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) { if (!rdev) {
put_device(&parent->dev);
return NULL; return NULL;
} }
...@@ -836,7 +797,6 @@ struct sas_rphy *sas_expander_alloc(struct sas_phy *parent, ...@@ -836,7 +797,6 @@ struct sas_rphy *sas_expander_alloc(struct sas_phy *parent,
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) { if (!rdev) {
put_device(&parent->dev);
return NULL; return NULL;
} }
...@@ -885,6 +845,8 @@ int sas_rphy_add(struct sas_rphy *rphy) ...@@ -885,6 +845,8 @@ int sas_rphy_add(struct sas_rphy *rphy)
(identify->target_port_protocols & (identify->target_port_protocols &
(SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA))) (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
rphy->scsi_target_id = sas_host->next_target_id++; rphy->scsi_target_id = sas_host->next_target_id++;
else if (identify->device_type == SAS_END_DEVICE)
rphy->scsi_target_id = -1;
mutex_unlock(&sas_host->lock); mutex_unlock(&sas_host->lock);
if (identify->device_type == SAS_END_DEVICE && if (identify->device_type == SAS_END_DEVICE &&
...@@ -910,6 +872,7 @@ EXPORT_SYMBOL(sas_rphy_add); ...@@ -910,6 +872,7 @@ EXPORT_SYMBOL(sas_rphy_add);
*/ */
void sas_rphy_free(struct sas_rphy *rphy) void sas_rphy_free(struct sas_rphy *rphy)
{ {
struct device *dev = &rphy->dev;
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
...@@ -917,21 +880,9 @@ void sas_rphy_free(struct sas_rphy *rphy) ...@@ -917,21 +880,9 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list); list_del(&rphy->list);
mutex_unlock(&sas_host->lock); mutex_unlock(&sas_host->lock);
transport_destroy_device(&rphy->dev); transport_destroy_device(dev);
put_device(rphy->dev.parent);
put_device(rphy->dev.parent);
put_device(rphy->dev.parent);
if (rphy->identify.device_type == SAS_END_DEVICE) {
struct sas_end_device *edev = rphy_to_end_device(rphy);
kfree(edev);
} else {
/* must be expander */
struct sas_expander_device *edev =
rphy_to_expander_device(rphy);
kfree(edev); put_device(dev);
}
} }
EXPORT_SYMBOL(sas_rphy_free); EXPORT_SYMBOL(sas_rphy_free);
...@@ -971,7 +922,7 @@ sas_rphy_delete(struct sas_rphy *rphy) ...@@ -971,7 +922,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
parent->rphy = NULL; parent->rphy = NULL;
put_device(&parent->dev); put_device(dev);
} }
EXPORT_SYMBOL(sas_rphy_delete); EXPORT_SYMBOL(sas_rphy_delete);
......
...@@ -748,6 +748,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, ...@@ -748,6 +748,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
/* /*
* most likely out of mem, but could also be a bad map * most likely out of mem, but could also be a bad map
*/ */
sg_finish_rem_req(srp);
return -ENOMEM; return -ENOMEM;
} else } else
return 0; return 0;
...@@ -1044,7 +1045,7 @@ sg_ioctl(struct inode *inode, struct file *filp, ...@@ -1044,7 +1045,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (!sg_allow_access(opcode, sdp->device->type)) if (!sg_allow_access(opcode, sdp->device->type))
return -EPERM; return -EPERM;
} }
return scsi_ioctl_send_command(sdp->device, p); return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
case SG_SET_DEBUG: case SG_SET_DEBUG:
result = get_user(val, ip); result = get_user(val, ip);
if (result) if (result)
...@@ -1798,8 +1799,10 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) ...@@ -1798,8 +1799,10 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
res = st_map_user_pages(schp->buffer, mx_sc_elems, res = st_map_user_pages(schp->buffer, mx_sc_elems,
(unsigned long)hp->dxferp, dxfer_len, (unsigned long)hp->dxferp, dxfer_len,
(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
if (res <= 0) if (res <= 0) {
sg_remove_scat(schp);
return 1; return 1;
}
schp->k_use_sg = res; schp->k_use_sg = res;
schp->dio_in_use = 1; schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO; hp->info |= SG_INFO_DIRECT_IO;
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#ifndef SYM_DEFS_H #ifndef SYM_DEFS_H
#define SYM_DEFS_H #define SYM_DEFS_H
#define SYM_VERSION "2.2.2" #define SYM_VERSION "2.2.3"
#define SYM_DRIVER_NAME "sym-" SYM_VERSION #define SYM_DRIVER_NAME "sym-" SYM_VERSION
/* /*
......
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
*/ */
#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2) #define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2)
#define SYM_OPT_HANDLE_DEVICE_QUEUEING #undef SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING #define SYM_OPT_LIMIT_COMMAND_REORDERING
/* /*
......
...@@ -1049,6 +1049,8 @@ int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int); ...@@ -1049,6 +1049,8 @@ int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision); struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn); void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#else
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#endif #endif
void sym_start_up(struct sym_hcb *np, int reason); void sym_start_up(struct sym_hcb *np, int reason);
void sym_interrupt(struct sym_hcb *np); void sym_interrupt(struct sym_hcb *np);
......
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
struct scsi_ioctl_command;
struct request_queue; struct request_queue;
typedef struct request_queue request_queue_t; typedef struct request_queue request_queue_t;
struct elevator_queue; struct elevator_queue;
...@@ -611,6 +613,8 @@ extern void blk_plug_device(request_queue_t *); ...@@ -611,6 +613,8 @@ extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *); extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *); extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *); extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
struct gendisk *, struct scsi_ioctl_command __user *);
extern void blk_start_queue(request_queue_t *q); extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q); extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q);
......
...@@ -28,5 +28,6 @@ ...@@ -28,5 +28,6 @@
#define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */ #define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */
#define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */ #define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */
#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */ #define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
#define BLIST_ATTACH_PQ3 0x800000 /* Scan: Attach to PQ3 devices */ #define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
#define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
#endif #endif
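To make the renumbering above concrete, a small illustrative snippet (hypothetical OLD_ macro names, not from this patch) shows why two blacklist flags sharing bit 0x800000 cannot be told apart by the scan code:

/*
 * Illustrative only: if BLIST_MAX_512 and BLIST_ATTACH_PQ3 both used
 * 0x800000, a device blacklisted for 512-sector transfers would also
 * appear to request PQ3 attachment. The OLD_ names are hypothetical.
 */
#define OLD_BLIST_MAX_512	0x800000
#define OLD_BLIST_ATTACH_PQ3	0x800000	/* same bit: the clash fixed above */

static inline int wants_pq3_attach(int bflags)
{
	/* also true for any BLIST_MAX_512 device -- hence the move to 0x1000000 */
	return (bflags & OLD_BLIST_ATTACH_PQ3) != 0;
}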