Commit 89a93f2f authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (102 commits)
  [SCSI] scsi_dh: fix kconfig related build errors
  [SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of container_of
  [SCSI] scsi_cmnd.h: remove double inclusion of linux/blkdev.h
  [SCSI] make struct scsi_{host,target}_type static
  [SCSI] fix locking in host use of blk_plug_device()
  [SCSI] zfcp: Cleanup external header file
  [SCSI] zfcp: Cleanup code in zfcp_erp.c
  [SCSI] zfcp: zfcp_fsf cleanup.
  [SCSI] zfcp: consolidate sysfs things into one file.
  [SCSI] zfcp: Cleanup of code in zfcp_aux.c
  [SCSI] zfcp: Cleanup of code in zfcp_scsi.c
  [SCSI] zfcp: Move status accessors from zfcp to SCSI include file.
  [SCSI] zfcp: Small QDIO cleanups
  [SCSI] zfcp: Adapter reopen for large number of unsolicited status
  [SCSI] zfcp: Fix error checking for ELS ADISC requests
  [SCSI] zfcp: wait until adapter is finished with ERP during auto-port
  [SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver
  [SCSI] sg: Add target reset support
  [SCSI] lib: Add support for the T10 (SCSI) Data Integrity Field CRC
  [SCSI] sd: Move scsi_disk() accessor function to sd.h
  ...
parents 260eddf4 fe9233fb
@@ -56,19 +56,33 @@ Supported Cards/Chipsets
9005:0285:9005:02d1 Adaptec 5405 (Voodoo40)
9005:0285:15d9:02d2 SMC AOC-USAS-S8i-LP
9005:0285:15d9:02d3 SMC AOC-USAS-S8iR-LP
9005:0285:9005:02d4 Adaptec 2045 (Voodoo04 Lite)
9005:0285:9005:02d5 Adaptec 2405 (Voodoo40 Lite)
9005:0285:9005:02d6 Adaptec 2445 (Voodoo44 Lite)
9005:0285:9005:02d7 Adaptec 2805 (Voodoo80 Lite)
9005:0285:9005:02d4 Adaptec ASR-2045 (Voodoo04 Lite)
9005:0285:9005:02d5 Adaptec ASR-2405 (Voodoo40 Lite)
9005:0285:9005:02d6 Adaptec ASR-2445 (Voodoo44 Lite)
9005:0285:9005:02d7 Adaptec ASR-2805 (Voodoo80 Lite)
9005:0285:9005:02d8 Adaptec 5405G (Voodoo40 PM)
9005:0285:9005:02d9 Adaptec 5445G (Voodoo44 PM)
9005:0285:9005:02da Adaptec 5805G (Voodoo80 PM)
9005:0285:9005:02db Adaptec 5085G (Voodoo08 PM)
9005:0285:9005:02dc Adaptec 51245G (Voodoo124 PM)
9005:0285:9005:02dd Adaptec 51645G (Voodoo164 PM)
9005:0285:9005:02de Adaptec 52445G (Voodoo244 PM)
9005:0285:9005:02df Adaptec ASR-2045G (Voodoo04 Lite PM)
9005:0285:9005:02e0 Adaptec ASR-2405G (Voodoo40 Lite PM)
9005:0285:9005:02e1 Adaptec ASR-2445G (Voodoo44 Lite PM)
9005:0285:9005:02e2 Adaptec ASR-2805G (Voodoo80 Lite PM)
1011:0046:9005:0364 Adaptec 5400S (Mustang)
1011:0046:9005:0365 Adaptec 5400S (Mustang)
9005:0287:9005:0800 Adaptec Themisto (Jupiter)
9005:0200:9005:0200 Adaptec Themisto (Jupiter)
9005:0286:9005:0800 Adaptec Callisto (Jupiter)
1011:0046:9005:1364 Dell PERC 2/QC (Quad Channel, Mustang)
1011:0046:9005:1365 Dell PERC 2/QC (Quad Channel, Mustang)
1028:0001:1028:0001 Dell PERC 2/Si (Iguana)
1028:0003:1028:0003 Dell PERC 3/Si (SlimFast)
1028:0002:1028:0002 Dell PERC 3/Di (Opal)
1028:0004:1028:0004 Dell PERC 3/DiF (Iguana)
1028:0004:1028:0004 Dell PERC 3/SiF (Iguana)
1028:0004:1028:00d0 Dell PERC 3/DiF (Iguana)
1028:0002:1028:00d1 Dell PERC 3/DiV (Viper)
1028:0002:1028:00d9 Dell PERC 3/DiL (Lexus)
1028:000a:1028:0106 Dell PERC 3/DiJ (Jaguar)
......
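Each row in the table above is a PCI identifier tuple in vendor:device:subvendor:subdevice form that the aacraid driver matches at probe time. As a hedged illustration only (the real aacraid table also carries per-board index and class data not shown in this document), a single row maps onto a generic pci_device_id entry like this:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

/* 9005:0285:9005:02d4 "Adaptec ASR-2045 (Voodoo04 Lite)" expressed as a
 * pci_device_id initializer; the fields are vendor, device, subvendor,
 * subdevice, class, class_mask and driver_data. */
static const struct pci_device_id example_aac_ids[] = {
	{ 0x9005, 0x0285, 0x9005, 0x02d4, 0, 0, 0 },
	{ }	/* zero terminator */
};
MODULE_DEVICE_TABLE(pci, example_aac_ids);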
@@ -740,8 +740,13 @@ static int bsg_put_device(struct bsg_device *bd)
mutex_lock(&bsg_mutex);
do_free = atomic_dec_and_test(&bd->ref_count);
if (!do_free)
if (!do_free) {
mutex_unlock(&bsg_mutex);
goto out;
}
hlist_del(&bd->dev_list);
mutex_unlock(&bsg_mutex);
dprintk("%s: tearing down\n", bd->name);
@@ -757,10 +762,8 @@ static int bsg_put_device(struct bsg_device *bd)
*/
ret = bsg_complete_all_commands(bd);
hlist_del(&bd->dev_list);
kfree(bd);
out:
mutex_unlock(&bsg_mutex);
kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
if (do_free)
blk_put_queue(q);
......
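Taken together, the two bsg_put_device() hunks above narrow the bsg_mutex critical section: the hash-list removal now happens under the mutex, while the potentially sleeping command drain runs outside it. A sketch of the resulting function, reconstructed from these hunks (the dprintk and the lines elided between the hunks are assumed unchanged):

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	/* unhash while still protected by bsg_mutex ... */
	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	/* ... but wait for outstanding commands without holding it */
	ret = bsg_complete_all_commands(bd);
	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}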
This diff is collapsed.
@@ -94,7 +94,6 @@
/* support upto 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
#define ISCSI_ISER_MAX_LUN 256
#define ISCSI_ISER_MAX_CMD_LEN 16
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -172,7 +171,8 @@ struct iser_data_buf {
/* fwd declarations */
struct iser_device;
struct iscsi_iser_conn;
struct iscsi_iser_cmd_task;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_mem_reg {
u32 lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
#define MAX_REGD_BUF_VECTOR_LEN 2
struct iser_dto {
struct iscsi_iser_cmd_task *ctask;
struct iscsi_iser_task *task;
struct iser_conn *ib_conn;
int notify_enable;
@@ -240,7 +240,9 @@ struct iser_device {
struct iser_conn {
struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
atomic_t refcount;
spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
@@ -259,11 +261,9 @@ struct iser_conn {
struct iscsi_iser_conn {
struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
struct iser_conn *ib_conn; /* iSER IB conn */
rwlock_t lock;
};
struct iscsi_iser_cmd_task {
struct iscsi_iser_task {
struct iser_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_mgmt_task *mtask);
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_cmd_task *ctask);
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_cmd_task *ctask,
struct iscsi_data *hdr);
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr);
void iscsi_iser_recv(struct iscsi_conn *conn,
struct iscsi_hdr *hdr,
char *rx_data,
int rx_data_len);
int iser_conn_init(struct iser_conn **ib_conn);
void iser_conn_init(struct iser_conn *ib_conn);
void iser_conn_get(struct iser_conn *ib_conn);
void iser_conn_put(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
void iser_snd_completion(struct iser_desc *desc);
void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_dto_buffs_release(struct iser_dto *dto);
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
@@ -355,10 +359,10 @@ int iser_post_send(struct iser_desc *tx_desc);
int iser_conn_state_comp(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp);
int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
#endif
@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device,
/**
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
int dma_nents;
struct ib_device *dev;
char *mem = NULL;
struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
struct iser_data_buf *data = &iser_task->data[cmd_dir];
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
}
}
sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
iser_ctask->data_copy[cmd_dir].buf =
&iser_ctask->data_copy[cmd_dir].sg_single;
iser_ctask->data_copy[cmd_dir].size = 1;
sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
iser_task->data_copy[cmd_dir].buf =
&iser_task->data_copy[cmd_dir].sg_single;
iser_task->data_copy[cmd_dir].size = 1;
iser_ctask->data_copy[cmd_dir].copy_buf = mem;
iser_task->data_copy[cmd_dir].copy_buf = mem;
dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
dev = iser_task->iser_conn->ib_conn->device->ib_device;
dma_nents = ib_dma_map_sg(dev,
&iser_ctask->data_copy[cmd_dir].sg_single,
&iser_task->data_copy[cmd_dir].sg_single,
1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
return 0;
}
/**
* iser_finalize_rdma_unaligned_sg
*/
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct ib_device *dev;
struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
mem_copy = &iser_ctask->data_copy[cmd_dir];
dev = iser_task->iser_conn->ib_conn->device->ib_device;
mem_copy = &iser_task->data_copy[cmd_dir];
ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
/* copy back read RDMA to unaligned sg */
mem = mem_copy->copy_buf;
sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
sg_size = iser_ctask->data[ISER_DIR_IN].size;
sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
sg_size = iser_task->data[ISER_DIR_IN].size;
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
}
}
cmd_data_len = iser_ctask->data[cmd_dir].data_len;
cmd_data_len = iser_task->data[cmd_dir].data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
free_pages((unsigned long)mem_copy->copy_buf,
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
}
}
int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
struct ib_device *dev;
iser_ctask->dir[iser_dir] = 1;
dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
return 0;
}
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
struct ib_device *dev;
struct iser_data_buf *data;
dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
dev = iser_task->iser_conn->ib_conn->device->ib_device;
if (iser_ctask->dir[ISER_DIR_IN]) {
data = &iser_ctask->data[ISER_DIR_IN];
if (iser_task->dir[ISER_DIR_IN]) {
data = &iser_task->data[ISER_DIR_IN];
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
if (iser_ctask->dir[ISER_DIR_OUT]) {
data = &iser_ctask->data[ISER_DIR_OUT];
if (iser_task->dir[ISER_DIR_OUT]) {
data = &iser_task->data[ISER_DIR_OUT];
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
}
}
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
*
* returns 0 on success, errno code on failure
*/
int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
int err;
int i;
struct scatterlist *sg;
regd_buf = &iser_ctask->rdma_regd[cmd_dir];
regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_ctask);
iser_dma_unmap_task_data(iser_task);
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
return -ENOMEM;
mem = &iser_ctask->data_copy[cmd_dir];
mem = &iser_task->data_copy[cmd_dir];
}
/* if there a single dma entry, FMR is not needed */
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
if (err) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
ntoh24(iser_ctask->desc.iscsi_header.dlength));
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->page_vec->data_size, ib_conn->page_vec->length,
ib_conn->page_vec->offset);
......
@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn)
iser_device_try_release(device);
if (ib_conn->iser_conn)
ib_conn->iser_conn->ib_conn = NULL;
kfree(ib_conn);
iscsi_destroy_endpoint(ib_conn->ep);
}
void iser_conn_get(struct iser_conn *ib_conn)
{
atomic_inc(&ib_conn->refcount);
}
void iser_conn_put(struct iser_conn *ib_conn)
{
if (atomic_dec_and_test(&ib_conn->refcount))
iser_conn_release(ib_conn);
}
/**
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
iser_conn_release(ib_conn);
iser_conn_put(ib_conn);
}
static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
return ret;
}
int iser_conn_init(struct iser_conn **ibconn)
void iser_conn_init(struct iser_conn *ib_conn)
{
struct iser_conn *ib_conn;
ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
if (!ib_conn) {
iser_err("can't alloc memory for struct iser_conn\n");
return -ENOMEM;
}
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
*ibconn = ib_conn;
return 0;
}
/**
......
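The verbs hunks above invert the iser_conn lifecycle: iser_conn_init() no longer allocates, the structure is reference-counted from 1 via iser_conn_get()/iser_conn_put(), and the final put releases it together with its iscsi_endpoint. The matching caller-side change is in iscsi_iser.c, whose diff is collapsed above; the following is only a plausible sketch of the new connect path, assuming the connection is embedded in the endpoint's private data via iscsi_create_endpoint() (that helper and the ep->dd_data layout are assumptions, not shown in this excerpt):

/* Hypothetical illustration; relies on the iser and iscsi transport
 * headers that the driver already includes. */
static struct iscsi_endpoint *example_ep_connect(void)
{
	struct iscsi_endpoint *ep;
	struct iser_conn *ib_conn;

	ep = iscsi_create_endpoint(sizeof(struct iser_conn));
	if (!ep)
		return ERR_PTR(-ENOMEM);

	ib_conn = ep->dd_data;		/* conn now lives inside the endpoint */
	ib_conn->ep = ep;
	iser_conn_init(ib_conn);	/* state, waitqueue, refcount = 1 */

	return ep;
}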
@@ -252,27 +252,14 @@ config DM_ZERO
config DM_MULTIPATH
tristate "Multipath target"
depends on BLK_DEV_DM
# nasty syntax but means make DM_MULTIPATH independent
# of SCSI_DH if the latter isn't defined but if
# it is, DM_MULTIPATH must depend on it. We get a build
# error if SCSI_DH=m and DM_MULTIPATH=y
depends on SCSI_DH || !SCSI_DH
---help---
Allow volume managers to support multipath hardware.
config DM_MULTIPATH_EMC
tristate "EMC CX/AX multipath support"
depends on DM_MULTIPATH && BLK_DEV_DM
---help---
Multipath support for EMC CX/AX series hardware.
config DM_MULTIPATH_RDAC
tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
---help---
Multipath support for LSI/Engenio RDAC.
config DM_MULTIPATH_HP
tristate "HP MSA multipath support (EXPERIMENTAL)"
depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
---help---
Multipath support for HP MSA (Active/Passive) series hardware.
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
......
@@ -4,11 +4,9 @@
dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o
dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
dm-multipath-objs := dm-path-selector.o dm-mpath.o
dm-snapshot-objs := dm-snap.o dm-exception-store.o
dm-mirror-objs := dm-raid1.o
dm-rdac-objs := dm-mpath-rdac.o
dm-hp-sw-objs := dm-mpath-hp-sw.o
md-mod-objs := md.o bitmap.o
raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
raid6int1.o raid6int2.o raid6int4.o \
@@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
obj-$(CONFIG_DM_MULTIPATH_HP) += dm-hp-sw.o
obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
......
/*
* Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath support for EMC CLARiiON AX/CX-series hardware.
*/
#include "dm.h"
#include "dm-hw-handler.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#define DM_MSG_PREFIX "multipath emc"
struct emc_handler {
spinlock_t lock;
/* Whether we should send the short trespass command (FC-series)
* or the long version (default for AX/CX CLARiiON arrays). */
unsigned short_trespass;
/* Whether or not to honor SCSI reservations when initiating a
* switch-over. Default: Don't. */
unsigned hr;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};
#define TRESPASS_PAGE 0x22
#define EMC_FAILOVER_TIMEOUT (60 * HZ)
/* Code borrowed from dm-lsi-rdac by Mike Christie */
static inline void free_bio(struct bio *bio)
{
__free_page(bio->bi_io_vec[0].bv_page);
bio_put(bio);
}
static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
/* We also need to look at the sense keys here whether or not to
* switch to the next PG etc.
*
* For now simple logic: either it works or it doesn't.
*/
if (error)
dm_pg_init_complete(path, MP_FAIL_PATH);
else
dm_pg_init_complete(path, 0);
/* request is freed in block layer */
free_bio(bio);
}
static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
{
struct bio *bio;
struct page *page;
bio = bio_alloc(GFP_ATOMIC, 1);
if (!bio) {
DMERR("get_failover_bio: bio_alloc() failed.");
return NULL;
}
bio->bi_rw |= (1 << BIO_RW);
bio->bi_bdev = path->dev->bdev;
bio->bi_sector = 0;
bio->bi_private = path;
bio->bi_end_io = emc_endio;
page = alloc_page(GFP_ATOMIC);
if (!page) {
DMERR("get_failover_bio: alloc_page() failed.");
bio_put(bio);
return NULL;
}
if (bio_add_page(bio, page, data_size, 0) != data_size) {
DMERR("get_failover_bio: bio_add_page() failed.");
__free_page(page);
bio_put(bio);
return NULL;
}
return bio;
}
static struct request *get_failover_req(struct emc_handler *h,
struct bio *bio, struct dm_path *path)
{
struct request *rq;
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev_get_queue(bdev);
/* FIXME: Figure out why it fails with GFP_ATOMIC. */
rq = blk_get_request(q, WRITE, __GFP_WAIT);
if (!rq) {
DMERR("get_failover_req: blk_get_request failed");
return NULL;
}
blk_rq_append_bio(q, rq, bio);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
rq->timeout = EMC_FAILOVER_TIMEOUT;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
return rq;
}
static struct request *emc_trespass_get(struct emc_handler *h,
struct dm_path *path)
{
struct bio *bio;
struct request *rq;
unsigned char *page22;
unsigned char long_trespass_pg[] = {
0, 0, 0, 0,
TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
unsigned char short_trespass_pg[] = {
0, 0, 0, 0,
TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
0xff, /* Trespass target */
};
unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
sizeof(long_trespass_pg);
/* get bio backing */
if (data_size > PAGE_SIZE)
/* this should never happen */
return NULL;
bio = get_failover_bio(path, data_size);
if (!bio) {
DMERR("emc_trespass_get: no bio");
return NULL;
}
page22 = (unsigned char *)bio_data(bio);
memset(page22, 0, data_size);
memcpy(page22, h->short_trespass ?
short_trespass_pg : long_trespass_pg, data_size);
/* get request for block layer packet command */
rq = get_failover_req(h, bio, path);
if (!rq) {
DMERR("emc_trespass_get: no rq");
free_bio(bio);
return NULL;
}
/* Prepare the command. */
rq->cmd[0] = MODE_SELECT;
rq->cmd[1] = 0x10;
rq->cmd[4] = data_size;
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
return rq;
}
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path)
{
struct request *rq;
struct request_queue *q = bdev_get_queue(path->dev->bdev);
/*
* We can either blindly init the pg (then look at the sense),
* or we can send some commands to get the state here (then
* possibly send the fo cmnd), or we can also have the
* initial state passed into us and then get an update here.
*/
if (!q) {
DMINFO("emc_pg_init: no queue");
goto fail_path;
}
/* FIXME: The request should be pre-allocated. */
rq = emc_trespass_get(hwh->context, path);
if (!rq) {
DMERR("emc_pg_init: no rq");
goto fail_path;
}
DMINFO("emc_pg_init: sending switch-over command");
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
return;
fail_path:
dm_pg_init_complete(path, MP_FAIL_PATH);
}
static struct emc_handler *alloc_emc_handler(void)
{
struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
if (h)
spin_lock_init(&h->lock);
return h;
}
static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
struct emc_handler *h;
unsigned hr, short_trespass;
if (argc == 0) {
/* No arguments: use defaults */
hr = 0;
short_trespass = 0;
} else if (argc != 2) {
DMWARN("incorrect number of arguments");
return -EINVAL;
} else {
if ((sscanf(argv[0], "%u", &short_trespass) != 1)
|| (short_trespass > 1)) {
DMWARN("invalid trespass mode selected");
return -EINVAL;
}
if ((sscanf(argv[1], "%u", &hr) != 1)
|| (hr > 1)) {
DMWARN("invalid honor reservation flag selected");
return -EINVAL;
}
}
h = alloc_emc_handler();
if (!h)
return -ENOMEM;
hwh->context = h;
if ((h->short_trespass = short_trespass))
DMWARN("short trespass command will be send");
else
DMWARN("long trespass command will be send");
if ((h->hr = hr))
DMWARN("honor reservation bit will be set");
else
DMWARN("honor reservation bit will not be set (default)");
return 0;
}
static void emc_destroy(struct hw_handler *hwh)
{
struct emc_handler *h = (struct emc_handler *) hwh->context;
kfree(h);
hwh->context = NULL;
}
static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
{
/* FIXME: Patch from axboe still missing */
#if 0
int sense;
if (bio->bi_error & BIO_SENSE) {
sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */
if (sense == 0x020403) {
/* LUN Not Ready - Manual Intervention Required
* indicates this is a passive path.
*
* FIXME: However, if this is seen and EVPD C0
* indicates that this is due to a NDU in
* progress, we should set FAIL_PATH too.
* This indicates we might have to do a SCSI
* inquiry in the end_io path. Ugh. */
return MP_BYPASS_PG | MP_RETRY_IO;
} else if (sense == 0x052501) {
/* An array based copy is in progress. Do not
* fail the path, do not bypass to another PG,
* do not retry. Fail the IO immediately.
* (Actually this is the same conclusion as in
* the default handler, but lets make sure.) */
return 0;
} else if (sense == 0x062900) {
/* Unit Attention Code. This is the first IO
* to the new path, so just retry. */
return MP_RETRY_IO;
}
}
#endif
/* Try default handler */
return dm_scsi_err_handler(hwh, bio);
}
static struct hw_handler_type emc_hwh = {
.name = "emc",
.module = THIS_MODULE,
.create = emc_create,
.destroy = emc_destroy,
.pg_init = emc_pg_init,
.error = emc_error,
};
static int __init dm_emc_init(void)
{
int r = dm_register_hw_handler(&emc_hwh);
if (r < 0)
DMERR("register failed %d", r);
DMINFO("version 0.0.3 loaded");
return r;
}
static void __exit dm_emc_exit(void)
{
int r = dm_unregister_hw_handler(&emc_hwh);
if (r < 0)
DMERR("unregister failed %d", r);
}
module_init(dm_emc_init);
module_exit(dm_emc_exit);
MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>");
MODULE_LICENSE("GPL");
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath hardware handler registration.
*/
#include "dm.h"
#include "dm-hw-handler.h"
#include <linux/slab.h>
struct hwh_internal {
struct hw_handler_type hwht;
struct list_head list;
long use;
};
#define hwht_to_hwhi(__hwht) container_of((__hwht), struct hwh_internal, hwht)
static LIST_HEAD(_hw_handlers);
static DECLARE_RWSEM(_hwh_lock);
static struct hwh_internal *__find_hw_handler_type(const char *name)
{
struct hwh_internal *hwhi;
list_for_each_entry(hwhi, &_hw_handlers, list) {
if (!strcmp(name, hwhi->hwht.name))
return hwhi;
}
return NULL;
}
static struct hwh_internal *get_hw_handler(const char *name)
{
struct hwh_internal *hwhi;
down_read(&_hwh_lock);
hwhi = __find_hw_handler_type(name);
if (hwhi) {
if ((hwhi->use == 0) && !try_module_get(hwhi->hwht.module))
hwhi = NULL;
else
hwhi->use++;
}
up_read(&_hwh_lock);
return hwhi;
}
struct hw_handler_type *dm_get_hw_handler(const char *name)
{
struct hwh_internal *hwhi;
if (!name)
return NULL;
hwhi = get_hw_handler(name);
if (!hwhi) {
request_module("dm-%s", name);
hwhi = get_hw_handler(name);
}
return hwhi ? &hwhi->hwht : NULL;
}
void dm_put_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi;
if (!hwht)
return;
down_read(&_hwh_lock);
hwhi = __find_hw_handler_type(hwht->name);
if (!hwhi)
goto out;
if (--hwhi->use == 0)
module_put(hwhi->hwht.module);
BUG_ON(hwhi->use < 0);
out:
up_read(&_hwh_lock);
}
static struct hwh_internal *_alloc_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi = kzalloc(sizeof(*hwhi), GFP_KERNEL);
if (hwhi)
hwhi->hwht = *hwht;
return hwhi;
}
int dm_register_hw_handler(struct hw_handler_type *hwht)
{
int r = 0;
struct hwh_internal *hwhi = _alloc_hw_handler(hwht);
if (!hwhi)
return -ENOMEM;
down_write(&_hwh_lock);
if (__find_hw_handler_type(hwht->name)) {
kfree(hwhi);
r = -EEXIST;
} else
list_add(&hwhi->list, &_hw_handlers);
up_write(&_hwh_lock);
return r;
}
int dm_unregister_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi;
down_write(&_hwh_lock);
hwhi = __find_hw_handler_type(hwht->name);
if (!hwhi) {
up_write(&_hwh_lock);
return -EINVAL;
}
if (hwhi->use) {
up_write(&_hwh_lock);
return -ETXTBSY;
}
list_del(&hwhi->list);
up_write(&_hwh_lock);
kfree(hwhi);
return 0;
}
unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio)
{
#if 0
int sense_key, asc, ascq;
if (bio->bi_error & BIO_SENSE) {
/* FIXME: This is just an initial guess. */
/* key / asc / ascq */
sense_key = (bio->bi_error >> 16) & 0xff;
asc = (bio->bi_error >> 8) & 0xff;
ascq = bio->bi_error & 0xff;
switch (sense_key) {
/* This block as a whole comes from the device.
* So no point retrying on another path. */
case 0x03: /* Medium error */
case 0x05: /* Illegal request */
case 0x07: /* Data protect */
case 0x08: /* Blank check */
case 0x0a: /* copy aborted */
case 0x0c: /* obsolete - no clue ;-) */
case 0x0d: /* volume overflow */
case 0x0e: /* data miscompare */
case 0x0f: /* reserved - no idea either. */
return MP_ERROR_IO;
/* For these errors it's unclear whether they
* come from the device or the controller.
* So just lets try a different path, and if
* it eventually succeeds, user-space will clear
* the paths again... */
case 0x02: /* Not ready */
case 0x04: /* Hardware error */
case 0x09: /* vendor specific */
case 0x0b: /* Aborted command */
return MP_FAIL_PATH;
case 0x06: /* Unit attention - might want to decode */
if (asc == 0x04 && ascq == 0x01)
/* "Unit in the process of
* becoming ready" */
return 0;
return MP_FAIL_PATH;
/* FIXME: For Unit Not Ready we may want
* to have a generic pg activation
* feature (START_UNIT). */
/* Should these two ever end up in the
* error path? I don't think so. */
case 0x00: /* No sense */
case 0x01: /* Recovered error */
return 0;
}
}
#endif
/* We got no idea how to decode the other kinds of errors ->
* assume generic error condition. */
return MP_FAIL_PATH;
}
EXPORT_SYMBOL_GPL(dm_register_hw_handler);
EXPORT_SYMBOL_GPL(dm_unregister_hw_handler);
EXPORT_SYMBOL_GPL(dm_scsi_err_handler);
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath hardware handler registration.
*/
#ifndef DM_HW_HANDLER_H
#define DM_HW_HANDLER_H
#include <linux/device-mapper.h>
#include "dm-mpath.h"
struct hw_handler_type;
struct hw_handler {
struct hw_handler_type *type;
struct mapped_device *md;
void *context;
};
/*
* Constructs a hardware handler object, takes custom arguments
*/
/* Information about a hardware handler type */
struct hw_handler_type {
char *name;
struct module *module;
int (*create) (struct hw_handler *handler, unsigned int argc,
char **argv);
void (*destroy) (struct hw_handler *hwh);
void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path);
unsigned (*error) (struct hw_handler *hwh, struct bio *bio);
int (*status) (struct hw_handler *hwh, status_type_t type,
char *result, unsigned int maxlen);
};
/* Register a hardware handler */
int dm_register_hw_handler(struct hw_handler_type *type);
/* Unregister a hardware handler */
int dm_unregister_hw_handler(struct hw_handler_type *type);
/* Returns a registered hardware handler type */
struct hw_handler_type *dm_get_hw_handler(const char *name);
/* Releases a hardware handler */
void dm_put_hw_handler(struct hw_handler_type *hwht);
/* Default err function */
unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio);
/* Error flags for err and dm_pg_init_complete */
#define MP_FAIL_PATH 1
#define MP_BYPASS_PG 2
#define MP_ERROR_IO 4 /* Don't retry this I/O */
#define MP_RETRY 8
#endif
/*
* Copyright (C) 2005 Mike Christie, All rights reserved.
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
* Authors: Mike Christie
* Dave Wysochanski
*
* This file is released under the GPL.
*
* This module implements the specific path activation code for
* HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive)
* storage arrays.
* These storage arrays have controller-based failover, not
* LUN-based failover. However, LUN-based failover is the design
* of dm-multipath. Thus, this module is written for LUN-based failover.
*/
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "dm.h"
#include "dm-hw-handler.h"
#define DM_MSG_PREFIX "multipath hp-sw"
#define DM_HP_HWH_NAME "hp-sw"
#define DM_HP_HWH_VER "1.0.0"
struct hp_sw_context {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};
/*
* hp_sw_error_is_retryable - Is an HP-specific check condition retryable?
* @req: path activation request
*
* Examine error codes of request and determine whether the error is retryable.
* Some error codes are already retried by scsi-ml (see
* scsi_decide_disposition), but some HP specific codes are not.
* The intent of this routine is to supply the logic for the HP specific
* check conditions.
*
* Returns:
* 1 - command completed with retryable error
* 0 - command completed with non-retryable error
*
* Possible optimizations
* 1. More hardware-specific error codes
*/
static int hp_sw_error_is_retryable(struct request *req)
{
/*
* NOT_READY is known to be retryable
* For now we just dump out the sense data and call it retryable
*/
if (status_byte(req->errors) == CHECK_CONDITION)
__scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len);
/*
* At this point we don't have complete information about all the error
* codes from this hardware, so we are just conservative and retry
* when in doubt.
*/
return 1;
}
/*
* hp_sw_end_io - Completion handler for HP path activation.
* @req: path activation request
* @error: scsi-ml error
*
* Check sense data, free request structure, and notify dm that
* pg initialization has completed.
*
* Context: scsi-ml softirq
*
*/
static void hp_sw_end_io(struct request *req, int error)
{
struct dm_path *path = req->end_io_data;
unsigned err_flags = 0;
if (!error) {
DMDEBUG("%s path activation command - success",
path->dev->name);
goto out;
}
if (hp_sw_error_is_retryable(req)) {
DMDEBUG("%s path activation command - retry",
path->dev->name);
err_flags = MP_RETRY;
goto out;
}
DMWARN("%s path activation fail - error=0x%x",
path->dev->name, error);
err_flags = MP_FAIL_PATH;
out:
req->end_io_data = NULL;
__blk_put_request(req->q, req);
dm_pg_init_complete(path, err_flags);
}
/*
* hp_sw_get_request - Allocate an HP specific path activation request
* @path: path on which request will be sent (needed for request queue)
*
* The START command is used for path activation request.
* These arrays are controller-based failover, not LUN based.
* One START command issued to a single path will fail over all
* LUNs for the same controller.
*
* Possible optimizations
* 1. Make timeout configurable
* 2. Preallocate request
*/
static struct request *hp_sw_get_request(struct dm_path *path)
{
struct request *req;
struct block_device *bdev = path->dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
struct hp_sw_context *h = path->hwhcontext;
req = blk_get_request(q, WRITE, GFP_NOIO);
if (!req)
goto out;
req->timeout = 60 * HZ;
req->errors = 0;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
req->end_io_data = path;
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1;
req->cmd_len = COMMAND_SIZE(req->cmd[0]);
out:
return req;
}
/*
* hp_sw_pg_init - HP path activation implementation.
* @hwh: hardware handler specific data
* @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
* @path: path to send initialization command
*
* Send an HP-specific path activation command on 'path'.
* Do not try to optimize in any way, just send the activation command.
* More than one path activation command may be sent to the same controller.
* This seems to work fine for basic failover support.
*
* Possible optimizations
* 1. Detect an in-progress activation request and avoid submitting another one
* 2. Model the controller and only send a single activation request at a time
* 3. Determine the state of a path before sending an activation request
*
* Context: kmpathd (see process_queued_ios() in dm-mpath.c)
*/
static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path)
{
struct request *req;
struct hp_sw_context *h;
path->hwhcontext = hwh->context;
h = hwh->context;
req = hp_sw_get_request(path);
if (!req) {
DMERR("%s path activation command - allocation fail",
path->dev->name);
goto retry;
}
DMDEBUG("%s path activation command - sent", path->dev->name);
blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
return;
retry:
dm_pg_init_complete(path, MP_RETRY);
}
static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
struct hp_sw_context *h;
h = kmalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
hwh->context = h;
return 0;
}
static void hp_sw_destroy(struct hw_handler *hwh)
{
struct hp_sw_context *h = hwh->context;
kfree(h);
}
static struct hw_handler_type hp_sw_hwh = {
.name = DM_HP_HWH_NAME,
.module = THIS_MODULE,
.create = hp_sw_create,
.destroy = hp_sw_destroy,
.pg_init = hp_sw_pg_init,
};
static int __init hp_sw_init(void)
{
int r;
r = dm_register_hw_handler(&hp_sw_hwh);
if (r < 0)
DMERR("register failed %d", r);
else
DMINFO("version " DM_HP_HWH_VER " loaded");
return r;
}
static void __exit hp_sw_exit(void)
{
int r;
r = dm_unregister_hw_handler(&hp_sw_hwh);
if (r < 0)
DMERR("unregister failed %d", r);
}
module_init(hp_sw_init);
module_exit(hp_sw_exit);
MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support");
MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DM_HP_HWH_VER);
@@ -7,7 +7,6 @@
#include "dm.h"
#include "dm-path-selector.h"
#include "dm-hw-handler.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
#include "dm-uevent.h"
@@ -20,6 +19,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
spinlock_t lock;
struct hw_handler hw_handler;
const char *hw_handler_name;
struct work_struct activate_path;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static void free_multipath(struct multipath *m)
{
struct priority_group *pg, *tmp;
struct hw_handler *hwh = &m->hw_handler;
list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
list_del(&pg->list);
free_priority_group(pg, m->ti);
}
if (hwh->type) {
hwh->type->destroy(hwh);
dm_put_hw_handler(hwh->type);
}
kfree(m->hw_handler_name);
mempool_destroy(m->mpio_pool);
kfree(m);
}
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
struct hw_handler *hwh = &m->hw_handler;
m->current_pg = pgpath->pg;
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (hwh->type && hwh->type->pg_init) {
if (m->hw_handler_name) {
m->pg_init_required = 1;
m->queue_io = 1;
} else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
struct hw_handler *hwh = &m->hw_handler;
struct pgpath *pgpath = NULL;
unsigned init_required = 0, must_queue = 1;
unsigned long flags;
@@ -439,7 +434,7 @@ static void process_queued_ios(struct work_struct *work)
spin_unlock_irqrestore(&m->lock, flags);
if (init_required)
hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
queue_work(kmpath_handlerd, &m->activate_path);
if (!must_queue)
dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
int r;
struct hw_handler_type *hwht;
unsigned hw_argc;
struct dm_target *ti = m->ti;
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{0, 1024, "invalid number of hardware handler args"},
};
r = read_param(_params, shift(as), &hw_argc, &ti->error);
if (r)
if (read_param(_params, shift(as), &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
hwht = dm_get_hw_handler(shift(as));
if (!hwht) {
m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
kfree(m->hw_handler_name);
m->hw_handler_name = NULL;
return -EINVAL;
}
m->hw_handler.md = dm_table_get_md(ti->table);
dm_put(m->hw_handler.md);
r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
if (r) {
dm_put_hw_handler(hwht);
ti->error = "hardware handler constructor failed";
return r;
}
m->hw_handler.type = hwht;
consume(as, hw_argc - 1);
return 0;
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = (struct multipath *) ti->private;
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
free_multipath(m);
}
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
return limit_reached;
}
/*
* pg_init must call this when it has completed its initialisation
*/
void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
static void pg_init_done(struct dm_path *path, int errors)
{
struct pgpath *pgpath = path_to_pgpath(path);
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
/*
* If requested, retry pg_init until maximum number of retries exceeded.
* If retry not requested and PG already bypassed, always fail the path.
*/
if (err_flags & MP_RETRY) {
if (pg_init_limit_reached(m, pgpath))
err_flags |= MP_FAIL_PATH;
} else if (err_flags && pg->bypassed)
err_flags |= MP_FAIL_PATH;
if (err_flags & MP_FAIL_PATH)
/* device or driver problems */
switch (errors) {
case SCSI_DH_OK:
break;
case SCSI_DH_NOSYS:
if (!m->hw_handler_name) {
errors = 0;
break;
}
DMERR("Cannot failover device because scsi_dh_%s was not "
"loaded.", m->hw_handler_name);
/*
* Fail path for now, so we do not ping pong
*/
fail_path(pgpath);
if (err_flags & MP_BYPASS_PG)
break;
case SCSI_DH_DEV_TEMP_BUSY:
/*
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
bypass_pg(m, pg, 1);
break;
/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
case SCSI_DH_RETRY:
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
fail_path(pgpath);
errors = 0;
break;
default:
/*
* We probably do not want to fail the path for a device
* error, but this is what the old dm did. In future
* patches we can do more advanced handling.
*/
fail_path(pgpath);
}
spin_lock_irqsave(&m->lock, flags);
if (err_flags & ~MP_RETRY) {
if (errors) {
DMERR("Could not failover device. Error %d.", errors);
m->current_pgpath = NULL;
m->current_pg = NULL;
} else if (!m->pg_init_required)
} else if (!m->pg_init_required) {
m->queue_io = 0;
pg->bypassed = 0;
}
m->pg_init_in_progress = 0;
queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
int ret;
struct multipath *m =
container_of(work, struct multipath, activate_path);
struct dm_path *path = &m->current_pgpath->path;
ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
pg_init_done(path, ret);
}
/*
* end_io handling
*/
static int do_end_io(struct multipath *m, struct bio *bio,
int error, struct dm_mpath_io *mpio)
{
struct hw_handler *hwh = &m->hw_handler;
unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
unsigned long flags;
if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
}
spin_unlock_irqrestore(&m->lock, flags);
if (hwh->type && hwh->type->error)
err_flags = hwh->type->error(hwh, bio);
if (mpio->pgpath) {
if (err_flags & MP_FAIL_PATH)
fail_path(mpio->pgpath);
if (err_flags & MP_BYPASS_PG)
bypass_pg(m, mpio->pgpath->pg, 1);
}
if (err_flags & MP_ERROR_IO)
return -EIO;
if (mpio->pgpath)
fail_path(mpio->pgpath);
requeue:
dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
int sz = 0;
unsigned long flags;
struct multipath *m = (struct multipath *) ti->private;
struct hw_handler *hwh = &m->hw_handler;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
}
if (hwh->type && hwh->type->status)
sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
else if (!hwh->type || type == STATUSTYPE_INFO)
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
DMEMIT("0 ");
else
DMEMIT("1 %s ", hwh->type->name);
DMEMIT("1 %s ", m->hw_handler_name);
DMEMIT("%u ", m->nr_priority_groups);
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
return -ENOMEM;
}
/*
* A separate workqueue is used to handle the device handlers
* to avoid overloading existing workqueue. Overloading the
* old workqueue would also create a bottleneck in the
* path of the storage hardware device activation.
*/
kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
destroy_workqueue(kmultipathd);
dm_unregister_target(&multipath_target);
kmem_cache_destroy(_mpio_cache);
return -ENOMEM;
}
DMINFO("version %u.%u.%u loaded",
multipath_target.version[0], multipath_target.version[1],
multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
{
int r;
destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
kmem_cache_destroy(_mpio_cache);
}
EXPORT_SYMBOL_GPL(dm_pg_init_complete);
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
......
@@ -16,7 +16,6 @@ struct dm_path {
unsigned is_active; /* Read-only */
void *pscontext; /* For path-selector use */
void *hwhcontext; /* For hw-handler use */
};
/* Callback for hwh_pg_init_fn to use when complete */
......
/*
* Copyright (c) 2000-2007 LSI Corporation.
* Copyright (c) 2000-2008 LSI Corporation.
*
*
* Name: mpi.h
......
/*
* Copyright (c) 2000-2007 LSI Corporation.
* Copyright (c) 2000-2008 LSI Corporation.
*
*
* Name: mpi_cnfg.h
......
@@ -5,7 +5,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
@@ -103,7 +103,7 @@ static int mfcounter = 0;
* Public data...
*/
struct proc_dir_entry *mpt_proc_root_dir;
static struct proc_dir_entry *mpt_proc_root_dir;
#define WHOINIT_UNKNOWN 0xAA
@@ -253,6 +253,55 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
return 0;
}
/**
* mpt_fault_reset_work - work performed on workq after ioc fault
* @work: input argument, used to derive ioc
*
**/
static void
mpt_fault_reset_work(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fault_reset_work.work);
u32 ioc_raw_state;
int rc;
unsigned long flags;
if (ioc->diagPending || !ioc->active)
goto out;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __FUNCTION__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
__FUNCTION__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
"reset (%04xh)\n", ioc->name, ioc_raw_state &
MPI_DOORBELL_DATA_MASK);
}
out:
/*
* Take turns polling alternate controller
*/
if (ioc->alt_ioc)
ioc = ioc->alt_ioc;
/* rearm the timer */
spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
if (ioc->reset_work_q)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
}
/*
* Process turbo (context) reply...
*/
@@ -1616,6 +1665,22 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Find lookup slot. */
INIT_LIST_HEAD(&ioc->list);
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
spin_lock_init(&ioc->fault_reset_work_lock);
snprintf(ioc->reset_work_q_name, KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id);
ioc->reset_work_q =
create_singlethread_workqueue(ioc->reset_work_q_name);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
pci_release_selected_regions(pdev, ioc->bars);
kfree(ioc);
return -ENOMEM;
}
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
@@ -1727,6 +1792,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
iounmap(ioc->memmap);
if (r != -5)
pci_release_selected_regions(pdev, ioc->bars);
destroy_workqueue(ioc->reset_work_q);
ioc->reset_work_q = NULL;
kfree(ioc);
pci_set_drvdata(pdev, NULL);
return r;
@@ -1759,6 +1828,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
}
#endif
if (!ioc->alt_ioc)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
return 0;
}
@@ -1774,6 +1847,19 @@ mpt_detach(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
char pname[32];
u8 cb_idx;
unsigned long flags;
struct workqueue_struct *wq;
/*
* Stop polling ioc for fault condition
*/
spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
wq = ioc->reset_work_q;
ioc->reset_work_q = NULL;
spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
cancel_delayed_work(&ioc->fault_reset_work);
destroy_workqueue(wq);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
remove_proc_entry(pname, NULL);
@@ -7456,7 +7542,6 @@ EXPORT_SYMBOL(mpt_resume);
EXPORT_SYMBOL(mpt_suspend);
#endif
EXPORT_SYMBOL(ioc_list);
EXPORT_SYMBOL(mpt_proc_root_dir);
EXPORT_SYMBOL(mpt_register);
EXPORT_SYMBOL(mpt_deregister);
EXPORT_SYMBOL(mpt_event_register);
......
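The mpt_detach() hunk above stops the self-rearming fault-poll work with a standard race-free pattern: clear the queue pointer under the same lock that the work function checks before it rearms, then cancel any pending arm and destroy the queue (which flushes a currently running instance). A minimal standalone sketch of that pattern with illustrative names (not taken from mptbase.c):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct poller {
	spinlock_t lock;
	struct workqueue_struct *wq;	/* NULL once teardown has begun */
	struct delayed_work work;
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);
	unsigned long flags;

	/* ... the periodic check would go here ... */

	spin_lock_irqsave(&p->lock, flags);
	if (p->wq)	/* rearm only while nobody is tearing us down */
		queue_delayed_work(p->wq, &p->work, msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&p->lock, flags);
}

static int poller_start(struct poller *p)
{
	spin_lock_init(&p->lock);
	INIT_DELAYED_WORK(&p->work, poll_fn);
	p->wq = create_singlethread_workqueue("example_poll");
	if (!p->wq)
		return -ENOMEM;
	queue_delayed_work(p->wq, &p->work, msecs_to_jiffies(1000));
	return 0;
}

static void poller_stop(struct poller *p)
{
	struct workqueue_struct *wq;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	wq = p->wq;
	p->wq = NULL;			/* forbid any further rearming */
	spin_unlock_irqrestore(&p->lock, flags);

	cancel_delayed_work(&p->work);	/* remove a pending, not-yet-running arm */
	destroy_workqueue(wq);		/* flushes a poll_fn that is mid-flight */
}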
@@ -5,7 +5,7 @@
* LSIFC9xx/LSI409xx Fibre Channel
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
@@ -73,11 +73,11 @@
#endif
#ifndef COPYRIGHT
#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.04.06"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.06"
#define MPT_LINUX_VERSION_COMMON "3.04.07"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -176,6 +176,8 @@
/* debug print string length used for events and iocstatus */
# define EVENT_DESCR_STR_SZ 100
#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */
#ifdef __KERNEL__ /* { */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -709,6 +711,12 @@ typedef struct _MPT_ADAPTER
struct workqueue_struct *fc_rescan_work_q;
struct scsi_cmnd **ScsiLookup;
spinlock_t scsi_lookup_lock;
char reset_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
spinlock_t fault_reset_work_lock;
} MPT_ADAPTER;
/*
@@ -919,7 +927,6 @@ extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhys
* Public data decl's...
*/
extern struct list_head ioc_list;
extern struct proc_dir_entry *mpt_proc_root_dir;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#endif /* } __KERNEL__ */
......
@@ -4,7 +4,7 @@
* For use with LSI PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
@@ -66,7 +66,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#define COPYRIGHT "Copyright (c) 1999-2007 LSI Corporation"
#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation"
#define MODULEAUTHOR "LSI Corporation"
#include "mptbase.h"
#include "mptctl.h"
......
@@ -5,7 +5,7 @@
* LSIFC9xx/LSI409xx Fibre Channel
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -3,7 +3,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -3,7 +3,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -4,7 +4,7 @@
* For use with LSI Fibre Channel PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Corporation
* Copyright (c) 2000-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -4,7 +4,7 @@
* For use with LSI Fibre Channel PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Corporation
* Copyright (c) 2000-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -3,7 +3,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
......
@@ -5,7 +5,7 @@
* LSIFC9xx/LSI409xx Fibre Channel
* running LSI MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -3,7 +3,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -5,7 +5,7 @@
* LSIFC9xx/LSI409xx Fibre Channel
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
......
@@ -3,7 +3,7 @@
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Corporation
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
@@ -447,6 +447,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;
spi_offset(starget) = 0;
spi_period(starget) = 0xFF;
mptspi_write_width(starget, 0);
return 0;
......
@@ -3,7 +3,6 @@
#
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
zfcp_sysfs_unit.o zfcp_sysfs_driver.o
zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
obj-$(CONFIG_ZFCP) += zfcp.o
This diff is collapsed.
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
* zfcp device driver
*
* (C) Copyright IBM Corp. 2002, 2006
* Registration and callback for the s390 common I/O layer.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* Copyright IBM Corporation 2002, 2008
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
static int zfcp_ccw_probe(struct ccw_device *);
static void zfcp_ccw_remove(struct ccw_device *);
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
static void zfcp_ccw_shutdown(struct ccw_device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
ZFCP_CONTROL_UNIT_MODEL,
ZFCP_DEVICE_TYPE,
ZFCP_DEVICE_MODEL)},
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
ZFCP_CONTROL_UNIT_MODEL,
ZFCP_DEVICE_TYPE,
ZFCP_DEVICE_MODEL_PRIV)},
{},
};
static struct ccw_driver zfcp_ccw_driver = {
.owner = THIS_MODULE,
.name = ZFCP_NAME,
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
.driver = {
.groups = zfcp_driver_attr_groups,
},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
/**
* zfcp_ccw_probe - probe function of zfcp driver
* @ccw_device: pointer to belonging ccw device
......@@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
* In addition, the nameserver port will be added to the adapter's port list
* and its sysfs representation will be created.
*/
static int
zfcp_ccw_probe(struct ccw_device *ccw_device)
static int zfcp_ccw_probe(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
int retval = 0;
down(&zfcp_data.config_sema);
adapter = zfcp_adapter_enqueue(ccw_device);
if (!adapter)
if (zfcp_adapter_enqueue(ccw_device)) {
dev_err(&ccw_device->dev,
"Setup of data structures failed.\n");
retval = -EINVAL;
else
ZFCP_LOG_DEBUG("Probed adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
}
up(&zfcp_data.config_sema);
return retval;
}
......@@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device)
* ports that belong to this adapter. In addition, all resources of the
* adapter are freed.
*/
static void
zfcp_ccw_remove(struct ccw_device *ccw_device)
static void zfcp_ccw_remove(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
......@@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&ccw_device->dev);
ZFCP_LOG_DEBUG("Removing adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
write_lock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
......@@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
* registered with the SCSI stack, that the QDIO queues will be set up
* and that the adapter will be opened (asynchronously).
*/
static int
zfcp_ccw_set_online(struct ccw_device *ccw_device)
static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
int retval;
......@@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
adapter = dev_get_drvdata(&ccw_device->dev);
retval = zfcp_erp_thread_setup(adapter);
if (retval) {
ZFCP_LOG_INFO("error: start of error recovery thread for "
"adapter %s failed\n",
zfcp_get_busid_by_adapter(adapter));
if (retval)
goto out;
}
retval = zfcp_adapter_scsi_register(adapter);
if (retval)
......@@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
* This function gets called by the common i/o layer and sets an adapter
* offline.
*/
static int
zfcp_ccw_set_offline(struct ccw_device *ccw_device)
static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
......@@ -206,15 +143,14 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
}
/**
* zfcp_ccw_notify
* zfcp_ccw_notify - ccw notify function
* @ccw_device: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
*
* This function gets called by the common i/o layer if an adapter has gone
* away or reappeared.
*/
static int
zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
{
struct zfcp_adapter *adapter;
......@@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
adapter = dev_get_drvdata(&ccw_device->dev);
switch (event) {
case CIO_GONE:
ZFCP_LOG_NORMAL("adapter %s: device gone\n",
zfcp_get_busid_by_adapter(adapter));
dev_warn(&adapter->ccw_device->dev, "device gone\n");
zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
break;
case CIO_NO_PATH:
ZFCP_LOG_NORMAL("adapter %s: no path\n",
zfcp_get_busid_by_adapter(adapter));
dev_warn(&adapter->ccw_device->dev, "no path\n");
zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
break;
case CIO_OPER:
ZFCP_LOG_NORMAL("adapter %s: operational again\n",
zfcp_get_busid_by_adapter(adapter));
dev_info(&adapter->ccw_device->dev, "operational again\n");
zfcp_erp_modify_adapter_status(adapter, 11, NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
......@@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
}
/**
* zfcp_ccw_register - ccw register function
*
* Registers the driver with the common i/o layer. This function will be called
* at module load time/system start.
*/
int __init
zfcp_ccw_register(void)
{
return ccw_driver_register(&zfcp_ccw_driver);
}
/**
* zfcp_ccw_shutdown - gets called on reboot/shutdown
*
* Makes sure that QDIO queues are down when the system gets stopped.
* zfcp_ccw_shutdown - handle shutdown from cio
* @cdev: ccw device of the adapter to shut down.
*/
static void
zfcp_ccw_shutdown(struct ccw_device *cdev)
static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
......@@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
up(&zfcp_data.config_sema);
}
#undef ZFCP_LOG_AREA
static struct ccw_device_id zfcp_ccw_device_id[] = {
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
{},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
static struct ccw_driver zfcp_ccw_driver = {
.owner = THIS_MODULE,
.name = "zfcp",
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
};
/**
* zfcp_ccw_register - ccw register function
*
* Registers the driver with the common i/o layer. This function will be called
* at module load time/system start.
*/
int __init zfcp_ccw_register(void)
{
return ccw_driver_register(&zfcp_ccw_driver);
}
/*
* zfcp device driver
*
* Userspace interface for accessing the
* Access Control Lists / Control File Data Channel
*
* Copyright IBM Corporation 2008
*/
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <asm/ccwdev.h>
#include "zfcp_def.h"
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
#define ZFCP_CFDC_DOWNLOAD 0x00000001
#define ZFCP_CFDC_UPLOAD 0x00000002
#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
#define ZFCP_CFDC_IOC_MAGIC 0xDD
#define ZFCP_CFDC_IOC \
_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
/**
* struct zfcp_cfdc_data - data for ioctl cfdc interface
* @signature: request signature
* @devno: FCP adapter device number
* @command: command code
* @fsf_status: status of the FSF command, returned to userspace
* @fsf_status_qual: FSF status qualifier, returned to userspace
* @payloads: access conflicts list
* @control_file: access control table
*/
struct zfcp_cfdc_data {
u32 signature;
u32 devno;
u32 command;
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
u8 payloads[256];
u8 control_file[0];
};
static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
void __user *user_buffer)
{
unsigned int length;
unsigned int size = ZFCP_CFDC_MAX_SIZE;
while (size) {
length = min((unsigned int)size, sg->length);
if (copy_from_user(sg_virt(sg++), user_buffer, length))
return -EFAULT;
user_buffer += length;
size -= length;
}
return 0;
}
static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
struct scatterlist *sg)
{
unsigned int length;
unsigned int size = ZFCP_CFDC_MAX_SIZE;
while (size) {
length = min((unsigned int) size, sg->length);
if (copy_to_user(user_buffer, sg_virt(sg++), length))
return -EFAULT;
user_buffer += length;
size -= length;
}
return 0;
}
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
struct zfcp_adapter *adapter = NULL, *cur_adapter;
struct ccw_dev_id dev_id;
read_lock_irq(&zfcp_data.config_lock);
list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
if (dev_id.devno == devno) {
adapter = cur_adapter;
zfcp_adapter_get(adapter);
break;
}
}
read_unlock_irq(&zfcp_data.config_lock);
return adapter;
}
static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
{
switch (command) {
case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
break;
case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
break;
case ZFCP_CFDC_CMND_FULL_ACCESS:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
break;
case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
break;
case ZFCP_CFDC_CMND_UPLOAD:
fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
fsf_cfdc->option = 0;
break;
default:
return -EINVAL;
}
return 0;
}
static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
u8 __user *control_file)
{
int retval;
retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
if (retval)
return retval;
sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
command & ZFCP_CFDC_DOWNLOAD) {
retval = zfcp_cfdc_copy_from_user(sg, control_file);
if (retval) {
zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
return -EFAULT;
}
}
return 0;
}
static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
struct zfcp_fsf_req *req)
{
data->fsf_status = req->qtcb->header.fsf_status;
memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
sizeof(union fsf_status_qual));
memcpy(&data->payloads, &req->qtcb->bottom.support.els,
sizeof(req->qtcb->bottom.support.els));
}
static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
unsigned long buffer)
{
struct zfcp_cfdc_data *data;
struct zfcp_cfdc_data __user *data_user;
struct zfcp_adapter *adapter;
struct zfcp_fsf_req *req;
struct zfcp_fsf_cfdc *fsf_cfdc;
int retval;
if (command != ZFCP_CFDC_IOC)
return -ENOTTY;
data_user = (void __user *) buffer;
if (!data_user)
return -EINVAL;
fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
if (!fsf_cfdc)
return -ENOMEM;
data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
if (!data) {
retval = -ENOMEM;
goto no_mem_sense;
}
retval = copy_from_user(data, data_user, sizeof(*data));
if (retval) {
retval = -EFAULT;
goto free_buffer;
}
if (data->signature != 0xCFDCACDF) {
retval = -EINVAL;
goto free_buffer;
}
retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
adapter = zfcp_cfdc_get_adapter(data->devno);
if (!adapter) {
retval = -ENXIO;
goto free_buffer;
}
retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
data_user->control_file);
if (retval)
goto adapter_put;
req = zfcp_fsf_control_file(adapter, fsf_cfdc);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto free_sg;
}
if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
retval = -ENXIO;
goto free_fsf;
}
zfcp_cfdc_req_to_sense(data, req);
retval = copy_to_user(data_user, data, sizeof(*data_user));
if (retval) {
retval = -EFAULT;
goto free_fsf;
}
if (data->command & ZFCP_CFDC_UPLOAD)
retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
fsf_cfdc->sg);
free_fsf:
zfcp_fsf_req_free(req);
free_sg:
zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
adapter_put:
zfcp_adapter_put(adapter);
free_buffer:
kfree(data);
no_mem_sense:
kfree(fsf_cfdc);
return retval;
}
static const struct file_operations zfcp_cfdc_fops = {
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
#endif
};
struct miscdevice zfcp_cfdc_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "zfcp_cfdc",
.fops = &zfcp_cfdc_fops,
};
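For orientation, a minimal userspace sketch of driving this interface follows. It assumes udev exposes the misc device as /dev/zfcp_cfdc and that ZFCP_CFDC_IOC, the command codes, and struct zfcp_cfdc_data from above are mirrored in a userspace header; the devno value is hypothetical.
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
int main(void)
{
	struct zfcp_cfdc_data *data;
	int fd, rc;
	/* header plus room for the uploaded control file (127k max) */
	data = calloc(1, sizeof(*data) + 127 * 1024);
	if (!data)
		return 1;
	data->signature = 0xCFDCACDF;	/* checked by zfcp_cfdc_dev_ioctl() */
	data->devno = 0x1234;		/* hypothetical FCP device number */
	data->command = ZFCP_CFDC_CMND_UPLOAD;
	fd = open("/dev/zfcp_cfdc", O_RDWR);
	if (fd < 0) {
		free(data);
		return 1;
	}
	rc = ioctl(fd, ZFCP_CFDC_IOC, data);
	if (rc)
		perror("ZFCP_CFDC_IOC");
	else
		printf("fsf_status 0x%08x\n", data->fsf_status);
	close(fd);
	free(data);
	return rc ? 1 : 0;
}
On success the handler copies the FSF status back and, for upload requests, also the control file contents into the flexible control_file[] tail of the user buffer.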
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
* zfcp device driver
*
* (C) Copyright IBM Corp. 2002, 2006
* Debug traces for zfcp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* Copyright IBM Corporation 2002, 2008
*/
#include <linux/ctype.h>
......@@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
"number of pages for each debug feature area (default 4)");
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
int level, char *from, int from_len)
{
......@@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
response->fsf_req_status = fsf_req->status;
response->sbal_first = fsf_req->sbal_first;
response->sbal_curr = fsf_req->sbal_curr;
response->sbal_last = fsf_req->sbal_last;
response->sbal_response = fsf_req->sbal_response;
response->pool = fsf_req->pool != NULL;
response->erp_action = (unsigned long)fsf_req->erp_action;
......@@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
rec->u.status.failed = adapter->status_read_failed;
rec->u.status.failed = atomic_read(&adapter->stat_miss);
if (status_buffer != NULL) {
rec->u.status.status_type = status_buffer->status_type;
rec->u.status.status_subtype = status_buffer->status_subtype;
......@@ -355,8 +340,8 @@ static void zfcp_hba_dbf_view_response(char **p,
FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
switch (r->fsf_command) {
......@@ -515,13 +500,13 @@ static const char *zfcp_rec_dbf_ids[] = {
[52] = "port boxed close unit",
[53] = "port boxed fcp",
[54] = "unit boxed fcp",
[55] = "port access denied ct",
[56] = "port access denied els",
[57] = "port access denied open port",
[58] = "port access denied close physical",
[59] = "unit access denied open unit",
[55] = "port access denied",
[56] = "",
[57] = "",
[58] = "",
[59] = "unit access denied",
[60] = "shared unit access denied open unit",
[61] = "unit access denied fcp",
[61] = "",
[62] = "request timeout",
[63] = "adisc link test reject or timeout",
[64] = "adisc link test d_id changed",
......@@ -546,8 +531,8 @@ static const char *zfcp_rec_dbf_ids[] = {
[80] = "exclusive read-only unit access unsupported",
[81] = "shared read-write unit access unsupported",
[82] = "incoming rscn",
[83] = "incoming plogi",
[84] = "incoming logo",
[83] = "incoming wwpn",
[84] = "",
[85] = "online",
[86] = "offline",
[87] = "ccw device gone",
......@@ -586,8 +571,8 @@ static const char *zfcp_rec_dbf_ids[] = {
[120] = "unknown fsf command",
[121] = "no recommendation for status qualifier",
[122] = "status read physical port closed in error",
[123] = "fc service class not supported ct",
[124] = "fc service class not supported els",
[123] = "fc service class not supported",
[124] = "",
[125] = "need newer zfcp",
[126] = "need newer microcode",
[127] = "arbitrated loop not supported",
......@@ -595,7 +580,7 @@ static const char *zfcp_rec_dbf_ids[] = {
[129] = "qtcb size mismatch",
[130] = "unknown fsf status ecd",
[131] = "fcp request too big",
[132] = "fc service class not supported fcp",
[132] = "",
[133] = "data direction not valid fcp",
[134] = "command length not valid fcp",
[135] = "status read act update",
......@@ -603,13 +588,18 @@ static const char *zfcp_rec_dbf_ids[] = {
[137] = "hbaapi port open",
[138] = "hbaapi unit open",
[139] = "hbaapi unit shutdown",
[140] = "qdio error",
[140] = "qdio error outbound",
[141] = "scsi host reset",
[142] = "dismissing fsf request for recovery action",
[143] = "recovery action timed out",
[144] = "recovery action gone",
[145] = "recovery action being processed",
[146] = "recovery action ready for next step",
[147] = "qdio error inbound",
[148] = "nameserver needed for port scan",
[149] = "port scan",
[150] = "ptp attach",
[151] = "port validation failed",
};
static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
......@@ -670,24 +660,20 @@ static struct debug_view zfcp_rec_dbf_view = {
* zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
* @id2: identifier for event
* @adapter: adapter
* @lock: non-zero value indicates that erp_lock has not yet been acquired
* This function assumes that the caller is holding erp_lock.
*/
void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
{
struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
unsigned long flags = 0;
struct list_head *entry;
unsigned ready = 0, running = 0, total;
if (lock)
read_lock_irqsave(&adapter->erp_lock, flags);
list_for_each(entry, &adapter->erp_ready_head)
ready++;
list_for_each(entry, &adapter->erp_running_head)
running++;
total = adapter->erp_total_count;
if (lock)
read_unlock_irqrestore(&adapter->erp_lock, flags);
spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
memset(r, 0, sizeof(*r));
......@@ -696,10 +682,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
r->u.thread.total = total;
r->u.thread.ready = ready;
r->u.thread.running = running;
debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
}
/**
* zfcp_rec_dbf_event_thread_lock - trace event related to recovery thread operation
* @id2: identifier for event
* @adapter: adapter
* This function assumes that the caller does not hold erp_lock.
*/
void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
{
unsigned long flags;
read_lock_irqsave(&adapter->erp_lock, flags);
zfcp_rec_dbf_event_thread(id2, adapter);
read_unlock_irqrestore(&adapter->erp_lock, flags);
}
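/* Editorial note, not part of the patch: callers that already hold
 * erp_lock use zfcp_rec_dbf_event_thread() directly; all other call
 * sites go through the _lock wrapper, which takes erp_lock around
 * the trace.
 */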
static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
struct zfcp_adapter *adapter,
atomic_t *status, atomic_t *erp_count,
......@@ -823,7 +824,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
r->u.action.status = erp_action->status;
r->u.action.step = erp_action->step;
r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
debug_event(adapter->rec_dbf, 4, r, sizeof(*r));
debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
}
......@@ -960,7 +961,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
fc_host_port_id(adapter->scsi_host),
*(u8 *)buf->payload, (void *)buf->payload,
buf->payload.data[0], (void *)buf->payload.data,
length);
}
......@@ -1064,8 +1065,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
if (fsf_req != NULL) {
fcp_rsp = (struct fcp_rsp_iu *)
&(fsf_req->qtcb->bottom.io.fcp_rsp);
fcp_rsp_info =
zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
fcp_sns_info =
zfcp_get_fcp_sns_info_ptr(fcp_rsp);
......@@ -1279,5 +1279,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
adapter->hba_dbf = NULL;
adapter->rec_dbf = NULL;
}
#undef ZFCP_LOG_AREA
......@@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread {
u32 total;
u32 ready;
u32 running;
} __attribute__ ((packed));
};
struct zfcp_rec_dbf_record_target {
u64 ref;
......@@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target {
u64 wwpn;
u64 fcp_lun;
u32 erp_count;
} __attribute__ ((packed));
};
struct zfcp_rec_dbf_record_trigger {
u8 want;
......@@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger {
u64 action;
u64 wwpn;
u64 fcp_lun;
} __attribute__ ((packed));
};
struct zfcp_rec_dbf_record_action {
u32 status;
u32 step;
u64 action;
u64 fsf_req;
} __attribute__ ((packed));
};
struct zfcp_rec_dbf_record {
u8 id;
......@@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record {
struct zfcp_rec_dbf_record_target target;
struct zfcp_rec_dbf_record_trigger trigger;
} u;
} __attribute__ ((packed));
};
enum {
ZFCP_REC_DBF_ID_ACTION,
......@@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response {
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
u32 fsf_req_status;
u8 sbal_first;
u8 sbal_curr;
u8 sbal_last;
u8 sbal_response;
u8 pool;
u64 erp_action;
union {
......
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
* zfcp device driver
*
* (C) Copyright IBM Corp. 2002, 2006
* Interface to the FSF support functions.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* Copyright IBM Corporation 2002, 2008
*/
#ifndef FSF_H
#define FSF_H
#include <linux/pfn.h>
#define FSF_QTCB_CURRENT_VERSION 0x00000001
/* FSF commands */
......@@ -258,6 +247,16 @@
#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
/* FSF interface for CFDC */
#define ZFCP_CFDC_MAX_SIZE (127 * 1024)
#define ZFCP_CFDC_PAGES PFN_UP(ZFCP_CFDC_MAX_SIZE)
struct zfcp_fsf_cfdc {
struct scatterlist sg[ZFCP_CFDC_PAGES];
u32 command;
u32 option;
};
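/* Editorial note: with the 4 KiB PAGE_SIZE on s390, ZFCP_CFDC_MAX_SIZE
 * is 127 * 1024 = 130048 bytes, so ZFCP_CFDC_PAGES = PFN_UP(130048) =
 * 32 scatterlist entries; the last entry covers 130048 % 4096 = 3072
 * bytes, exactly the length zfcp_cfdc_sg_setup() trims it to.
 */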
struct fsf_queue_designator {
u8 cssid;
u8 chpid;
......@@ -288,6 +287,18 @@ struct fsf_bit_error_payload {
u32 current_transmit_b2b_credit;
} __attribute__ ((packed));
struct fsf_link_down_info {
u32 error_code;
u32 res1;
u8 res2[2];
u8 primary_status;
u8 ioerr_code;
u8 action_code;
u8 reason_code;
u8 explanation_code;
u8 vendor_specific_code;
} __attribute__ ((packed));
struct fsf_status_read_buffer {
u32 status_type;
u32 status_subtype;
......@@ -298,7 +309,12 @@ struct fsf_status_read_buffer {
u32 class;
u64 fcp_lun;
u8 res3[24];
u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
union {
u8 data[FSF_STATUS_READ_PAYLOAD_SIZE];
u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
struct fsf_link_down_info link_down_info;
struct fsf_bit_error_payload bit_error;
} payload;
} __attribute__ ((packed));
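A small illustrative sketch, not part of this patch, of how the new typed payload views are addressed now that the flat byte array has become a union:
/* Illustrative only: handlers get typed views instead of casting. */
static u32 zfcp_example_link_error(struct fsf_status_read_buffer *buf)
{
	/* the raw byte view remains available as buf->payload.data[] */
	return buf->payload.link_down_info.error_code;
}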
struct fsf_qual_version_error {
......@@ -311,23 +327,19 @@ struct fsf_qual_sequence_error {
u32 res1[3];
} __attribute__ ((packed));
struct fsf_link_down_info {
u32 error_code;
u32 res1;
u8 res2[2];
u8 primary_status;
u8 ioerr_code;
u8 action_code;
u8 reason_code;
u8 explanation_code;
u8 vendor_specific_code;
struct fsf_qual_latency_info {
u32 channel_lat;
u32 fabric_lat;
u8 res1[8];
} __attribute__ ((packed));
union fsf_prot_status_qual {
u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
struct fsf_qual_version_error version_error;
struct fsf_qual_sequence_error sequence_error;
struct fsf_link_down_info link_down_info;
struct fsf_qual_latency_info latency_info;
} __attribute__ ((packed));
struct fsf_qtcb_prefix {
......@@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config {
u32 fc_link_speed;
u32 adapter_type;
u32 peer_d_id;
u8 res2[12];
u8 res1[2];
u16 timer_interval;
u8 res2[8];
u32 s_id;
struct fsf_nport_serv_param nport_serv_param;
u8 reserved_nport_serv_param[16];
......
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* (C) Copyright IBM Corp. 2002, 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
/**
* ZFCP_DEFINE_DRIVER_ATTR - define a loglevel sysfs attribute
* @_name: name of attribute
* @_define: name of ZFCP loglevel define
*
* Generates store and show functions for a sysfs loglevel attribute of the
* zfcp driver.
*/
#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
const char *buf, \
size_t count) \
{ \
unsigned int loglevel; \
unsigned int new_loglevel; \
char *endp; \
\
new_loglevel = simple_strtoul(buf, &endp, 0); \
if ((endp + 1) < (buf + count)) \
return -EINVAL; \
if (new_loglevel > 3) \
return -EINVAL; \
down(&zfcp_data.config_sema); \
loglevel = atomic_read(&zfcp_data.loglevel); \
loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
atomic_set(&zfcp_data.loglevel, loglevel); \
up(&zfcp_data.config_sema); \
return count; \
} \
\
static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
char *buf) \
{ \
return sprintf(buf,"%d\n", (unsigned int) \
ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
} \
\
static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
zfcp_sysfs_loglevel_##_name##_show, \
zfcp_sysfs_loglevel_##_name##_store);
ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
char *buf)
{
return sprintf(buf, "%s\n", zfcp_data.driver_version);
}
static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
static struct attribute *zfcp_driver_attrs[] = {
&driver_attr_loglevel_other.attr,
&driver_attr_loglevel_scsi.attr,
&driver_attr_loglevel_fsf.attr,
&driver_attr_loglevel_config.attr,
&driver_attr_loglevel_cio.attr,
&driver_attr_loglevel_qdio.attr,
&driver_attr_loglevel_erp.attr,
&driver_attr_loglevel_fc.attr,
&driver_attr_version.attr,
NULL
};
static struct attribute_group zfcp_driver_attr_group = {
.attrs = zfcp_driver_attrs,
};
struct attribute_group *zfcp_driver_attr_groups[] = {
&zfcp_driver_attr_group,
NULL,
};
#undef ZFCP_LOG_AREA
......@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
To compile this driver as a module, choose M here: the
module will be called ibmvstgt.
config SCSI_IBMVFC
tristate "IBM Virtual FC support"
depends on PPC_PSERIES && SCSI
select SCSI_FC_ATTRS
help
This is the IBM POWER Virtual FC Client driver.
To compile this driver as a module, choose M here: the
module will be called ibmvfc.
config SCSI_IBMVFC_TRACE
bool "enable driver internal trace"
depends on SCSI_IBMVFC
default y
help
If you say Y here, the driver will trace all commands issued
to the adapter. The performance impact is minimal. The trace
can be dumped using /sys/class/scsi_host/hostXX/trace.
config SCSI_INITIO
tristate "Initio 9100U(W) support"
depends on PCI && SCSI
......@@ -1738,10 +1757,12 @@ config SCSI_SUNESP
select SCSI_SPI_ATTRS
help
This is the driver for the Sun ESP SCSI host adapter. The ESP
chipset is present in most SPARC SBUS-based computers.
chipset is present in most SPARC SBUS-based computers and
supports the Emulex family of ESP SCSI chips (esp100, esp100A,
esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
To compile this driver as a module, choose M here: the
module will be called esp.
module will be called sun_esp.
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
......@@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"
source "drivers/scsi/device_handler/Kconfig"
endmenu
......@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
......@@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_SRP) += libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
......
......@@ -41,6 +41,7 @@
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
......@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
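/* reject SG elements larger than the adapter can transfer in one
 * request: max_sectors << 9 bytes for new-comm adapters, 64 KiB
 * otherwise */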
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
......@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
......@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
......@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
......