Commit e3d6f909 authored by Andy Grover, committed by Nicholas Bellinger

target: Core cleanups from AGrover (round 1)

This patch contains the squashed version of a number of cleanups and
minor fixes from Andy's initial series (round 1) for target core this
past spring.  The condensed log looks like:

target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes
Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent a8c6da90
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_task->t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
@@ -176,7 +176,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
-	if (T_TASK(se_cmd)->t_tasks_bidi) {
+	if (se_cmd->t_task->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		mem_bidi_ptr = (void *)sdb->table.sgl;
@@ -1402,9 +1402,9 @@ static int tcm_loop_register_configfs(void)
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
+	if (IS_ERR(fabric)) {
		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+		return PTR_ERR(fabric);
	}
	/*
	 * Setup the fabric API of function pointers used by target_core_mod
......
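The IS_ERR()/PTR_ERR() conversion above is the kernel's error-pointer convention: a pointer-returning function encodes a negative errno in the pointer value itself instead of returning NULL and losing the cause. A minimal userspace sketch of the idea; the helpers are re-implemented here for illustration, and fabric_init() is a made-up stand-in for target_fabric_configfs_init(), not the real API:

#include <stdio.h>

#define MAX_ERRNO 4095

/* The kernel encodes a small negative errno in an otherwise invalid
 * pointer value; these three helpers are all the convention needs. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in that fails with -ENOMEM (12) when asked to. */
static void *fabric_init(int fail)
{
	static char fabric[] = "fabric";
	return fail ? ERR_PTR(-12) : fabric;
}

int main(void)
{
	void *fabric = fabric_init(1);

	if (IS_ERR(fabric)) {
		printf("init failed: %ld\n", PTR_ERR(fabric)); /* -12 */
		return 1;
	}
	return 0;
}

The caller can now forward the real failure reason with return PTR_ERR(fabric), which is exactly what the hunk above does in place of the old bare -1.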
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
		lun_access = deve->lun_flags;
	else
		lun_access =
-		    (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+		    (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
			se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
				   TRANSPORT_LUNFLAGS_READ_WRITE;
	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
@@ -204,7 +204,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
		" Mapped LUN: %u Write Protect bit to %s\n",
-		TPG_TFO(se_tpg)->get_fabric_name(),
+		se_tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");

	return count;
@@ -379,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
	lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
	lacl_cg->default_groups[1] = NULL;

-	ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
	ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
				GFP_KERNEL);
	if (!ml_stat_grp->default_groups) {
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
	int i;

-	ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
	for (i = 0; ml_stat_grp->default_groups[i]; i++) {
		df_item = &ml_stat_grp->default_groups[i]->cg_item;
		ml_stat_grp->default_groups[i] = NULL;
@@ -914,7 +914,7 @@ static struct config_group *target_fabric_make_lun(
	lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
	lun_cg->default_groups[1] = NULL;

-	port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+	port_stat_grp = &lun->port_stat_grps.stat_group;
	port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
				GFP_KERNEL);
	if (!port_stat_grp->default_groups) {
@@ -941,7 +941,7 @@ static void target_fabric_drop_lun(
	struct config_group *lun_cg, *port_stat_grp;
	int i;

-	port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+	port_stat_grp = &lun->port_stat_grps.stat_group;
	for (i = 0; port_stat_grp->default_groups[i]; i++) {
		df_item = &port_stat_grp->default_groups[i]->cg_item;
		port_stat_grp->default_groups[i] = NULL;
......
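The TPG_TFO()/ML_STAT_GRPS()/PORT_STAT_GRP() conversions above are all instances of one changelog entry: "Remove #defines that just perform indirection". A toy illustration of why such macros add nothing; the types below are made up, not the real TCM structures:

#include <stdio.h>

struct ops { const char *(*get_fabric_name)(void); };
struct se_portal_group { const struct ops *se_tpg_tfo; };

/* Before: the macro hides a single pointer dereference and nothing else. */
#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo)

static const char *name(void) { return "loopback"; }

int main(void)
{
	const struct ops ops = { .get_fabric_name = name };
	struct se_portal_group tpg = { .se_tpg_tfo = &ops };

	/* Both forms compile to the same access; the direct one greps
	 * better and keeps the data layout visible at the call site. */
	printf("%s\n", TPG_TFO(&tpg)->get_fabric_name());
	printf("%s\n", tpg.se_tpg_tfo->get_fabric_name());
	return 0;
}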
@@ -67,22 +67,19 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!(fd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
-		return -1;
+		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

-	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
-	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
-	hba->hba_ptr = (void *) fd_host;
+	hba->hba_ptr = fd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
-		hba->hba_id, fd_host->fd_host_id,
-		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+		" MaxSectors: %u\n",
+		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}
@@ -282,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd)
		return NULL;
	}

-	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+	fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr;

	return &fd_req->fd_task;
}
@@ -294,13 +291,14 @@ static int fd_do_readv(struct se_task *task)
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	loff_t pos = (task->task_lba *
+		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
-		return -1;
+		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
@@ -324,13 +322,13 @@ static int fd_do_readv(struct se_task *task)
			printk(KERN_ERR "vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
-			return -1;
+			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			printk(KERN_ERR "vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
-			return -1;
+			return ret;
		}
	}
@@ -344,13 +342,14 @@ static int fd_do_writev(struct se_task *task)
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	loff_t pos = (task->task_lba *
+		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
-		return -1;
+		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
@@ -367,7 +366,7 @@ static int fd_do_writev(struct se_task *task)
	if (ret < 0 || ret != task->task_size) {
		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
-		return -1;
+		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
@@ -375,7 +374,7 @@ static int fd_do_writev(struct se_task *task)

static void fd_emulate_sync_cache(struct se_task *task)
{
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
@@ -396,7 +395,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
		start = 0;
		end = LLONG_MAX;
	} else {
-		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+		start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
@@ -446,7 +445,7 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
	loff_t end = start + task->task_size;
	int ret;
@@ -474,9 +473,9 @@ static int fd_do_task(struct se_task *task)
		ret = fd_do_writev(task);

		if (ret > 0 &&
-			DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
-			DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
-			T_TASK(cmd)->t_tasks_fua) {
+			dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
+			dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+			cmd->t_task->t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
@@ -599,7 +598,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		printk(KERN_ERR "Missing fd_dev_name=\n");
-		return -1;
+		return -EINVAL;
	}

	return 0;
@@ -654,7 +653,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
-			DEV_ATTRIB(dev)->block_size);
+			dev->se_sub_dev->se_dev_attrib.block_size);

	return blocks_long;
}
......
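The fd_do_readv()/fd_do_writev() hunks share one pattern worth calling out: when the callee already produced a -errno, forward it; substitute -EINVAL only for the short-I/O case that has no errno of its own. A self-contained sketch of that rule; fake_vfs_readv() is a stand-in, not the real VFS call:

#include <errno.h>
#include <stdio.h>

/* Stand-in for vfs_readv(): returns bytes read, or a negative errno. */
static int fake_vfs_readv(int fail, int nread)
{
	return fail ? -EIO : nread;
}

static int do_read(int fail, int expected)
{
	int ret = fake_vfs_readv(fail, expected / 2);

	if (ret < 0 || ret != expected)
		return ret < 0 ? ret : -EINVAL; /* keep the real cause if there is one */
	return 1;
}

int main(void)
{
	printf("I/O error:  %d (-EIO is %d)\n", do_read(1, 4096), -EIO);
	printf("short read: %d (-EINVAL is %d)\n", do_read(0, 4096), -EINVAL);
	return 0;
}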
@@ -4,8 +4,6 @@
#define FD_VERSION		"4.0"

#define FD_MAX_DEV_NAME		256
-/* Maximum queuedepth for the FILEIO HBA */
-#define FD_HBA_QUEUE_DEPTH	256
#define FD_DEVICE_QUEUE_DEPTH	32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE		512
......
/*******************************************************************************
 * Filename:  target_core_hba.c
 *
- * This file copntains the iSCSI HBA Transport related functions.
+ * This file contains the TCM HBA Transport related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);

+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
	struct se_subsystem_api *s;
@@ -110,15 +115,11 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
	INIT_LIST_HEAD(&hba->hba_dev_list);
	spin_lock_init(&hba->device_lock);
-	spin_lock_init(&hba->hba_queue_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;
-
-	atomic_set(&hba->max_queue_depth, 0);
-	atomic_set(&hba->left_queue_depth, 0);

	hba->transport = core_get_backend(plugin_name);
	if (!hba->transport) {
		ret = -EINVAL;
@@ -129,10 +130,10 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
	if (ret < 0)
		goto out_module_put;

-	spin_lock(&se_global->hba_lock);
-	hba->hba_id = se_global->g_hba_id_counter++;
-	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
-	spin_unlock(&se_global->hba_lock);
+	spin_lock(&hba_lock);
+	hba->hba_id = hba_id_counter++;
+	list_add_tail(&hba->hba_node, &hba_list);
+	spin_unlock(&hba_lock);

	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
		" Core\n", hba->hba_id);
@@ -156,9 +157,9 @@ core_delete_hba(struct se_hba *hba)
	hba->transport->detach_hba(hba);

-	spin_lock(&se_global->hba_lock);
-	list_del(&hba->hba_list);
-	spin_unlock(&se_global->hba_lock);
+	spin_lock(&hba_lock);
+	list_del(&hba->hba_node);
+	spin_unlock(&hba_lock);

	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
		" Core\n", hba->hba_id);
......
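The hba_lock/hba_list/hba_id_counter statics above are the visible half of the "Remove struct se_global" cleanup: state that used to hang off one shared global object becomes private, file-scope data guarded by its own lock. A userspace sketch of the same structure, with a pthread mutex standing in for the kernel spinlock and a hand-rolled list for list_head:

#include <pthread.h>
#include <stdio.h>

struct hba { int id; struct hba *next; };

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hba *hba_list;		/* was se_global->g_hba_list */
static unsigned int hba_id_counter;	/* was se_global->g_hba_id_counter */

static void hba_attach(struct hba *hba)
{
	pthread_mutex_lock(&hba_lock);
	hba->id = hba_id_counter++;	/* id assignment and insertion share one lock */
	hba->next = hba_list;
	hba_list = hba;
	pthread_mutex_unlock(&hba_lock);
}

int main(void)
{
	struct hba a, b;

	hba_attach(&a);
	hba_attach(&b);
	printf("ids: %d %d\n", a.id, b.id);
	return 0;
}

Nothing outside the owning file can reach this state anymore, which is the point of the cleanup.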
@@ -74,17 +74,14 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)

	ib_host->iblock_host_id = host_id;

-	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
-	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) ib_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

-	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
-		" Target Core TCQ Depth: %d\n", hba->hba_id,
-		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+		hba->hba_id, ib_host->iblock_host_id);

	return 0;
}
@@ -188,15 +185,15 @@ static struct se_device *iblock_create_virtdevice(
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
-		DEV_ATTRIB(dev)->max_unmap_lba_count =
+		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
			q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
-		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
-		DEV_ATTRIB(dev)->unmap_granularity =
+		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
			q->limits.discard_granularity;
-		DEV_ATTRIB(dev)->unmap_granularity_alignment =
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
			q->limits.discard_alignment;

		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
@@ -243,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd)
		return NULL;
	}

-	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+	ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr;
	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}
@@ -257,12 +254,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

-	if (block_size == DEV_ATTRIB(dev)->block_size)
+	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
@@ -276,7 +273,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
		}
		break;
	case 2048:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
@@ -291,7 +288,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
		}
		break;
	case 1024:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
@@ -306,7 +303,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
		}
		break;
	case 512:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
@@ -332,9 +329,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
	sector_t error_sector;
	int ret;
@@ -401,9 +398,9 @@ static int iblock_do_task(struct se_task *task)
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
-		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
-		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
-		     T_TASK(task->task_se_cmd)->t_tasks_fua))
+		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		     task->task_se_cmd->t_task->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
@@ -527,7 +524,7 @@ static ssize_t iblock_check_configfs_dev_params(
	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
-		return -1;
+		return -EINVAL;
	}

	return 0;
@@ -611,7 +608,7 @@ static struct bio *iblock_get_bio(
static int iblock_map_task_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
@@ -623,17 +620,17 @@ static int iblock_map_task_SG(struct se_task *task)
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
-	if (DEV_ATTRIB(dev)->block_size == 4096)
+	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
-	else if (DEV_ATTRIB(dev)->block_size == 2048)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
-	else if (DEV_ATTRIB(dev)->block_size == 1024)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
-	else if (DEV_ATTRIB(dev)->block_size == 512)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
-			" %u\n", DEV_ATTRIB(dev)->block_size);
+			" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
......
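The block_size ladder in iblock_map_task_SG() converts a SCSI logical block address into the 512-byte sector units the BIO layer expects; each supported block size is a power-of-two multiple of 512, so the conversion is a left shift by log2(block_size / 512). A compact sketch of that arithmetic, outside any kernel context:

#include <stdio.h>

static unsigned long long to_block_lba(unsigned long long task_lba,
				       unsigned int block_size)
{
	switch (block_size) {
	case 4096: return task_lba << 3;	/* 8 sectors per block */
	case 2048: return task_lba << 2;	/* 4 sectors per block */
	case 1024: return task_lba << 1;	/* 2 sectors per block */
	case 512:  return task_lba;		/* already in sectors */
	default:   return ~0ULL;		/* unsupported; caller must check */
	}
}

int main(void)
{
	/* LBA 10 of a 4096-byte-block device starts at 512-byte sector 80. */
	printf("%llu\n", to_block_lba(10, 4096));
	return 0;
}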
@@ -3,7 +3,6 @@
#define IBLOCK_VERSION		"4.0"

-#define IBLOCK_HBA_QUEUE_DEPTH	512
#define IBLOCK_DEVICE_QUEUE_DEPTH	32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH	128
#define IBLOCK_MAX_CDBS		16
......
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
			char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
-			struct t10_reservation_template *, u64,
+			struct t10_reservation *, u64,
			unsigned char *, unsigned char *, u32,
			unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
......
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;

static void pscsi_req_done(struct request *, int);

-/*	pscsi_get_sh():
- *
- *
- */
-static struct Scsi_Host *pscsi_get_sh(u32 host_no)
-{
-	struct Scsi_Host *sh = NULL;
-
-	sh = scsi_host_lookup(host_no);
-	if (IS_ERR(sh)) {
-		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
-			" %u\n", host_no);
-		return NULL;
-	}
-
-	return sh;
-}
-
/*	pscsi_attach_hba():
 *
 *	pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
 */
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
-	int hba_depth;
	struct pscsi_hba_virt *phv;

	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
	if (!(phv)) {
		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
-		return -1;
+		return -ENOMEM;
	}
	phv->phv_host_id = host_id;
	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
-	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
-	atomic_set(&hba->left_queue_depth, hba_depth);
-	atomic_set(&hba->max_queue_depth, hba_depth);

	hba->hba_ptr = (void *)phv;

	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
-	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
-		" Target Core with TCQ Depth: %d\n", hba->hba_id,
-		atomic_read(&hba->max_queue_depth));
+	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+		hba->hba_id);

	return 0;
}
@@ -130,7 +107,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
-	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
	/*
	 * Release the struct Scsi_Host
	 */
@@ -140,8 +116,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
		phv->phv_lld_host = NULL;
		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
-		atomic_set(&hba->left_queue_depth, hba_depth);
-		atomic_set(&hba->max_queue_depth, hba_depth);

		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
			" %s\n", hba->hba_id, (sh->hostt->name) ?
@@ -154,22 +128,12 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
	 * Otherwise, locate struct Scsi_Host from the original passed
	 * pSCSI Host ID and enable for phba mode
	 */
-	sh = pscsi_get_sh(phv->phv_host_id);
-	if (!(sh)) {
+	sh = scsi_host_lookup(phv->phv_host_id);
+	if (IS_ERR(sh)) {
		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
			" phv_host_id: %d\n", phv->phv_host_id);
-		return -1;
+		return PTR_ERR(sh);
	}
-	/*
-	 * Usually the SCSI LLD will use the hostt->can_queue value to define
-	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
-	 * this at all and set sh->can_queue at runtime.
-	 */
-	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
-		sh->hostt->can_queue : sh->can_queue;
-	atomic_set(&hba->left_queue_depth, hba_depth);
-	atomic_set(&hba->max_queue_depth, hba_depth);

	phv->phv_lld_host = sh;
	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
-		return -1;
+		return -ENOMEM;
	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
out_free:
	kfree(buf);
-	return -1;
+	return -EPERM;
}

static void
@@ -601,11 +565,11 @@ static struct se_device *pscsi_create_virtdevice(
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
-			sh = pscsi_get_sh(pdv->pdv_host_id);
-			if (!(sh)) {
+			sh = scsi_host_lookup(pdv->pdv_host_id);
+			if (IS_ERR(sh)) {
				printk(KERN_ERR "pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
-				return ERR_PTR(-ENODEV);
+				return (struct se_device *) sh;
			}
		}
	} else {
@@ -728,13 +692,12 @@ static int pscsi_transport_complete(struct se_task *task)
	 */
	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
-		if (!TASK_CMD(task)->se_deve)
+		if (!task->task_se_cmd->se_deve)
			goto after_mode_sense;

-		if (TASK_CMD(task)->se_deve->lun_flags &
+		if (task->task_se_cmd->se_deve->lun_flags &
				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = (unsigned char *)
-					T_TASK(task->task_se_cmd)->t_task_buf;
+			unsigned char *buf = task->task_se_cmd->t_task->t_task_buf;

			if (cdb[0] == MODE_SENSE_10) {
				if (!(buf[3] & 0x80))
@@ -800,7 +763,7 @@ static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
{
	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+	unsigned char *cdb = cmd->t_task->t_task_cdb;

	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
	if (!pt) {
@@ -813,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
	 * allocate the extended CDB buffer for per struct se_task context
	 * pt->pscsi_cdb now.
	 */
-	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+	if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) {
		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
		if (!(pt->pscsi_cdb)) {
@@ -926,7 +889,7 @@ static void pscsi_free_task(struct se_task *task)
	 * Release the extended CDB allocation from pscsi_alloc_task()
	 * if one exists.
	 */
-	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+	if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb)
		kfree(pt->pscsi_cdb);
	/*
	 * We do not release the bio(s) here associated with this task, as
@@ -1030,7 +993,7 @@ static ssize_t pscsi_check_configfs_dev_params(
	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
			" scsi_lun_id= parameters\n");
-		return -1;
+		return -EINVAL;
	}

	return 0;
@@ -1291,7 +1254,7 @@ static int pscsi_map_task_SG(struct se_task *task)
 */
static int pscsi_map_task_non_SG(struct se_task *task)
{
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	int ret = 0;
@@ -1303,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
		return 0;

	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
-			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+			pt->pscsi_req, cmd->t_task->t_task_buf,
			task->task_size, GFP_KERNEL);
	if (ret < 0) {
		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
@@ -1400,13 +1363,11 @@ static inline void pscsi_process_SAM_status(
			pt->pscsi_result);
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-		TASK_CMD(task)->transport_error_status =
+		task->task_se_cmd->transport_error_status =
			PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		transport_complete_task(task, 0);
		break;
	}
-
-	return;
}

static void pscsi_req_done(struct request *req, int uptodate)
......
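One subtlety in pscsi_create_virtdevice() above: the ERR_PTR returned by scsi_host_lookup() is handed back cast to a different pointer type, return (struct se_device *) sh. That works because the encoded -errno lives in the pointer bits, so the cast preserves it. A userspace sketch with toy stand-ins (these are not the real SCSI or TCM structures):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long e) { return (void *)e; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct scsi_host { int host_no; };
struct se_device { int id; };

static struct scsi_host *host_lookup(int fail)
{
	static struct scsi_host sh = { 7 };
	return fail ? ERR_PTR(-ENODEV) : &sh;
}

static struct se_device *create_virtdevice(void)
{
	struct scsi_host *sh = host_lookup(1);

	if (IS_ERR(sh))
		return (struct se_device *)sh;	/* the encoded errno survives the cast */
	return NULL;
}

int main(void)
{
	printf("%ld\n", PTR_ERR(create_virtdevice()));	/* prints -ENODEV */
	return 0;
}

This is also why deleting pscsi_get_sh() helps: the old wrapper flattened the ERR_PTR into NULL and discarded the failure reason.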
@@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H

#define PSCSI_VERSION		"v4.0"
-#define PSCSI_VIRTUAL_HBA_DEPTH	2048

/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
......
@@ -66,17 +66,14 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)

	rd_host->rd_host_id = host_id;

-	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
-	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
-		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
-		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
-		RD_MAX_SECTORS);
+		" MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, RD_MAX_SECTORS);

	return 0;
}
@@ -339,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd)
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
-	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+	rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr;

	return &rd_req->rd_task;
}
@@ -383,7 +380,7 @@ static int rd_MEMCPY_read(struct rd_request *req)

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
-		return -1;
+		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
@@ -481,7 +478,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
#endif
			table = rd_get_sg_table(dev, req->rd_page);
			if (!(table))
-				return -1;
+				return -EINVAL;

			sg_s = &table->sg_table[j = 0];
		}
@@ -506,7 +503,7 @@ static int rd_MEMCPY_write(struct rd_request *req)

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
-		return -1;
+		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
@@ -604,7 +601,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
#endif
			table = rd_get_sg_table(dev, req->rd_page);
			if (!(table))
-				return -1;
+				return -EINVAL;

			sg_d = &table->sg_table[j = 0];
		}
@@ -623,11 +620,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
	unsigned long long lba;
	int ret;

-	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
-			(PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
-			DEV_ATTRIB(dev)->block_size;
+			(PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
+			dev->se_sub_dev->se_dev_attrib.block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
@@ -664,7 +661,7 @@ static int rd_DIRECT_with_offset(

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
-		return -1;
+		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
@@ -678,7 +675,7 @@ static int rd_DIRECT_with_offset(
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			return -1;
+			return -ENOMEM;
		}
		INIT_LIST_HEAD(&se_mem->se_list);
@@ -734,13 +731,13 @@ static int rd_DIRECT_with_offset(
#endif
			table = rd_get_sg_table(dev, req->rd_page);
			if (!(table))
-				return -1;
+				return -EINVAL;

			sg_s = &table->sg_table[j = 0];
		}

out:
-	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
@@ -767,7 +764,7 @@ static int rd_DIRECT_without_offset(

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
-		return -1;
+		return -EINVAL;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
@@ -780,7 +777,7 @@ static int rd_DIRECT_without_offset(
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			return -1;
+			return -ENOMEM;
		}
		INIT_LIST_HEAD(&se_mem->se_list);
@@ -816,13 +813,13 @@ static int rd_DIRECT_without_offset(
#endif
			table = rd_get_sg_table(dev, req->rd_page);
			if (!(table))
-				return -1;
+				return -EINVAL;

			sg_s = &table->sg_table[j = 0];
		}

out:
-	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
@@ -848,13 +845,11 @@ static int rd_DIRECT_do_se_mem_map(
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;
+	int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;

-	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
-			PAGE_SIZE);
	lba = task->task_lba;
-	req->rd_offset = (do_div(lba,
-			(PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
-			DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE);
+	req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
@@ -867,7 +862,7 @@ static int rd_DIRECT_do_se_mem_map(
	if (ret < 0)
		return ret;

-	if (CMD_TFO(cmd)->task_sg_chaining == 0)
+	if (cmd->se_tfo->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
@@ -876,7 +871,7 @@ static int rd_DIRECT_do_se_mem_map(
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
			" RAMDISK_DR with task_sg_chaining=1\n");
-		return -1;
+		return -ENOSYS;
	}
	/*
	 * Special case for if task_sg_chaining is enabled, then
@@ -884,14 +879,15 @@ static int rd_DIRECT_do_se_mem_map(
	 * transport_do_task_sg_chain() for creating chainged SGLs
	 * across multiple struct se_task->task_sg[].
	 */
-	if (!(transport_calc_sg_num(task,
-	    list_entry(T_TASK(cmd)->t_mem_list->next,
-		       struct se_mem, se_list),
-	    task_offset)))
-		return -1;
+	ret = transport_init_task_sg(task,
+			list_entry(cmd->t_task->t_mem_list->next,
+				   struct se_mem, se_list),
+			task_offset);
+	if (ret <= 0)
+		return ret;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-		list_entry(T_TASK(cmd)->t_mem_list->next,
+		list_entry(cmd->t_task->t_mem_list->next,
			   struct se_mem, se_list),
		out_se_mem, se_mem_cnt, task_offset_in);
}
@@ -975,7 +971,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
-		return -1;
+		return -EINVAL;
	}

	return 0;
@@ -1021,7 +1017,7 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
-			DEV_ATTRIB(dev)->block_size) - 1;
+			dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}
......
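The transport_calc_sg_num() to transport_init_task_sg() change in rd_DIRECT_do_se_mem_map() also changes the return contract: a u32 count where 0 meant failure becomes an int that is either a positive segment count or a negative errno, which is why the caller now tests ret <= 0 and forwards ret. A sketch of the two contracts side by side; the numbers and names are illustrative, not the real SGL math:

#include <errno.h>
#include <stdio.h>

/* New-style stand-in: positive count on success, -errno on failure. */
static int init_task_sg(int nsegs, int oom)
{
	if (oom)
		return -ENOMEM;		/* old style returned 0 here; the cause was lost */
	return nsegs;			/* > 0 on success */
}

static int caller(void)
{
	int ret = init_task_sg(4, 0);

	if (ret <= 0)			/* mirrors the rd_DIRECT_do_se_mem_map() check */
		return ret;
	return 0;
}

int main(void)
{
	printf("%d\n", caller());
	return 0;
}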
@@ -7,8 +7,6 @@

/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE	65536
-/* Maximum queuedepth for the Ramdisk HBA */
-#define RD_HBA_QUEUE_DEPTH	256
#define RD_DEVICE_QUEUE_DEPTH	32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE		512
......
@@ -80,10 +80,10 @@ int core_scsi3_ua_check(
	case REQUEST_SENSE:
		return 0;
	default:
-		return -1;
+		return -EINVAL;
	}

-	return -1;
+	return -EINVAL;
}

int core_scsi3_ua_allocate(
@@ -98,12 +98,12 @@ int core_scsi3_ua_allocate(
	 * PASSTHROUGH OPS
	 */
	if (!(nacl))
-		return -1;
+		return -EINVAL;

	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
	if (!(ua)) {
		printk(KERN_ERR "Unable to allocate struct se_ua\n");
-		return -1;
+		return -ENOMEM;
	}
	INIT_LIST_HEAD(&ua->ua_dev_list);
	INIT_LIST_HEAD(&ua->ua_nacl_list);
@@ -179,7 +179,7 @@ int core_scsi3_ua_allocate(
	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
		" 0x%02x, ASCQ: 0x%02x\n",
-		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
		asc, ascq);

	atomic_inc(&deve->ua_count);
@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
	u8 *asc,
	u8 *ascq)
{
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
		 * clearing it.
		 */
-		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+		if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			break;
@@ -267,10 +267,10 @@ void core_scsi3_ua_for_check_condition(
	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
-		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
-		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
-		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
-		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq);
}

int core_scsi3_ua_clear_for_request_sense(
@@ -285,17 +285,17 @@ int core_scsi3_ua_clear_for_request_sense(
	int head = 1;

	if (!(sess))
-		return -1;
+		return -EINVAL;

	nacl = sess->se_node_acl;
	if (!(nacl))
-		return -1;
+		return -EINVAL;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count))) {
		spin_unlock_irq(&nacl->device_list_lock);
-		return -1;
+		return -EPERM;
	}
	/*
	 * The highest priority Unit Attentions are placed at the head of the
@@ -325,8 +325,8 @@ int core_scsi3_ua_clear_for_request_sense(
	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
-		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
		cmd->orig_fe_lun, *asc, *ascq);

-	return (head) ? -1 : 0;
+	return (head) ? -EPERM : 0;
}
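One detail of the core_scsi3_ua_* conversions above: the bare -1 returns are split by cause, -EINVAL for a missing session or ACL (a caller mistake) and -EPERM when there is simply no unit attention to release. A toy sketch of that split; ua_clear() is a hypothetical condensation, not the real function:

#include <errno.h>
#include <stdio.h>

static int ua_clear(int have_nacl, int ua_count, int head)
{
	if (!have_nacl)
		return -EINVAL;	/* bad input from the caller */
	if (!ua_count)
		return -EPERM;	/* nothing to release */
	return head ? -EPERM : 0;
}

int main(void)
{
	printf("%d %d %d\n", ua_clear(0, 0, 0), ua_clear(1, 0, 0),
	       ua_clear(1, 2, 0));
	return 0;
}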
@@ -72,7 +72,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
		caller, cmd, cmd->cdb);
	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

-	task = T_TASK(se_cmd);
+	task = se_cmd->t_task;
	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
		caller, cmd, task, task->t_tasks_se_num,
		task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
@@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
			 * TCM/LIO target
			 */
			transport_do_task_sg_chain(se_cmd);
-			cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
+			cmd->sg = se_cmd->t_task->t_tasks_sg_chained;
			cmd->sg_cnt =
-				T_TASK(se_cmd)->t_tasks_sg_chained_no;
+				se_cmd->t_task->t_tasks_sg_chained_no;
		}
		if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
					    cmd->sg, cmd->sg_cnt))
@@ -670,7 +670,6 @@ static void ft_send_cmd(struct ft_cmd *cmd)

err:
	ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
-	return;
}

/*
......
@@ -582,10 +582,10 @@ int ft_register_configfs(void)
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
-	if (!fabric) {
+	if (IS_ERR(fabric)) {
		printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
		       __func__);
-		return -1;
+		return PTR_ERR(fabric);
	}
	fabric->tf_ops = ft_fabric_ops;
......
@@ -90,7 +90,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

-	task = T_TASK(se_cmd);
+	task = se_cmd->t_task;
	BUG_ON(!task);
	remaining = se_cmd->data_length;
@@ -236,7 +236,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
	u32 f_ctl;
	void *buf;

-	task = T_TASK(se_cmd);
+	task = se_cmd->t_task;
	BUG_ON(!task);
	fh = fc_frame_header_get(fp);
......
@@ -111,9 +111,8 @@ struct se_subsystem_api;

extern struct kmem_cache *se_mem_cache;

-extern int init_se_global(void);
-extern void release_se_global(void);
-extern void init_scsi_index_table(void);
+extern int init_se_kmem_caches(void);
+extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
@@ -184,7 +183,7 @@ extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd_to_pool(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
				   void *, struct se_mem *,
				   struct se_mem **, u32 *, u32 *);
@@ -352,9 +351,4 @@ struct se_subsystem_api {
	unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;

-#define TRANSPORT(dev)		((dev)->transport)
-#define HBA_TRANSPORT(hba)	((hba)->transport)
-
-extern struct se_global *se_global;
-
#endif	/* TARGET_CORE_TRANSPORT_H */