Commit dae8f283 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "These are mostly minor fixes, with the exception of the following that
  address fall-out from recent v4.1-rc1 changes:

   - regression fix related to the big fabric API registration changes
     and configfs_depend_item() usage, that required cherry-picking one
     of HCH's patches from for-next to address the issue for v4.1 code.

   - remaining TCM-USER -v2 related changes to enforce full CDB
     passthrough from Andy + Ilias.

  Also included is a target_core_pscsi driver fix from Andy that
  addresses a long-standing issue with a Scsi_Host reference being
  leaked on PSCSI device shutdown"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  iser-target: Fix error path in isert_create_pi_ctx()
  target: Use a PASSTHROUGH flag instead of transport_types
  target: Move passthrough CDB parsing into a common function
  target/user: Only support full command pass-through
  target/user: Update example code for new ABI requirements
  target/pscsi: Don't leak scsi_host if hba is VIRTUAL_HOST
  target: Fix se_tpg_tfo->tf_subsys regression + remove tf_subsystem
  target: Drop signal_pending checks after interruptible lock acquire
  target: Add missing parentheses
  target: Fix bidi command handling
  target/user: Disallow full passthrough (pass_level=0)
  ISCSI: fix minor memory leak
parents 30a5f118 b2feda4f
...@@ -15,8 +15,7 @@ Contents: ...@@ -15,8 +15,7 @@ Contents:
a) Discovering and configuring TCMU uio devices a) Discovering and configuring TCMU uio devices
b) Waiting for events on the device(s) b) Waiting for events on the device(s)
c) Managing the command ring c) Managing the command ring
3) Command filtering and pass_level 3) A final note
4) A final note
TCM Userspace Design TCM Userspace Design
...@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map) ...@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
/* Process events from cmd ring until we catch up with cmd_head */ /* Process events from cmd ring until we catch up with cmd_head */
while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) { while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) { if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
uint8_t *cdb = (void *)mb + ent->req.cdb_off; uint8_t *cdb = (void *)mb + ent->req.cdb_off;
bool success = true; bool success = true;
...@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map) ...@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
ent->rsp.scsi_status = SCSI_CHECK_CONDITION; ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
} }
} }
else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
/* Tell the kernel we didn't handle unknown opcodes */
ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
}
else { else {
/* Do nothing for PAD entries */ /* Do nothing for PAD entries except update cmd_tail */
} }
/* update cmd_tail */ /* update cmd_tail */
...@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map) ...@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
} }
Command filtering and pass_level
--------------------------------
TCMU supports a "pass_level" option with valid values of 0 or 1. When
the value is 0 (the default), nearly all SCSI commands received for
the device are passed through to the handler. This allows maximum
flexibility but increases the amount of code required by the handler,
to support all mandatory SCSI commands. If pass_level is set to 1,
then only IO-related commands are presented, and the rest are handled
by LIO's in-kernel command emulation. The commands presented at level
1 include all versions of:
READ
WRITE
WRITE_VERIFY
XDWRITEREAD
WRITE_SAME
COMPARE_AND_WRITE
SYNCHRONIZE_CACHE
UNMAP
A final note A final note
------------ ------------
......
...@@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, ...@@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
return 0; return 0;
err_prot_mr: err_prot_mr:
ib_dereg_mr(desc->pi_ctx->prot_mr); ib_dereg_mr(pi_ctx->prot_mr);
err_prot_frpl: err_prot_frpl:
ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
err_pi_ctx: err_pi_ctx:
kfree(desc->pi_ctx); kfree(pi_ctx);
return ret; return ret;
} }
......
...@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work) ...@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
struct se_portal_group *se_tpg = &base_tpg->se_tpg; struct se_portal_group *se_tpg = &base_tpg->se_tpg;
struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
&se_tpg->tpg_group.cg_item)) {
atomic_set(&base_tpg->lport_tpg_enabled, 1); atomic_set(&base_tpg->lport_tpg_enabled, 1);
qlt_enable_vha(base_vha); qlt_enable_vha(base_vha);
} }
...@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) ...@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
atomic_set(&base_tpg->lport_tpg_enabled, 0); atomic_set(&base_tpg->lport_tpg_enabled, 0);
configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, target_undepend_item(&se_tpg->tpg_group.cg_item);
&se_tpg->tpg_group.cg_item);
} }
complete(&base_tpg->tpg_base_comp); complete(&base_tpg->tpg_base_comp);
} }
......
...@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) ...@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
* Here we serialize access across the TIQN+TPG Tuple. * Here we serialize access across the TIQN+TPG Tuple.
*/ */
ret = down_interruptible(&tpg->np_login_sem); ret = down_interruptible(&tpg->np_login_sem);
if ((ret != 0) || signal_pending(current)) if (ret != 0)
return -1; return -1;
spin_lock_bh(&tpg->tpg_state_lock); spin_lock_bh(&tpg->tpg_state_lock);
......
...@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1( ...@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1(
if (IS_ERR(sess->se_sess)) { if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES); ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess->sess_ops);
kfree(sess); kfree(sess);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( ...@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
int iscsit_get_tpg( int iscsit_get_tpg(
struct iscsi_portal_group *tpg) struct iscsi_portal_group *tpg)
{ {
int ret; return mutex_lock_interruptible(&tpg->tpg_access_lock);
ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
return ((ret != 0) || signal_pending(current)) ? -1 : 0;
} }
void iscsit_put_tpg(struct iscsi_portal_group *tpg) void iscsit_put_tpg(struct iscsi_portal_group *tpg)
......
...@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd) ...@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0; return 0;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
if (!port) if (!port)
...@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata( ...@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev) int core_setup_alua(struct se_device *dev)
{ {
if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_lu_gp_member *lu_gp_mem;
......
...@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric( ...@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name); " %s\n", tf->tf_group.cg_item.ci_name);
/*
* Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
*/
tf->tf_ops.tf_subsys = tf->tf_subsys;
tf->tf_fabric = &tf->tf_group.cg_item; tf->tf_fabric = &tf->tf_group.cg_item;
pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
" for %s\n", name); " for %s\n", name);
...@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = { ...@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = {
}, },
}; };
struct configfs_subsystem *target_core_subsystem[] = { int target_depend_item(struct config_item *item)
&target_core_fabrics, {
NULL, return configfs_depend_item(&target_core_fabrics, item);
}; }
EXPORT_SYMBOL(target_depend_item);
void target_undepend_item(struct config_item *item)
{
return configfs_undepend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_undepend_item);
/*############################################################################## /*##############################################################################
// Start functions called by external Target Fabrics Modules // Start functions called by external Target Fabrics Modules
...@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo) ...@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo)
* struct target_fabric_configfs->tf_cit_tmpl * struct target_fabric_configfs->tf_cit_tmpl
*/ */
tf->tf_module = fo->module; tf->tf_module = fo->module;
tf->tf_subsys = target_core_subsystem[0];
snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
tf->tf_ops = *fo; tf->tf_ops = *fo;
...@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, ...@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
{ {
int ret; int ret;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return sprintf(page, "Passthrough\n"); return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock); spin_lock(&dev->dev_reservation_lock);
...@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type); ...@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type);
static ssize_t target_core_dev_pr_show_attr_res_type( static ssize_t target_core_dev_pr_show_attr_res_type(
struct se_device *dev, char *page) struct se_device *dev, char *page)
{ {
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return sprintf(page, "SPC_PASSTHROUGH\n"); return sprintf(page, "SPC_PASSTHROUGH\n");
else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n"); return sprintf(page, "SPC2_RESERVATIONS\n");
...@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type); ...@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type);
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
struct se_device *dev, char *page) struct se_device *dev, char *page)
{ {
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
return sprintf(page, "APTPL Bit Status: %s\n", return sprintf(page, "APTPL Bit Status: %s\n",
...@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active); ...@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
struct se_device *dev, char *page) struct se_device *dev, char *page)
{ {
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n"); return sprintf(page, "Ready to process PR APTPL metadata..\n");
...@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( ...@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u16 port_rpti = 0, tpgt = 0; u16 port_rpti = 0, tpgt = 0;
u8 type = 0, scope; u8 type = 0, scope;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0; return 0;
...@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void) ...@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void)
{ {
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
struct config_group *lu_gp_cg = NULL; struct config_group *lu_gp_cg = NULL;
struct configfs_subsystem *subsys; struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp *lu_gp;
int ret; int ret;
...@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void) ...@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void)
" Engine: %s on %s/%s on "UTS_RELEASE"\n", " Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
subsys = target_core_subsystem[0];
config_group_init(&subsys->su_group); config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex); mutex_init(&subsys->su_mutex);
...@@ -3008,13 +3009,10 @@ static int __init target_core_init_configfs(void) ...@@ -3008,13 +3009,10 @@ static int __init target_core_init_configfs(void)
static void __exit target_core_exit_configfs(void) static void __exit target_core_exit_configfs(void)
{ {
struct configfs_subsystem *subsys;
struct config_group *hba_cg, *alua_cg, *lu_gp_cg; struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
struct config_item *item; struct config_item *item;
int i; int i;
subsys = target_core_subsystem[0];
lu_gp_cg = &alua_lu_gps_group; lu_gp_cg = &alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) { for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item; item = &lu_gp_cg->default_groups[i]->cg_item;
...@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void) ...@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void)
* We expect subsys->su_group.default_groups to be released * We expect subsys->su_group.default_groups to be released
* by configfs subsystem provider logic.. * by configfs subsystem provider logic..
*/ */
configfs_unregister_subsystem(subsys); configfs_unregister_subsystem(&target_core_fabrics);
kfree(subsys->su_group.default_groups); kfree(target_core_fabrics.su_group.default_groups);
core_alua_free_lu_gp(default_lu_gp); core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL; default_lu_gp = NULL;
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/in.h> #include <linux/in.h>
#include <linux/export.h> #include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
...@@ -527,7 +528,7 @@ static void core_export_port( ...@@ -527,7 +528,7 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list); list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock); spin_unlock(&dev->se_port_lock);
if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
...@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev) ...@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev)
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
* passthrough because this is being provided by the backend LLD. * passthrough because this is being provided by the backend LLD.
*/ */
if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
strncpy(&dev->t10_wwn.model[0], strncpy(&dev->t10_wwn.model[0],
dev->transport->inquiry_prod, 16); dev->transport->inquiry_prod, 16);
...@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void) ...@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void)
target_free_device(g_lun0_dev); target_free_device(g_lun0_dev);
core_delete_hba(hba); core_delete_hba(hba);
} }
/*
 * Common CDB parsing for kernel (pSCSI) and user (TCMU) passthrough
 * backends.
 *
 * @cmd:      SCSI command; cmd->t_task_cdb is inspected and may be
 *            adjusted in place (LUN bits cleared for pre-SPC initiators).
 * @exec_cmd: backend execution callback installed on cmd->execute_cmd
 *            for every opcode except REPORT LUNS, which is always
 *            emulated by the core.
 *
 * Returns TCM_NO_SENSE; this function itself never fails.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 *
	 * The listed opcodes use byte 1 bits 5-7 for other purposes
	 * (RDProtect/VRProtect/self-test code/SA format), so they must be
	 * left untouched.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		/* Service action lives in bytes 8-9 of a variable-length CDB */
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
...@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = { ...@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = {
.inquiry_prod = "FILEIO", .inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION, .inquiry_rev = FD_VERSION,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba, .attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba, .detach_hba = fd_detach_hba,
.alloc_device = fd_alloc_device, .alloc_device = fd_alloc_device,
......
...@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = { ...@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = {
.inquiry_prod = "IBLOCK", .inquiry_prod = "IBLOCK",
.inquiry_rev = IBLOCK_VERSION, .inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba, .attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba, .detach_hba = iblock_detach_hba,
.alloc_device = iblock_alloc_device, .alloc_device = iblock_alloc_device,
......
...@@ -4,9 +4,6 @@ ...@@ -4,9 +4,6 @@
/* target_core_alua.c */ /* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp; extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_configfs.c */
extern struct configfs_subsystem *target_core_subsystem[];
/* target_core_device.c */ /* target_core_device.c */
extern struct mutex g_device_mutex; extern struct mutex g_device_mutex;
extern struct list_head g_device_list; extern struct list_head g_device_list;
......
...@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations( ...@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations(
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{ {
return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, return target_depend_item(&tpg->tpg_group.cg_item);
&tpg->tpg_group.cg_item);
} }
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{ {
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, target_undepend_item(&tpg->tpg_group.cg_item);
&tpg->tpg_group.cg_item);
atomic_dec_mb(&tpg->tpg_pr_ref_count); atomic_dec_mb(&tpg->tpg_pr_ref_count);
} }
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
{ {
struct se_portal_group *tpg = nacl->se_tpg;
if (nacl->dynamic_node_acl) if (nacl->dynamic_node_acl)
return 0; return 0;
return target_depend_item(&nacl->acl_group.cg_item);
return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
} }
static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
{ {
struct se_portal_group *tpg = nacl->se_tpg; if (!nacl->dynamic_node_acl)
target_undepend_item(&nacl->acl_group.cg_item);
if (nacl->dynamic_node_acl) {
atomic_dec_mb(&nacl->acl_pr_ref_count);
return;
}
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
atomic_dec_mb(&nacl->acl_pr_ref_count); atomic_dec_mb(&nacl->acl_pr_ref_count);
} }
...@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) ...@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
nacl = lun_acl->se_lun_nacl; nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg; tpg = nacl->se_tpg;
return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, return target_depend_item(&lun_acl->se_lun_group.cg_item);
&lun_acl->se_lun_group.cg_item);
} }
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
...@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) ...@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
nacl = lun_acl->se_lun_nacl; nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg; tpg = nacl->se_tpg;
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, target_undepend_item(&lun_acl->se_lun_group.cg_item);
&lun_acl->se_lun_group.cg_item);
atomic_dec_mb(&se_deve->pr_ref_count); atomic_dec_mb(&se_deve->pr_ref_count);
} }
...@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd) ...@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd)
return 0; return 0;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0; return 0;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
spin_lock(&dev->dev_reservation_lock); spin_lock(&dev->dev_reservation_lock);
......
...@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) ...@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
" pdv_host_id: %d\n", pdv->pdv_host_id); " pdv_host_id: %d\n", pdv->pdv_host_id);
return -EINVAL; return -EINVAL;
} }
pdv->pdv_lld_host = sh;
} }
} else { } else {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
...@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev) ...@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
(phv->phv_lld_host != NULL)) (phv->phv_lld_host != NULL))
scsi_host_put(phv->phv_lld_host); scsi_host_put(phv->phv_lld_host);
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
scsi_device_put(sd); scsi_device_put(sd);
...@@ -970,64 +973,13 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, ...@@ -970,64 +973,13 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} }
/*
* Clear a lun set in the cdb if the initiator talking to use spoke
* and old standards version, as we can't assume the underlying device
* won't choke up on it.
*/
static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
{
switch (cdb[0]) {
case READ_10: /* SBC - RDProtect */
case READ_12: /* SBC - RDProtect */
case READ_16: /* SBC - RDProtect */
case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
case VERIFY: /* SBC - VRProtect */
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
break;
}
}
static sense_reason_t static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd) pscsi_parse_cdb(struct se_cmd *cmd)
{ {
unsigned char *cdb = cmd->t_task_cdb;
if (cmd->se_cmd_flags & SCF_BIDI) if (cmd->se_cmd_flags & SCF_BIDI)
return TCM_UNSUPPORTED_SCSI_OPCODE; return TCM_UNSUPPORTED_SCSI_OPCODE;
pscsi_clear_cdb_lun(cdb); return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
/*
* For REPORT LUNS we always need to emulate the response, for everything
* else the default for pSCSI is to pass the command to the underlying
* LLD / physical hardware.
*/
switch (cdb[0]) {
case REPORT_LUNS:
cmd->execute_cmd = spc_emulate_report_luns;
return 0;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/* FALLTHROUGH*/
default:
cmd->execute_cmd = pscsi_execute_cmd;
return 0;
}
} }
static sense_reason_t static sense_reason_t
...@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = { ...@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
static struct se_subsystem_api pscsi_template = { static struct se_subsystem_api pscsi_template = {
.name = "pscsi", .name = "pscsi",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
.attach_hba = pscsi_attach_hba, .attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba, .detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba, .pmode_enable_hba = pscsi_pmode_enable_hba,
......
...@@ -45,6 +45,7 @@ struct pscsi_dev_virt { ...@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
int pdv_lun_id; int pdv_lun_id;
struct block_device *pdv_bd; struct block_device *pdv_bd;
struct scsi_device *pdv_sd; struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned; } ____cacheline_aligned;
typedef enum phv_modes { typedef enum phv_modes {
......
...@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = { ...@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp", .name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP", .inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION, .inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba, .attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba, .detach_hba = rd_detach_hba,
.alloc_device = rd_alloc_device, .alloc_device = rd_alloc_device,
......
...@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd) ...@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
* comparision using SGLs at cmd->t_bidi_data_sg.. * comparision using SGLs at cmd->t_bidi_data_sg..
*/ */
rc = down_interruptible(&dev->caw_sem); rc = down_interruptible(&dev->caw_sem);
if ((rc != 0) || signal_pending(current)) { if (rc != 0) {
cmd->transport_complete_callback = NULL; cmd->transport_complete_callback = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} }
......
...@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) ...@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this * Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object * struct se_device storage object
*/ */
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0; return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) { if (cmd->sam_task_attr == TCM_ACA_TAG) {
...@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd) ...@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
sectors, 0, NULL, 0); sectors, 0, NULL, 0);
if (unlikely(cmd->pi_err)) { if (unlikely(cmd->pi_err)) {
spin_lock_irq(&cmd->t_state_lock); spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock); spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, cmd->pi_err); transport_generic_request_failure(cmd, cmd->pi_err);
return -1; return -1;
...@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) ...@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false; return false;
/* /*
...@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd) ...@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd)
if (target_handle_task_attr(cmd)) { if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock); spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock); spin_unlock_irq(&cmd->t_state_lock);
return; return;
} }
...@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) ...@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return; return;
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
...@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd) ...@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
case DMA_TO_DEVICE: case DMA_TO_DEVICE:
if (cmd->se_cmd_flags & SCF_BIDI) { if (cmd->se_cmd_flags & SCF_BIDI) {
ret = cmd->se_tfo->queue_data_in(cmd); ret = cmd->se_tfo->queue_data_in(cmd);
if (ret < 0) break;
break;
} }
/* Fall through for DMA_TO_DEVICE */ /* Fall through for DMA_TO_DEVICE */
case DMA_NONE: case DMA_NONE:
......
...@@ -71,13 +71,6 @@ struct tcmu_hba { ...@@ -71,13 +71,6 @@ struct tcmu_hba {
u32 host_id; u32 host_id;
}; };
/* User wants all cmds or just some */
enum passthru_level {
TCMU_PASS_ALL = 0,
TCMU_PASS_IO,
TCMU_PASS_INVALID,
};
#define TCMU_CONFIG_LEN 256 #define TCMU_CONFIG_LEN 256
struct tcmu_dev { struct tcmu_dev {
...@@ -89,7 +82,6 @@ struct tcmu_dev { ...@@ -89,7 +82,6 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_OPEN 0 #define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1 #define TCMU_DEV_BIT_BROKEN 1
unsigned long flags; unsigned long flags;
enum passthru_level pass_level;
struct uio_info uio_info; struct uio_info uio_info;
...@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) ...@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
setup_timer(&udev->timeout, tcmu_device_timedout, setup_timer(&udev->timeout, tcmu_device_timedout,
(unsigned long)udev); (unsigned long)udev);
udev->pass_level = TCMU_PASS_ALL;
return &udev->se_dev; return &udev->se_dev;
} }
...@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev) ...@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
} }
enum { enum {
Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
}; };
static match_table_t tokens = { static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"}, {Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%u"}, {Opt_dev_size, "dev_size=%u"},
{Opt_pass_level, "pass_level=%u"}, {Opt_hw_block_size, "hw_block_size=%u"},
{Opt_err, NULL} {Opt_err, NULL}
}; };
...@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, ...@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
char *orig, *ptr, *opts, *arg_p; char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS]; substring_t args[MAX_OPT_ARGS];
int ret = 0, token; int ret = 0, token;
int arg; unsigned long tmp_ul;
opts = kstrdup(page, GFP_KERNEL); opts = kstrdup(page, GFP_KERNEL);
if (!opts) if (!opts)
...@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, ...@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
if (ret < 0) if (ret < 0)
pr_err("kstrtoul() failed for dev_size=\n"); pr_err("kstrtoul() failed for dev_size=\n");
break; break;
case Opt_pass_level: case Opt_hw_block_size:
match_int(args, &arg); arg_p = match_strdup(&args[0]);
if (arg >= TCMU_PASS_INVALID) { if (!arg_p) {
pr_warn("TCMU: Invalid pass_level: %d\n", arg); ret = -ENOMEM;
break; break;
} }
ret = kstrtoul(arg_p, 0, &tmp_ul);
pr_debug("TCMU: Setting pass_level to %d\n", arg); kfree(arg_p);
udev->pass_level = arg; if (ret < 0) {
pr_err("kstrtoul() failed for hw_block_size=\n");
break;
}
if (!tmp_ul) {
pr_err("hw_block_size must be nonzero\n");
break;
}
dev->dev_attrib.hw_block_size = tmp_ul;
break; break;
default: default:
break; break;
...@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) ...@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ", bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL"); udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
udev->dev_size, udev->pass_level);
return bl; return bl;
} }
...@@ -1038,20 +1035,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev) ...@@ -1038,20 +1035,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
dev->dev_attrib.block_size); dev->dev_attrib.block_size);
} }
static sense_reason_t
tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
int ret;
ret = tcmu_queue_cmd(se_cmd);
if (ret != 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
else
return TCM_NO_SENSE;
}
static sense_reason_t static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd) tcmu_pass_op(struct se_cmd *se_cmd)
{ {
...@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd) ...@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd)
return TCM_NO_SENSE; return TCM_NO_SENSE;
} }
static struct sbc_ops tcmu_sbc_ops = {
.execute_rw = tcmu_execute_rw,
.execute_sync_cache = tcmu_pass_op,
.execute_write_same = tcmu_pass_op,
.execute_write_same_unmap = tcmu_pass_op,
.execute_unmap = tcmu_pass_op,
};
static sense_reason_t static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd) tcmu_parse_cdb(struct se_cmd *cmd)
{ {
unsigned char *cdb = cmd->t_task_cdb; return passthrough_parse_cdb(cmd, tcmu_pass_op);
struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
sense_reason_t ret;
switch (udev->pass_level) {
case TCMU_PASS_ALL:
/* We're just like pscsi, then */
/*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.
*/
switch (cdb[0]) {
case REPORT_LUNS:
cmd->execute_cmd = spc_emulate_report_luns;
break;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/* FALLTHROUGH */
default:
cmd->execute_cmd = tcmu_pass_op;
}
ret = TCM_NO_SENSE;
break;
case TCMU_PASS_IO:
ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
break;
default:
pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
ret = TCM_CHECK_CONDITION_ABORT_CMD;
}
return ret;
} }
DEF_TB_DEFAULT_ATTRIBS(tcmu); DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
TB_DEV_ATTR_RO(tcmu, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
static struct configfs_attribute *tcmu_backend_dev_attrs[] = { static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_emulate_model_alias.attr,
&tcmu_dev_attrib_emulate_dpo.attr,
&tcmu_dev_attrib_emulate_fua_write.attr,
&tcmu_dev_attrib_emulate_fua_read.attr,
&tcmu_dev_attrib_emulate_write_cache.attr,
&tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
&tcmu_dev_attrib_emulate_tas.attr,
&tcmu_dev_attrib_emulate_tpu.attr,
&tcmu_dev_attrib_emulate_tpws.attr,
&tcmu_dev_attrib_emulate_caw.attr,
&tcmu_dev_attrib_emulate_3pc.attr,
&tcmu_dev_attrib_pi_prot_type.attr,
&tcmu_dev_attrib_hw_pi_prot_type.attr, &tcmu_dev_attrib_hw_pi_prot_type.attr,
&tcmu_dev_attrib_pi_prot_format.attr,
&tcmu_dev_attrib_enforce_pr_isids.attr,
&tcmu_dev_attrib_is_nonrot.attr,
&tcmu_dev_attrib_emulate_rest_reord.attr,
&tcmu_dev_attrib_force_pr_aptpl.attr,
&tcmu_dev_attrib_hw_block_size.attr, &tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr, &tcmu_dev_attrib_hw_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr, &tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
&tcmu_dev_attrib_max_unmap_lba_count.attr,
&tcmu_dev_attrib_max_unmap_block_desc_count.attr,
&tcmu_dev_attrib_unmap_granularity.attr,
&tcmu_dev_attrib_unmap_granularity_alignment.attr,
&tcmu_dev_attrib_max_write_same_len.attr,
NULL, NULL,
}; };
...@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = { ...@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = {
.inquiry_prod = "USER", .inquiry_prod = "USER",
.inquiry_rev = TCMU_VERSION, .inquiry_rev = TCMU_VERSION,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
.attach_hba = tcmu_attach_hba, .attach_hba = tcmu_attach_hba,
.detach_hba = tcmu_detach_hba, .detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device, .alloc_device = tcmu_alloc_device,
......
...@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op ...@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
bool src) bool src)
{ {
struct se_device *se_dev; struct se_device *se_dev;
struct configfs_subsystem *subsys = target_core_subsystem[0];
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
int rc; int rc;
...@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op ...@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
" se_dev\n", xop->src_dev); " se_dev\n", xop->src_dev);
} }
rc = configfs_depend_item(subsys, rc = target_depend_item(&se_dev->dev_group.cg_item);
&se_dev->dev_group.cg_item);
if (rc != 0) { if (rc != 0) {
pr_err("configfs_depend_item attempt failed:" pr_err("configfs_depend_item attempt failed:"
" %d for se_dev: %p\n", rc, se_dev); " %d for se_dev: %p\n", rc, se_dev);
...@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op ...@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
return rc; return rc;
} }
pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" pr_debug("Called configfs_depend_item for se_dev: %p"
" se_dev->se_dev_group: %p\n", subsys, se_dev, " se_dev->se_dev_group: %p\n", se_dev,
&se_dev->dev_group); &se_dev->dev_group);
mutex_unlock(&g_device_mutex); mutex_unlock(&g_device_mutex);
...@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) ...@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{ {
struct configfs_subsystem *subsys = target_core_subsystem[0];
struct se_device *remote_dev; struct se_device *remote_dev;
if (xop->op_origin == XCOL_SOURCE_RECV_OP) if (xop->op_origin == XCOL_SOURCE_RECV_OP)
...@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) ...@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
else else
remote_dev = xop->src_dev; remote_dev = xop->src_dev;
pr_debug("Calling configfs_undepend_item for subsys: %p" pr_debug("Calling configfs_undepend_item for"
" remote_dev: %p remote_dev->dev_group: %p\n", " remote_dev: %p remote_dev->dev_group: %p\n",
subsys, remote_dev, &remote_dev->dev_group.cg_item); remote_dev, &remote_dev->dev_group.cg_item);
configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); target_undepend_item(&remote_dev->dev_group.cg_item);
} }
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
......
...@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
* dependency now. * dependency now.
*/ */
se_tpg = &tpg->se_tpg; se_tpg = &tpg->se_tpg;
ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, ret = target_depend_item(&se_tpg->tpg_group.cg_item);
&se_tpg->tpg_group.cg_item);
if (ret) { if (ret) {
pr_warn("configfs_depend_item() failed: %d\n", ret); pr_warn("configfs_depend_item() failed: %d\n", ret);
kfree(vs_tpg); kfree(vs_tpg);
...@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
* to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
*/ */
se_tpg = &tpg->se_tpg; se_tpg = &tpg->se_tpg;
configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, target_undepend_item(&se_tpg->tpg_group.cg_item);
&se_tpg->tpg_group.cg_item);
} }
if (match) { if (match) {
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
......
#ifndef TARGET_CORE_BACKEND_H #ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H #define TARGET_CORE_BACKEND_H
#define TRANSPORT_PLUGIN_PHBA_PDEV 1 #define TRANSPORT_FLAG_PASSTHROUGH 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
struct target_backend_cits { struct target_backend_cits {
struct config_item_type tb_dev_cit; struct config_item_type tb_dev_cit;
...@@ -22,7 +20,7 @@ struct se_subsystem_api { ...@@ -22,7 +20,7 @@ struct se_subsystem_api {
char inquiry_rev[4]; char inquiry_rev[4];
struct module *owner; struct module *owner;
u8 transport_type; u8 transport_flags;
int (*attach_hba)(struct se_hba *, u32); int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *); void (*detach_hba)(struct se_hba *);
...@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32); ...@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32); int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
#endif /* TARGET_CORE_BACKEND_H */ #endif /* TARGET_CORE_BACKEND_H */
...@@ -40,8 +40,6 @@ struct target_fabric_configfs { ...@@ -40,8 +40,6 @@ struct target_fabric_configfs {
struct config_item *tf_fabric; struct config_item *tf_fabric;
/* Passed from fabric modules */ /* Passed from fabric modules */
struct config_item_type *tf_fabric_cit; struct config_item_type *tf_fabric_cit;
/* Pointer to target core subsystem */
struct configfs_subsystem *tf_subsys;
/* Pointer to fabric's struct module */ /* Pointer to fabric's struct module */
struct module *tf_module; struct module *tf_module;
struct target_core_fabric_ops tf_ops; struct target_core_fabric_ops tf_ops;
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
struct target_core_fabric_ops { struct target_core_fabric_ops {
struct module *module; struct module *module;
const char *name; const char *name;
struct configfs_subsystem *tf_subsys;
char *(*get_fabric_name)(void); char *(*get_fabric_name)(void);
u8 (*get_fabric_proto_ident)(struct se_portal_group *); u8 (*get_fabric_proto_ident)(struct se_portal_group *);
char *(*tpg_get_wwn)(struct se_portal_group *); char *(*tpg_get_wwn)(struct se_portal_group *);
...@@ -109,6 +108,9 @@ struct target_core_fabric_ops { ...@@ -109,6 +108,9 @@ struct target_core_fabric_ops {
int target_register_template(const struct target_core_fabric_ops *fo); int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo); void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
struct se_session *transport_init_session(enum target_prot_op); struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int, int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int); unsigned int);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment