Commit 5266e5b1 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup.  (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"
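
Context for the first two highlights: target_alloc_session() folds the old
transport_init_session() / transport_init_session_tags(),
core_tpg_check_initiator_node_acl() and transport_register_session()
sequence into a single call, with an optional callback that runs before
registration. A minimal sketch of a converted fabric call site, assuming
hypothetical my_fabric_* types; only the target_alloc_session() and
callback signatures are taken from the conversions shown in the diff below:

    static int my_fabric_session_cb(struct se_portal_group *se_tpg,
                                    struct se_session *se_sess, void *p)
    {
            struct my_fabric_conn *conn = p; /* private pointer passed through */

            /* Fabric-side bookkeeping; runs before the session is registered. */
            conn->se_sess = se_sess;
            return 0;                        /* non-zero aborts session setup */
    }

    static int my_fabric_login(struct my_fabric_tpg *tpg,
                               struct my_fabric_conn *conn,
                               const char *initiator_name, int tag_num)
    {
            struct se_session *se_sess;

            /*
             * One call now covers se_session allocation, percpu_ida tag pool
             * setup (tag count + per-tag private size), se_node_acl lookup by
             * initiator name, and transport_register_session().
             */
            se_sess = target_alloc_session(&tpg->se_tpg, tag_num,
                                           sizeof(struct my_fabric_cmd),
                                           TARGET_PROT_NORMAL, initiator_name,
                                           conn, my_fabric_session_cb);
            if (IS_ERR(se_sess))
                    return PTR_ERR(se_sess); /* e.g. no ACL configured yet */

            return 0;
    }

Drivers with no pre-registration work pass a NULL callback (as ib_srpt does
below), and a tag count of 0 skips the tag pool (as tcm_loop does).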

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...
parents fc739eba 5e47f198
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
@@ -117,7 +117,9 @@ userspace (respectively) to put commands on the ring, and indicate
 when the commands are completed.
 
 version - 1 (userspace should abort if otherwise)
-flags - none yet defined.
+flags:
+- TCMU_MAILBOX_FLAG_CAP_OOOC: indicates out-of-order completion is
+  supported.  See "The Command Ring" for details.
 cmdr_off - The offset of the start of the command ring from the start
 of the memory region, to account for the mailbox size.
 cmdr_size - The size of the command ring. This does *not* need to be a
@@ -162,6 +164,13 @@ rsp.sense_buffer if necessary. Userspace then increments
 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
 kernel via the UIO method, a 4-byte write to the file descriptor.
 
+If TCMU_MAILBOX_FLAG_CAP_OOOC is set for mailbox->flags, kernel is
+capable of handling out-of-order completions. In this case, userspace can
+handle command in different order other than original. Since kernel would
+still process the commands in the same order it appeared in the command
+ring, userspace need to update the cmd->id when completing the
+command(a.k.a steal the original command's entry).
+
 When the opcode is PAD, userspace only updates cmd_tail as above --
 it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
 is contiguous within the command ring.)
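
To make the new flag concrete: a minimal userspace sketch of an
out-of-order completion against the ring described above, assuming the
TCMU UAPI definitions from <linux/target_core_user.h> (struct tcmu_mailbox,
struct tcmu_cmd_entry, tcmu_hdr_get_len(), TCMU_MAILBOX_FLAG_CAP_OOOC);
the helper names are invented for illustration, and 'cmdr' here means the
mapped region plus mb->cmdr_off:

    #include <linux/target_core_user.h>
    #include <stdbool.h>

    static bool ring_supports_oooc(struct tcmu_mailbox *mb)
    {
            /* The capability is advertised once, in the mailbox flags. */
            return mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC;
    }

    /*
     * Complete command 'cmd_id' using whatever entry currently sits at
     * cmd_tail: the kernel reclaims ring space strictly in ring order,
     * so an out-of-order completion "steals" the tail entry by rewriting
     * its cmd_id before the tail is advanced.
     */
    static void complete_out_of_order(struct tcmu_mailbox *mb, char *cmdr,
                                      __u16 cmd_id, __u8 scsi_status)
    {
            struct tcmu_cmd_entry *ent =
                    (struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);

            ent->hdr.cmd_id = cmd_id;        /* steal the entry */
            ent->rsp.scsi_status = scsi_status;

            /* Advance the tail by this entry's length, mod cmdr_size ... */
            mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op))
                           % mb->cmdr_size;
            /* ... then signal the kernel with a 4-byte write to the UIO fd. */
    }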

[diff collapsed: drivers/infiniband/ulp/isert/ib_isert.c]

diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -36,9 +36,7 @@
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN        (sizeof(struct iser_ctrl) + \
                                  sizeof(struct iscsi_hdr))
-#define ISER_RECV_DATA_SEG_LEN  8192
-#define ISER_RX_PAYLOAD_SIZE    (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE      (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+#define ISER_RX_PAYLOAD_SIZE    (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
                                 ISERT_MAX_TX_MISC_PDUS  + \
                                 ISERT_MAX_RX_MISC_PDUS)
 
-#define ISER_RX_PAD_SIZE        (ISER_RECV_DATA_SEG_LEN + 4096 - \
-                (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+#define ISER_RX_PAD_SIZE        (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
+                (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
+                 sizeof(struct ib_cqe)))
 
 #define ISCSI_ISER_SG_TABLESIZE 256
-#define ISER_FASTREG_LI_WRID    0xffffffffffffffffULL
-#define ISER_BEACON_WRID        0xfffffffffffffffeULL
 
 enum isert_desc_type {
         ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
         ISER_CONN_INIT,
         ISER_CONN_UP,
+        ISER_CONN_BOUND,
         ISER_CONN_FULL_FEATURE,
         ISER_CONN_TERMINATING,
         ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
 struct iser_rx_desc {
         struct iser_ctrl iser_header;
         struct iscsi_hdr iscsi_header;
-        char            data[ISER_RECV_DATA_SEG_LEN];
+        char            data[ISCSI_DEF_MAX_RECV_SEG_LEN];
         u64             dma_addr;
         struct ib_sge   rx_sg;
+        struct ib_cqe   rx_cqe;
         char            pad[ISER_RX_PAD_SIZE];
 } __packed;
 
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+        return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
 struct iser_tx_desc {
         struct iser_ctrl iser_header;
         struct iscsi_hdr iscsi_header;
         enum isert_desc_type type;
         u64             dma_addr;
         struct ib_sge   tx_sg[2];
+        struct ib_cqe   tx_cqe;
         int             num_sge;
-        struct isert_cmd *isert_cmd;
         struct ib_send_wr send_wr;
 } __packed;
 
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+        return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+
 enum isert_indicator {
         ISERT_PROTECTED         = 1 << 0,
         ISERT_DATA_KEY_VALID    = 1 << 1,
@@ -144,20 +154,6 @@ enum {
         SIG = 2,
 };
 
-struct isert_rdma_wr {
-        struct isert_cmd        *isert_cmd;
-        enum iser_ib_op_code    iser_ib_op;
-        struct ib_sge           *ib_sge;
-        struct ib_sge           s_ib_sge;
-        int                     rdma_wr_num;
-        struct ib_rdma_wr       *rdma_wr;
-        struct ib_rdma_wr       s_rdma_wr;
-        struct ib_sge           ib_sg[3];
-        struct isert_data_buf   data;
-        struct isert_data_buf   prot;
-        struct fast_reg_descriptor *fr_desc;
-};
-
 struct isert_cmd {
         uint32_t                read_stag;
         uint32_t                write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
         struct iscsi_cmd        *iscsi_cmd;
         struct iser_tx_desc     tx_desc;
         struct iser_rx_desc     *rx_desc;
-        struct isert_rdma_wr    rdma_wr;
+        enum iser_ib_op_code    iser_ib_op;
+        struct ib_sge           *ib_sge;
+        struct ib_sge           s_ib_sge;
+        int                     rdma_wr_num;
+        struct ib_rdma_wr       *rdma_wr;
+        struct ib_rdma_wr       s_rdma_wr;
+        struct ib_sge           ib_sg[3];
+        struct isert_data_buf   data;
+        struct isert_data_buf   prot;
+        struct fast_reg_descriptor *fr_desc;
         struct work_struct      comp_work;
         struct scatterlist      sg;
 };
 
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+        return container_of(desc, struct isert_cmd, tx_desc);
+}
+
 struct isert_device;
 
 struct isert_conn {
         enum iser_conn_state    state;
-        int                     post_recv_buf_count;
         u32                     responder_resources;
         u32                     initiator_depth;
         bool                    pi_support;
         u32                     max_sge;
-        char                    *login_buf;
-        char                    *login_req_buf;
+        struct iser_rx_desc     *login_req_buf;
         char                    *login_rsp_buf;
         u64                     login_req_dma;
         int                     login_req_len;
@@ -201,7 +209,6 @@ struct isert_conn {
         struct ib_qp            *qp;
         struct isert_device     *device;
         struct mutex            mutex;
-        struct completion       wait;
         struct completion       wait_comp_err;
         struct kref             kref;
         struct list_head        fr_pool;
@@ -221,17 +228,13 @@ struct isert_conn {
  *
  * @device:     pointer to device handle
  * @cq:         completion queue
- * @wcs:        work completion array
  * @active_qps: Number of active QPs attached
  *              to completion context
- * @work:       completion work handle
  */
 struct isert_comp {
         struct isert_device     *device;
         struct ib_cq            *cq;
-        struct ib_wc             wcs[16];
         int                      active_qps;
-        struct work_struct       work;
 };
 
 struct isert_device {
@@ -243,9 +246,8 @@ struct isert_device {
         struct isert_comp       *comps;
         int                     comps_used;
         struct list_head        dev_node;
-        int                     (*reg_rdma_mem)(struct iscsi_conn *conn,
-                                                struct iscsi_cmd *cmd,
-                                                struct isert_rdma_wr *wr);
+        int                     (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
+                                                struct iscsi_conn *conn);
         void                    (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
                                                   struct isert_conn *isert_conn);
 };

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,40 +1264,26 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
+        struct se_session *se_sess;
         struct srpt_send_ioctx *ioctx;
-        unsigned long flags;
+        int tag;
 
         BUG_ON(!ch);
+        se_sess = ch->sess;
 
-        ioctx = NULL;
-        spin_lock_irqsave(&ch->spinlock, flags);
-        if (!list_empty(&ch->free_list)) {
-                ioctx = list_first_entry(&ch->free_list,
-                                         struct srpt_send_ioctx, free_list);
-                list_del(&ioctx->free_list);
+        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+        if (tag < 0) {
+                pr_err("Unable to obtain tag for srpt_send_ioctx\n");
+                return NULL;
         }
-        spin_unlock_irqrestore(&ch->spinlock, flags);
-
-        if (!ioctx)
-                return ioctx;
-
-        BUG_ON(ioctx->ch != ch);
+        ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
+        memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
+        ioctx->ch = ch;
         spin_lock_init(&ioctx->spinlock);
         ioctx->state = SRPT_STATE_NEW;
-        ioctx->n_rbuf = 0;
-        ioctx->rbufs = NULL;
-        ioctx->n_rdma = 0;
-        ioctx->n_rdma_wrs = 0;
-        ioctx->rdma_wrs = NULL;
-        ioctx->mapped_sg_count = 0;
         init_completion(&ioctx->tx_done);
-        ioctx->queue_status_only = false;
-        /*
-         * transport_init_se_cmd() does not initialize all fields, so do it
-         * here.
-         */
-        memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
-        memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+        ioctx->cmd.map_tag = tag;
 
         return ioctx;
 }
@@ -2034,9 +2020,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
         struct srp_login_rej *rej;
         struct ib_cm_rep_param *rep_param;
         struct srpt_rdma_ch *ch, *tmp_ch;
-        struct se_node_acl *se_acl;
         u32 it_iu_len;
-        int i, ret = 0;
+        int ret = 0;
         unsigned char *p;
 
         WARN_ON_ONCE(irqs_disabled());
@@ -2158,12 +2143,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
         if (!ch->ioctx_ring)
                 goto free_ch;
 
-        INIT_LIST_HEAD(&ch->free_list);
-        for (i = 0; i < ch->rq_size; i++) {
-                ch->ioctx_ring[i]->ch = ch;
-                list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-        }
-
         ret = srpt_create_ch_ib(ch);
         if (ret) {
                 rej->reason = cpu_to_be32(
@@ -2193,19 +2172,13 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
         pr_debug("registering session %s\n", ch->sess_name);
         p = &ch->sess_name[0];
 
-        ch->sess = transport_init_session(TARGET_PROT_NORMAL);
-        if (IS_ERR(ch->sess)) {
-                rej->reason = cpu_to_be32(
-                                SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-                pr_debug("Failed to create session\n");
-                goto destroy_ib;
-        }
-
 try_again:
-        se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
-        if (!se_acl) {
+        ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
+                                        sizeof(struct srpt_send_ioctx),
+                                        TARGET_PROT_NORMAL, p, ch, NULL);
+        if (IS_ERR(ch->sess)) {
                 pr_info("Rejected login because no ACL has been"
-                        " configured yet for initiator %s.\n", ch->sess_name);
+                        " configured yet for initiator %s.\n", p);
                 /*
                  * XXX: Hack to retry of ch->i_port_id without leading '0x'
                  */
@@ -2213,14 +2186,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                         p += 2;
                         goto try_again;
                 }
-                rej->reason = cpu_to_be32(
+                rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+                                SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
                                 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
-                transport_free_session(ch->sess);
                 goto destroy_ib;
         }
-        ch->sess->se_node_acl = se_acl;
-
-        transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
 
         pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
                  ch->sess_name, ch->cm_id);
@@ -2911,7 +2881,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
         struct srpt_send_ioctx *ioctx = container_of(se_cmd,
                                 struct srpt_send_ioctx, cmd);
         struct srpt_rdma_ch *ch = ioctx->ch;
-        unsigned long flags;
+        struct se_session *se_sess = ch->sess;
 
         WARN_ON(ioctx->state != SRPT_STATE_DONE);
         WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2922,9 +2892,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
                 ioctx->n_rbuf = 0;
         }
 
-        spin_lock_irqsave(&ch->spinlock, flags);
-        list_add(&ioctx->free_list, &ch->free_list);
-        spin_unlock_irqrestore(&ch->spinlock, flags);
+        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 /**

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,7 +179,6 @@ struct srpt_recv_ioctx {
  * struct srpt_send_ioctx - SRPT send I/O context.
  * @ioctx:       See above.
  * @ch:          Channel pointer.
- * @free_list:   Node in srpt_rdma_ch.free_list.
  * @n_rbuf:      Number of data buffers in the received SRP command.
 * @rbufs:       Pointer to SRP data buffer array.
 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -202,7 +201,6 @@ struct srpt_send_ioctx {
         struct srp_direct_buf   *rbufs;
         struct srp_direct_buf   single_rbuf;
         struct scatterlist      *sg;
-        struct list_head        free_list;
         spinlock_t              spinlock;
         enum srpt_command_state state;
         struct se_cmd           cmd;

diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
@@ -2963,6 +2963,7 @@ struct qlt_hw_data {
         uint8_t tgt_node_name[WWN_SIZE];
 
+        struct dentry *dfs_tgt_sess;
         struct list_head q_full_list;
         uint32_t num_pend_cmds;
         uint32_t num_qfull_cmds_alloc;

diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -12,6 +12,47 @@
 static struct dentry *qla2x00_dfs_root;
 static atomic_t qla2x00_dfs_root_count;
 
+static int
+qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
+{
+        scsi_qla_host_t *vha = s->private;
+        struct qla_hw_data *ha = vha->hw;
+        unsigned long flags;
+        struct qla_tgt_sess *sess = NULL;
+        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+        seq_printf(s, "%s\n", vha->host_str);
+        if (tgt) {
+                seq_printf(s, "Port ID   Port Name                Handle\n");
+
+                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+                list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+                        seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
+                                   sess->s_id.b.domain, sess->s_id.b.area,
+                                   sess->s_id.b.al_pa, sess->port_name,
+                                   sess->loop_id);
+                }
+                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+        }
+
+        return 0;
+}
+
+static int
+qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
+{
+        scsi_qla_host_t *vha = inode->i_private;
+
+        return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
+}
+
+static const struct file_operations dfs_tgt_sess_ops = {
+        .open           = qla2x00_dfs_tgt_sess_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -248,6 +289,15 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
                     "Unable to create debugfs fce node.\n");
                 goto out;
         }
+
+        ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+                S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+        if (!ha->tgt.dfs_tgt_sess) {
+                ql_log(ql_log_warn, vha, 0xffff,
+                    "Unable to create debugFS tgt_sess node.\n");
+                goto out;
+        }
+
 out:
         return 0;
 }
@@ -257,6 +307,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
 {
         struct qla_hw_data *ha = vha->hw;
 
+        if (ha->tgt.dfs_tgt_sess) {
+                debugfs_remove(ha->tgt.dfs_tgt_sess);
+                ha->tgt.dfs_tgt_sess = NULL;
+        }
+
         if (ha->dfs_fw_resource_cnt) {
                 debugfs_remove(ha->dfs_fw_resource_cnt);
                 ha->dfs_fw_resource_cnt = NULL;

diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
@@ -641,7 +641,8 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 {
         struct scsi_qla_host *vha = sess->vha;
 
-        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+        if (sess->se_sess)
+                vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
         if (!list_empty(&sess->del_list_entry))
                 list_del_init(&sess->del_list_entry);
@@ -856,8 +857,12 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                             "Timeout: sess %p about to be deleted\n",
                             sess);
-                        ha->tgt.tgt_ops->shutdown_sess(sess);
-                        ha->tgt.tgt_ops->put_sess(sess);
+                        if (sess->se_sess) {
+                                ha->tgt.tgt_ops->shutdown_sess(sess);
+                                ha->tgt.tgt_ops->put_sess(sess);
+                        } else {
+                                qlt_unreg_sess(sess);
+                        }
                 } else {
                         schedule_delayed_work(&tgt->sess_del_work,
                             sess->expires - elapsed);
@@ -879,7 +884,6 @@ static struct qla_tgt_sess *qlt_create_sess(
         struct qla_hw_data *ha = vha->hw;
         struct qla_tgt_sess *sess;
         unsigned long flags;
-        unsigned char be_sid[3];
 
         /* Check to avoid double sessions */
         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -905,6 +909,14 @@ static struct qla_tgt_sess *qlt_create_sess(
                         if (sess->deleted)
                                 qlt_undelete_sess(sess);
 
+                        if (!sess->se_sess) {
+                                if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+                                    &sess->port_name[0], sess) < 0) {
+                                        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+                                        return NULL;
+                                }
+                        }
+
                         kref_get(&sess->se_sess->sess_kref);
                         ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                                                 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -948,26 +960,6 @@ static struct qla_tgt_sess *qlt_create_sess(
             "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
             sess, vha->vha_tgt.qla_tgt);
 
-        be_sid[0] = sess->s_id.b.domain;
-        be_sid[1] = sess->s_id.b.area;
-        be_sid[2] = sess->s_id.b.al_pa;
-        /*
-         * Determine if this fc_port->port_name is allowed to access
-         * target mode using explict NodeACLs+MappedLUNs, or using
-         * TPG demo mode. If this is successful a target mode FC nexus
-         * is created.
-         */
-        if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
-            &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
-                kfree(sess);
-                return NULL;
-        }
-        /*
-         * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
-         * access across ->tgt.sess_lock reaquire.
-         */
-        kref_get(&sess->se_sess->sess_kref);
 
         sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
         BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
         memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
@@ -985,6 +977,23 @@ static struct qla_tgt_sess *qlt_create_sess(
             fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
             sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
 
+        /*
+         * Determine if this fc_port->port_name is allowed to access
+         * target mode using explict NodeACLs+MappedLUNs, or using
+         * TPG demo mode. If this is successful a target mode FC nexus
+         * is created.
+         */
+        if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+            &fcport->port_name[0], sess) < 0) {
+                return NULL;
+        } else {
+                /*
+                 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+                 * access across ->tgt.sess_lock reaquire.
+                 */
+                kref_get(&sess->se_sess->sess_kref);
+        }
+
         return sess;
 }

diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
@@ -731,7 +731,7 @@ struct qla_tgt_func_tmpl {
         void (*free_session)(struct qla_tgt_sess *);
 
         int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
-                                        void *, uint8_t *, uint16_t);
+                                        struct qla_tgt_sess *);
         void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
         struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
                                                 const uint16_t);

diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1406,6 +1406,39 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
         transport_deregister_session(sess->se_sess);
 }
 
+static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
+                                  struct se_session *se_sess, void *p)
+{
+        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                                struct tcm_qla2xxx_tpg, se_tpg);
+        struct tcm_qla2xxx_lport *lport = tpg->lport;
+        struct qla_hw_data *ha = lport->qla_vha->hw;
+        struct se_node_acl *se_nacl = se_sess->se_node_acl;
+        struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+                                struct tcm_qla2xxx_nacl, se_node_acl);
+        struct qla_tgt_sess *qlat_sess = p;
+        uint16_t loop_id = qlat_sess->loop_id;
+        unsigned long flags;
+        unsigned char be_sid[3];
+
+        be_sid[0] = qlat_sess->s_id.b.domain;
+        be_sid[1] = qlat_sess->s_id.b.area;
+        be_sid[2] = qlat_sess->s_id.b.al_pa;
+
+        /*
+         * And now setup se_nacl and session pointers into HW lport internal
+         * mappings for fabric S_ID and LOOP_ID.
+         */
+        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+        tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
+                                     se_sess, qlat_sess, be_sid);
+        tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
+                                        se_sess, qlat_sess, loop_id);
+        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+        return 0;
+}
+
 /*
  * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
  * to locate struct se_node_acl
@@ -1413,20 +1446,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 static int tcm_qla2xxx_check_initiator_node_acl(
         scsi_qla_host_t *vha,
         unsigned char *fc_wwpn,
-        void *qla_tgt_sess,
-        uint8_t *s_id,
-        uint16_t loop_id)
+        struct qla_tgt_sess *qlat_sess)
 {
         struct qla_hw_data *ha = vha->hw;
         struct tcm_qla2xxx_lport *lport;
         struct tcm_qla2xxx_tpg *tpg;
-        struct tcm_qla2xxx_nacl *nacl;
-        struct se_portal_group *se_tpg;
-        struct se_node_acl *se_nacl;
         struct se_session *se_sess;
-        struct qla_tgt_sess *sess = qla_tgt_sess;
         unsigned char port_name[36];
-        unsigned long flags;
         int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
                        TCM_QLA2XXX_DEFAULT_TAGS;
@@ -1444,15 +1470,6 @@ static int tcm_qla2xxx_check_initiator_node_acl(
                 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
                 return -EINVAL;
         }
-        se_tpg = &tpg->se_tpg;
-
-        se_sess = transport_init_session_tags(num_tags,
-                                              sizeof(struct qla_tgt_cmd),
-                                              TARGET_PROT_ALL);
-        if (IS_ERR(se_sess)) {
-                pr_err("Unable to initialize struct se_session\n");
-                return PTR_ERR(se_sess);
-        }
 
         /*
          * Format the FCP Initiator port_name into colon seperated values to
          * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
@@ -1463,28 +1480,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
          * Locate our struct se_node_acl either from an explict NodeACL created
          * via ConfigFS, or via running in TPG demo mode.
          */
-        se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
-                                        port_name);
-        if (!se_sess->se_node_acl) {
-                transport_free_session(se_sess);
-                return -EINVAL;
-        }
-        se_nacl = se_sess->se_node_acl;
-        nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
-
-        /*
-         * And now setup the new se_nacl and session pointers into our HW lport
-         * mappings for fabric S_ID and LOOP_ID.
-         */
-        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-        tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
-                        qla_tgt_sess, s_id);
-        tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
-                        qla_tgt_sess, loop_id);
-        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-        /*
-         * Finally register the new FC Nexus with TCM
-         */
-        transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+        se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+                                       sizeof(struct qla_tgt_cmd),
+                                       TARGET_PROT_ALL, port_name,
+                                       qlat_sess, tcm_qla2xxx_session_cb);
+        if (IS_ERR(se_sess))
+                return PTR_ERR(se_sess);
 
         return 0;
 }

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 
 /* Start items for tcm_loop_nexus_cit */
 
+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+                                  struct se_session *se_sess, void *p)
+{
+        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+                                        struct tcm_loop_tpg, tl_se_tpg);
+
+        tl_tpg->tl_nexus = p;
+        return 0;
+}
+
 static int tcm_loop_make_nexus(
         struct tcm_loop_tpg *tl_tpg,
         const char *name)
 {
-        struct se_portal_group *se_tpg;
         struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
         struct tcm_loop_nexus *tl_nexus;
-        int ret = -ENOMEM;
+        int ret;
 
         if (tl_tpg->tl_nexus) {
                 pr_debug("tl_tpg->tl_nexus already exists\n");
                 return -EEXIST;
         }
-        se_tpg = &tl_tpg->tl_se_tpg;
 
         tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
         if (!tl_nexus) {
                 pr_err("Unable to allocate struct tcm_loop_nexus\n");
                 return -ENOMEM;
         }
-        /*
-         * Initialize the struct se_session pointer
-         */
-        tl_nexus->se_sess = transport_init_session(
-                                TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+
+        tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+                                TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+                                name, tl_nexus, tcm_loop_alloc_sess_cb);
         if (IS_ERR(tl_nexus->se_sess)) {
                 ret = PTR_ERR(tl_nexus->se_sess);
-                goto out;
-        }
-        /*
-         * Since we are running in 'demo mode' this call with generate a
-         * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
-         * Initiator port name of the passed configfs group 'name'.
-         */
-        tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                                se_tpg, (unsigned char *)name);
-        if (!tl_nexus->se_sess->se_node_acl) {
-                transport_free_session(tl_nexus->se_sess);
-                goto out;
+                kfree(tl_nexus);
+                return ret;
         }
-        /* Now, register the I_T Nexus as active. */
-        transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-                        tl_nexus->se_sess, tl_nexus);
-        tl_tpg->tl_nexus = tl_nexus;
+
         pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                 name);
         return 0;
-
-out:
-        kfree(tl_nexus);
-        return ret;
 }
 
 static int tcm_loop_drop_nexus(

diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
         struct sbp_session *sess;
         int ret;
         char guid_str[17];
-        struct se_node_acl *se_nacl;
+
+        snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 
         sess = kmalloc(sizeof(*sess), GFP_KERNEL);
         if (!sess) {
                 pr_err("failed to allocate session descriptor\n");
                 return ERR_PTR(-ENOMEM);
         }
+        spin_lock_init(&sess->lock);
+        INIT_LIST_HEAD(&sess->login_list);
+        INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+        sess->guid = guid;
 
-        sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+        sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+                                             sizeof(struct sbp_target_request),
+                                             TARGET_PROT_NORMAL, guid_str,
+                                             sess, NULL);
         if (IS_ERR(sess->se_sess)) {
                 pr_err("failed to init se_session\n");
                 ret = PTR_ERR(sess->se_sess);
                 kfree(sess);
                 return ERR_PTR(ret);
         }
 
-        snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
-        se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
-        if (!se_nacl) {
-                pr_warn("Node ACL not found for %s\n", guid_str);
-
-                transport_free_session(sess->se_sess);
-                kfree(sess);
-
-                return ERR_PTR(-EPERM);
-        }
-        sess->se_sess->se_node_acl = se_nacl;
-
-        spin_lock_init(&sess->lock);
-        INIT_LIST_HEAD(&sess->login_list);
-        INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-        sess->guid = guid;
-
-        transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
         return sess;
 }
@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
                         STATUS_BLOCK_SBP_STATUS(
                                 SBP_STATUS_REQ_TYPE_NOTSUPP));
                 sbp_send_status(req);
-                sbp_free_request(req);
                 return;
         case 3: /* Dummy ORB */
                 req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
                         STATUS_BLOCK_SBP_STATUS(
                                 SBP_STATUS_DUMMY_ORB_COMPLETE));
                 sbp_send_status(req);
-                sbp_free_request(req);
                 return;
         default:
                 BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
         return active;
 }
 
+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+        struct fw_card *card, u64 next_orb)
+{
+        struct se_session *se_sess = sess->se_sess;
+        struct sbp_target_request *req;
+        int tag;
+
+        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+        if (tag < 0)
+                return ERR_PTR(-ENOMEM);
+
+        req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+        memset(req, 0, sizeof(*req));
+        req->se_cmd.map_tag = tag;
+        req->se_cmd.tag = next_orb;
+
+        return req;
+}
+
 static void tgt_agent_fetch_work(struct work_struct *work)
 {
         struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
         u64 next_orb = agent->orb_pointer;
 
         while (next_orb && tgt_agent_check_active(agent)) {
-                req = kzalloc(sizeof(*req), GFP_KERNEL);
-                if (!req) {
+                req = sbp_mgt_get_req(sess, sess->card, next_orb);
+                if (IS_ERR(req)) {
                         spin_lock_bh(&agent->lock);
                         agent->state = AGENT_STATE_DEAD;
                         spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
                         spin_unlock_bh(&agent->lock);
 
                         sbp_send_status(req);
-                        sbp_free_request(req);
                         return;
                 }
@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
         req->se_cmd.tag = req->orb_pointer;
         if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
                               req->sense_buf, unpacked_lun, data_length,
-                              TCM_SIMPLE_TAG, data_dir, 0))
+                              TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
                 goto err;
 
         return;
@@ -1244,7 +1245,6 @@ static void sbp_handle_command(struct sbp_target_request *req)
                 STATUS_BLOCK_LEN(1) |
                 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
         sbp_send_status(req);
-        sbp_free_request(req);
 }
 
 /*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
 
 static int sbp_send_status(struct sbp_target_request *req)
 {
-        int ret, length;
+        int rc, ret = 0, length;
         struct sbp_login_descriptor *login = req->login;
 
         length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
 
-        ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+        rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
                         login->status_fifo_addr, &req->status, length);
-        if (ret != RCODE_COMPLETE) {
-                pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
-                return -EIO;
+        if (rc != RCODE_COMPLETE) {
+                pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+                ret = -EIO;
+                goto put_ref;
         }
 
         pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
                         req->orb_pointer);
-
-        return 0;
+        /*
+         * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+         * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+         * final se_cmd->cmd_kref put.
+         */
+put_ref:
+        target_put_sess_cmd(&req->se_cmd);
+        return ret;
 }
 
 static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
 
 static void sbp_free_request(struct sbp_target_request *req)
 {
+        struct se_cmd *se_cmd = &req->se_cmd;
+        struct se_session *se_sess = se_cmd->se_sess;
+
         kfree(req->pg_tbl);
         kfree(req->cmd_buf);
-        kfree(req);
+
+        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
                 rcode = RCODE_CONFLICT_ERROR;
                 goto out;
         }
-
         req = kzalloc(sizeof(*req), GFP_ATOMIC);
         if (!req) {
                 rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
         struct sbp_target_request *req = container_of(se_cmd,
                         struct sbp_target_request, se_cmd);
 
-        transport_generic_free_cmd(&req->se_cmd, 0);
-        return 1;
+        return transport_generic_free_cmd(&req->se_cmd, 0);
 }
 
 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                 se_cmd->lun_ref_active = true;
 
                 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+                    deve->lun_access_ro) {
                         pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                 " Access for 0x%08llx\n",
                                 se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)
         rcu_read_lock();
         deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
-        ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+        ret = deve && deve->lun_access_ro;
         rcu_read_unlock();
 
         return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(
 void core_update_device_list_access(
         u64 mapped_lun,
-        u32 lun_access,
+        bool lun_access_ro,
         struct se_node_acl *nacl)
 {
         struct se_dev_entry *deve;
 
         mutex_lock(&nacl->lun_entry_mutex);
         deve = target_nacl_find_deve(nacl, mapped_lun);
-        if (deve) {
-                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-                } else {
-                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-                }
-        }
+        if (deve)
+                deve->lun_access_ro = lun_access_ro;
         mutex_unlock(&nacl->lun_entry_mutex);
 }
@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
         struct se_lun *lun,
         struct se_lun_acl *lun_acl,
         u64 mapped_lun,
-        u32 lun_access,
+        bool lun_access_ro,
         struct se_node_acl *nacl,
         struct se_portal_group *tpg)
 {
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
         kref_init(&new->pr_kref);
         init_completion(&new->pr_comp);
 
-        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
-                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-        else
-                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-
+        new->lun_access_ro = lun_access_ro;
         new->creation_time = get_jiffies_64();
         new->attach_count++;
@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(
         hlist_del_rcu(&orig->link);
         clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-        orig->lun_flags = 0;
+        orig->lun_access_ro = false;
         orig->creation_time = 0;
         orig->attach_count--;
         /*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
 {
         int rc;
 
-        rc = core_tpg_add_lun(tpg, lun,
-                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+        rc = core_tpg_add_lun(tpg, lun, false, dev);
         if (rc < 0)
                 return rc;
@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
         struct se_portal_group *tpg,
         struct se_lun_acl *lacl,
         struct se_lun *lun,
-        u32 lun_access)
+        bool lun_access_ro)
 {
         struct se_node_acl *nacl = lacl->se_lun_nacl;
         /*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
         if (!nacl)
                 return -EINVAL;
 
-        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
-            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
-                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+        if (lun->lun_access_ro)
+                lun_access_ro = true;
 
         lacl->se_lun = lun;
 
         if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
-                        lun_access, nacl, tpg) < 0)
+                        lun_access_ro, nacl, tpg) < 0)
                 return -EINVAL;
 
         pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
                 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
-                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+                lun_access_ro ? "RO" : "RW",
                 nacl->initiatorname);
         /*
          * Check to see if there are any existing persistent reservation APTPL

diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
                         struct se_lun_acl, se_lun_group);
         struct se_portal_group *se_tpg;
         struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-        int lun_access;
+        bool lun_access_ro;
 
         if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
                 pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
         }
         /*
          * If this struct se_node_acl was dynamically generated with
-         * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
-         * which be will write protected (READ-ONLY) when
+         * tpg_1/attrib/generate_node_acls=1, use the existing
+         * deve->lun_access_ro value, which will be true when
          * tpg_1/attrib/demo_mode_write_protect=1
          */
         rcu_read_lock();
         deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
         if (deve)
-                lun_access = deve->lun_flags;
+                lun_access_ro = deve->lun_access_ro;
         else
-                lun_access =
+                lun_access_ro =
                         (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
-                                se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
-                                           TRANSPORT_LUNFLAGS_READ_WRITE;
+                                se_tpg)) ? true : false;
         rcu_read_unlock();
         /*
          * Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
          * This value is what the SCSI Initiator actually sees the
          * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
          */
-        return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+        return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
 }
 
 static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
         rcu_read_lock();
         deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
         if (deve) {
-                len = sprintf(page, "%d\n",
-                        (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+                len = sprintf(page, "%d\n", deve->lun_access_ro);
         }
         rcu_read_unlock();
@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
         struct se_lun_acl *lacl = item_to_lun_acl(item);
         struct se_node_acl *se_nacl = lacl->se_lun_nacl;
         struct se_portal_group *se_tpg = se_nacl->se_tpg;
-        unsigned long op;
+        unsigned long wp;
         int ret;
 
-        ret = kstrtoul(page, 0, &op);
+        ret = kstrtoul(page, 0, &wp);
         if (ret)
                 return ret;
 
-        if ((op != 1) && (op != 0))
+        if ((wp != 1) && (wp != 0))
                 return -EINVAL;
 
-        core_update_device_list_access(lacl->mapped_lun, (op) ?
-                        TRANSPORT_LUNFLAGS_READ_ONLY :
-                        TRANSPORT_LUNFLAGS_READ_WRITE,
-                        lacl->se_lun_nacl);
+        /* wp=1 means lun_access_ro=true */
+        core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
 
         pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
                 " Mapped LUN: %llu Write Protect bit to %s\n",
                 se_tpg->se_tpg_tfo->get_fabric_name(),
-                se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+                se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
 
         return count;

diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
@@ -412,9 +412,40 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
         return 0;
 }
 
+static sense_reason_t
+iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+{
+        struct se_device *dev = cmd->se_dev;
+        struct scatterlist *sg = &cmd->t_data_sg[0];
+        struct page *page = NULL;
+        int ret;
+
+        if (sg->offset) {
+                page = alloc_page(GFP_KERNEL);
+                if (!page)
+                        return TCM_OUT_OF_RESOURCES;
+                sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
+                                  dev->dev_attrib.block_size);
+        }
+
+        ret = blkdev_issue_write_same(bdev,
+                                target_to_linux_sector(dev, cmd->t_task_lba),
+                                target_to_linux_sector(dev,
+                                        sbc_get_write_same_sectors(cmd)),
+                                GFP_KERNEL, page ? page : sg_page(sg));
+        if (page)
+                __free_page(page);
+        if (ret)
+                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+        target_complete_cmd(cmd, GOOD);
+        return 0;
+}
+
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
+        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
         struct iblock_req *ibr;
         struct scatterlist *sg;
         struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
                 return TCM_INVALID_CDB_FIELD;
         }
 
+        if (bdev_write_same(bdev))
+                return iblock_execute_write_same_direct(bdev, cmd);
+
         ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
         if (!ibr)
                 goto fail;

diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
@@ -59,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void    target_pr_kref_release(struct kref *);
 void    core_free_device_list_for_node(struct se_node_acl *,
                 struct se_portal_group *);
-void    core_update_device_list_access(u64, u32, struct se_node_acl *);
+void    core_update_device_list_access(u64, bool, struct se_node_acl *);
 struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
 int     core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-                u64, u32, struct se_node_acl *, struct se_portal_group *);
+                u64, bool, struct se_node_acl *, struct se_portal_group *);
 void    core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
                 struct se_node_acl *, struct se_portal_group *);
 void    core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -72,7 +72,7 @@ void    core_dev_del_lun(struct se_portal_group *, struct se_lun *);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
                 struct se_node_acl *, u64, int *);
 int     core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-                struct se_lun_acl *, struct se_lun *lun, u32);
+                struct se_lun_acl *, struct se_lun *lun, bool);
 int     core_dev_del_initiator_node_lun_acl(struct se_lun *,
                 struct se_lun_acl *);
 void    core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -118,7 +118,7 @@ void    core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
 void    core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
 struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
 int     core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
-                u32, struct se_device *);
+                bool, struct se_device *);
 void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
 struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
                 const char *initiatorname);
@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
     int length = 0;
     int ret;
     int i;
-    bool read_only = target_lun_is_rdonly(cmd);;
     memset(buf, 0, SE_MODE_PAGE_BUF);
@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
     length = ten ? 3 : 2;
     /* DEVICE-SPECIFIC PARAMETER */
-    if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+    if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
         spc_modesense_write_protect(&buf[length], type);
     /*
...
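The write-protect reporting itself is a one-bit affair. The helper called above is not part of this diff; from memory it amounts to setting the WP bit in the DEVICE-SPECIFIC PARAMETER byte of the mode parameter header, roughly:

    /* Rough shape of the helper (not in this diff) */
    static void spc_modesense_write_protect(unsigned char *buf, int type)
    {
        switch (type) {
        case TYPE_DISK:
        default:
            buf[0] |= 0x80; /* WP bit */
            break;
        }
    }

So an initiator issuing MODE SENSE sees WP set whenever either the LUN mapping or the backend reports read-only.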
@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
     struct se_portal_group *tpg,
     struct se_lun *lun_orig)
 {
-    u32 lun_access = 0;
+    bool lun_access_ro = true;
     struct se_lun *lun;
     struct se_device *dev;
@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
      * demo_mode_write_protect is ON, or READ_ONLY;
      */
     if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
-        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+        lun_access_ro = false;
     } else {
         /*
          * Allow only optical drives to issue R/W in default RO
          * demo mode.
          */
         if (dev->transport->get_device_type(dev) == TYPE_DISK)
-            lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+            lun_access_ro = true;
         else
-            lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+            lun_access_ro = false;
     }
     pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
         " access for LUN in Demo Mode\n",
         tpg->se_tpg_tfo->get_fabric_name(),
         tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
-        "READ-WRITE" : "READ-ONLY");
+        lun_access_ro ? "READ-ONLY" : "READ-WRITE");
     core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-            lun_access, acl, tpg);
+            lun_access_ro, acl, tpg);
     /*
      * Check to see if there are any existing persistent reservation
      * APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
         return PTR_ERR(se_tpg->tpg_virt_lun0);
     ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
-            TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+            true, g_lun0_dev);
     if (ret < 0) {
         kfree(se_tpg->tpg_virt_lun0);
         return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
 int core_tpg_add_lun(
     struct se_portal_group *tpg,
     struct se_lun *lun,
-    u32 lun_access,
+    bool lun_access_ro,
     struct se_device *dev)
 {
     int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
     spin_unlock(&dev->se_port_lock);
     if (dev->dev_flags & DF_READ_ONLY)
-        lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+        lun->lun_access_ro = true;
     else
-        lun->lun_access = lun_access;
+        lun->lun_access_ro = lun_access_ro;
     if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
         hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
     mutex_unlock(&tpg->tpg_lun_mutex);
...
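With the tri-state flags gone from this path, callers state intent directly. A hypothetical caller (variable names invented for illustration) now reads:

    /* Export the LUN read-only; passing false would export it read-write.
     * Note from the hunk above that DF_READ_ONLY on the backing device
     * still forces lun_access_ro = true regardless of this argument. */
    ret = core_tpg_add_lun(se_tpg, lun, true, dev);
    if (ret < 0)
        return ret;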
@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
     struct se_session *se_sess;
     int rc;
+    if (tag_num != 0 && !tag_size) {
+        pr_err("init_session_tags called with percpu-ida tag_num:"
+               " %u, but zero tag_size\n", tag_num);
+        return ERR_PTR(-EINVAL);
+    }
+    if (!tag_num && tag_size) {
+        pr_err("init_session_tags called with percpu-ida tag_size:"
+               " %u, but zero tag_num\n", tag_size);
+        return ERR_PTR(-EINVAL);
+    }
     se_sess = transport_init_session(sup_prot_ops);
     if (IS_ERR(se_sess))
         return se_sess;
@@ -374,6 +385,51 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
+struct se_session *
+target_alloc_session(struct se_portal_group *tpg,
+             unsigned int tag_num, unsigned int tag_size,
+             enum target_prot_op prot_op,
+             const char *initiatorname, void *private,
+             int (*callback)(struct se_portal_group *,
+                     struct se_session *, void *))
+{
+    struct se_session *sess;
+    /*
+     * If the fabric driver is using percpu-ida based pre allocation
+     * of I/O descriptor tags, go ahead and perform that setup now..
+     */
+    if (tag_num != 0)
+        sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+    else
+        sess = transport_init_session(prot_op);
+    if (IS_ERR(sess))
+        return sess;
+    sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+                    (unsigned char *)initiatorname);
+    if (!sess->se_node_acl) {
+        transport_free_session(sess);
+        return ERR_PTR(-EACCES);
+    }
+    /*
+     * Go ahead and perform any remaining fabric setup that is
+     * required before transport_register_session().
+     */
+    if (callback != NULL) {
+        int rc = callback(tpg, sess, private);
+        if (rc) {
+            transport_free_session(sess);
+            return ERR_PTR(rc);
+        }
+    }
+    transport_register_session(tpg, sess->se_node_acl, sess, private);
+    return sess;
+}
+EXPORT_SYMBOL(target_alloc_session);
 static void target_release_session(struct kref *kref)
 {
     struct se_session *se_sess = container_of(kref,
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)
     switch (cmd->data_direction) {
     case DMA_FROM_DEVICE:
+        if (cmd->scsi_status)
+            goto queue_status;
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_data_in(cmd);
         break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
     }
     /* Fall through for DMA_TO_DEVICE */
     case DMA_NONE:
+queue_status:
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_status(cmd);
         break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_rsp:
     switch (cmd->data_direction) {
     case DMA_FROM_DEVICE:
+        if (cmd->scsi_status)
+            goto queue_status;
         atomic_long_add(cmd->data_length,
                 &cmd->se_lun->lun_stats.tx_data_octets);
         /*
@@ -2111,6 +2174,7 @@ static void target_complete_ok_work(struct work_struct *work)
     }
     /* Fall through for DMA_TO_DEVICE */
     case DMA_NONE:
+queue_status:
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_status(cmd);
         if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
     list_for_each_entry_safe(se_cmd, tmp_cmd,
             &se_sess->sess_wait_list, se_cmd_list) {
-        list_del_init(&se_cmd->se_cmd_list);
         pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
             " %d\n", se_cmd, se_cmd->t_state,
             se_cmd->se_tfo->get_cmd_state(se_cmd));
...
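The new exported helper collapses the init -> ACL lookup -> fabric callback -> register sequence that each converted driver below used to open-code. A minimal hypothetical consumer (all demo_* identifiers are invented for illustration) would look like:

    static int demo_alloc_sess_cb(struct se_portal_group *se_tpg,
                      struct se_session *se_sess, void *p)
    {
        struct demo_nexus *nexus = p;    /* driver-private cookie */

        /* Runs after ACL lookup, before transport_register_session() */
        nexus->se_sess = se_sess;
        return 0;    /* a non-zero return unwinds and frees the session */
    }

    ...
        sess = target_alloc_session(se_tpg, 128 /* tags */,
                        sizeof(struct demo_cmd),
                        TARGET_PROT_NORMAL, initiatorname,
                        nexus, demo_alloc_sess_cb);
        if (IS_ERR(sess))
            return PTR_ERR(sess);

Passing tag_num == 0 skips the percpu-ida tag pool setup entirely, for fabrics that allocate descriptors some other way.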
@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
 int ft_check_stop_free(struct se_cmd *se_cmd)
 {
-    transport_generic_free_cmd(se_cmd, 0);
-    return 1;
+    return transport_generic_free_cmd(se_cmd, 0);
 }
 /*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
         return -ENOMEM;
     }
     lport->tt.exch_done(cmd->seq);
+    /*
+     * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+     * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+     * final se_cmd->cmd_kref put.
+     */
+    target_put_sess_cmd(&cmd->se_cmd);
     return 0;
 }
@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
     /* FIXME: Add referenced task tag for ABORT_TASK */
     rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
         &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-        cmd, tm_func, GFP_KERNEL, 0, 0);
+        cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
     if (rc < 0)
         ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
 }
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
     pr_debug("tmr fn %d resp %d fcp code %d\n",
          tmr->function, tmr->response, code);
     ft_send_resp_code(cmd, code);
+    /*
+     * Drop the extra ACK_KREF reference taken by target_submit_tmr()
+     * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+     * final se_cmd->cmd_kref put.
+     */
+    target_put_sess_cmd(&cmd->se_cmd);
 }
 void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
      */
     if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
                   &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-                  ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+                  ntohl(fcp->fc_dl), task_attr, data_dir,
+                  TARGET_SCF_ACK_KREF))
         goto err;
     pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
...
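The lifetime rule behind these hunks is worth spelling out: with TARGET_SCF_ACK_KREF the core takes an extra reference on se_cmd->cmd_kref at submit time, and the fabric driver owns that reference until its response has gone out on the wire. A sketch of a response path under this scheme (demo_* names invented):

    static int demo_queue_status(struct se_cmd *se_cmd)
    {
        demo_send_response_on_wire(se_cmd);    /* fabric-specific */
        /*
         * Drop the submit-time ACK_KREF reference; the final put
         * happens later via ->check_stop_free() ->
         * transport_generic_free_cmd().
         */
        return target_put_sess_cmd(se_cmd);
    }

The payoff is that a late abort or session shutdown can no longer free the descriptor out from under the fabric's response path.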
@@ -186,6 +186,20 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
     return NULL;
 }
+static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
+                struct se_session *se_sess, void *p)
+{
+    struct ft_sess *sess = p;
+    struct ft_tport *tport = sess->tport;
+    struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
+    pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+    hlist_add_head_rcu(&sess->hash, head);
+    tport->sess_count++;
+    return 0;
+}
 /*
  * Allocate session and enter it in the hash for the local port.
  * Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
                       struct fc_rport_priv *rdata)
 {
     struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
-    struct se_node_acl *se_acl;
     struct ft_sess *sess;
     struct hlist_head *head;
     unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
     if (!sess)
         return NULL;
-    sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-                            sizeof(struct ft_cmd),
-                            TARGET_PROT_NORMAL);
-    if (IS_ERR(sess->se_sess)) {
-        kfree(sess);
-        return NULL;
-    }
-    se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
-    if (!se_acl) {
-        transport_free_session(sess->se_sess);
+    kref_init(&sess->kref); /* ref for table entry */
+    sess->tport = tport;
+    sess->port_id = port_id;
+    sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+                         sizeof(struct ft_cmd),
+                         TARGET_PROT_NORMAL, &initiatorname[0],
+                         sess, ft_sess_alloc_cb);
+    if (IS_ERR(sess->se_sess)) {
         kfree(sess);
         return NULL;
     }
-    sess->se_sess->se_node_acl = se_acl;
-    sess->tport = tport;
-    sess->port_id = port_id;
-    kref_init(&sess->kref); /* ref for table entry */
-    hlist_add_head_rcu(&sess->hash, head);
-    tport->sess_count++;
-    pr_debug("port_id %x sess %p\n", port_id, sess);
-    transport_register_session(&tport->tpg->se_tpg, se_acl,
-                   sess->se_sess, sess);
     return sess;
 }
...
@@ -41,13 +41,6 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
     return container_of(f, struct f_uas, function);
 }
-static void usbg_cmd_release(struct kref *);
-static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
-{
-    kref_put(&cmd->ref, usbg_cmd_release);
-}
 /* Start bot.c code */
 static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@ -68,7 +61,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
     struct usbg_cmd *cmd = req->context;
     struct f_uas *fu = cmd->fu;
-    usbg_cleanup_cmd(cmd);
+    transport_generic_free_cmd(&cmd->se_cmd, 0);
     if (req->status < 0) {
         pr_err("ERR %s(%d)\n", __func__, __LINE__);
         return;
@@ -605,7 +598,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
         break;
     case UASP_QUEUE_COMMAND:
-        usbg_cleanup_cmd(cmd);
+        transport_generic_free_cmd(&cmd->se_cmd, 0);
         usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
         break;
@@ -615,7 +608,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
     return;
 cleanup:
-    usbg_cleanup_cmd(cmd);
+    transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 static int uasp_send_status_response(struct usbg_cmd *cmd)
@@ -977,7 +970,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
     return;
 cleanup:
-    usbg_cleanup_cmd(cmd);
+    transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -1046,7 +1039,7 @@ static void usbg_cmd_work(struct work_struct *work)
     struct se_cmd *se_cmd;
     struct tcm_usbg_nexus *tv_nexus;
     struct usbg_tpg *tpg;
-    int dir;
+    int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
     se_cmd = &cmd->se_cmd;
     tpg = cmd->fu->tpg;
@@ -1060,9 +1053,9 @@ static void usbg_cmd_work(struct work_struct *work)
         goto out;
     }
-    if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
-            cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
-            0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+    if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+            cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+            cmd->prio_attr, dir, flags) < 0)
         goto out;
     return;
@@ -1070,42 +1063,64 @@ static void usbg_cmd_work(struct work_struct *work)
 out:
     transport_send_check_condition_and_sense(se_cmd,
             TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-    usbg_cleanup_cmd(cmd);
+    transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
+static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+        struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
+{
+    struct se_session *se_sess = tv_nexus->tvn_se_sess;
+    struct usbg_cmd *cmd;
+    int tag;
+    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+    if (tag < 0)
+        return ERR_PTR(-ENOMEM);
+    cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
+    memset(cmd, 0, sizeof(*cmd));
+    cmd->se_cmd.map_tag = tag;
+    cmd->se_cmd.tag = cmd->tag = scsi_tag;
+    cmd->fu = fu;
+    return cmd;
+}
+static void usbg_release_cmd(struct se_cmd *);
 static int usbg_submit_command(struct f_uas *fu,
         void *cmdbuf, unsigned int len)
 {
     struct command_iu *cmd_iu = cmdbuf;
     struct usbg_cmd *cmd;
-    struct usbg_tpg *tpg;
-    struct tcm_usbg_nexus *tv_nexus;
+    struct usbg_tpg *tpg = fu->tpg;
+    struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
     u32 cmd_len;
+    u16 scsi_tag;
     if (cmd_iu->iu_id != IU_ID_COMMAND) {
         pr_err("Unsupported type %d\n", cmd_iu->iu_id);
         return -EINVAL;
     }
-    cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-    if (!cmd)
-        return -ENOMEM;
-    cmd->fu = fu;
-    /* XXX until I figure out why I can't free in on complete */
-    kref_init(&cmd->ref);
-    kref_get(&cmd->ref);
-    tpg = fu->tpg;
+    tv_nexus = tpg->tpg_nexus;
+    if (!tv_nexus) {
+        pr_err("Missing nexus, ignoring command\n");
+        return -EINVAL;
+    }
     cmd_len = (cmd_iu->len & ~0x3) + 16;
     if (cmd_len > USBG_MAX_CMD)
-        goto err;
+        return -EINVAL;
+    scsi_tag = be16_to_cpup(&cmd_iu->tag);
+    cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
+    if (IS_ERR(cmd)) {
+        pr_err("usbg_get_cmd failed\n");
+        return -ENOMEM;
+    }
     memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
-    cmd->tag = be16_to_cpup(&cmd_iu->tag);
-    cmd->se_cmd.tag = cmd->tag;
     if (fu->flags & USBG_USE_STREAMS) {
         if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
             goto err;
@@ -1117,12 +1132,6 @@ static int usbg_submit_command(struct f_uas *fu,
         cmd->stream = &fu->stream[0];
     }
-    tv_nexus = tpg->tpg_nexus;
-    if (!tv_nexus) {
-        pr_err("Missing nexus, ignoring command\n");
-        goto err;
-    }
     switch (cmd_iu->prio_attr & 0x7) {
     case UAS_HEAD_TAG:
         cmd->prio_attr = TCM_HEAD_TAG;
@@ -1148,7 +1157,7 @@ static int usbg_submit_command(struct f_uas *fu,
     return 0;
 err:
-    kfree(cmd);
+    usbg_release_cmd(&cmd->se_cmd);
     return -EINVAL;
 }
@@ -1182,7 +1191,7 @@ static void bot_cmd_work(struct work_struct *work)
 out:
     transport_send_check_condition_and_sense(se_cmd,
             TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-    usbg_cleanup_cmd(cmd);
+    transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 static int bot_submit_command(struct f_uas *fu,
@@ -1190,7 +1199,7 @@ static int bot_submit_command(struct f_uas *fu,
 {
     struct bulk_cb_wrap *cbw = cmdbuf;
     struct usbg_cmd *cmd;
-    struct usbg_tpg *tpg;
+    struct usbg_tpg *tpg = fu->tpg;
     struct tcm_usbg_nexus *tv_nexus;
     u32 cmd_len;
@@ -1207,28 +1216,20 @@ static int bot_submit_command(struct f_uas *fu,
     if (cmd_len < 1 || cmd_len > 16)
         return -EINVAL;
-    cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-    if (!cmd)
-        return -ENOMEM;
-    cmd->fu = fu;
-    /* XXX until I figure out why I can't free in on complete */
-    kref_init(&cmd->ref);
-    kref_get(&cmd->ref);
-    tpg = fu->tpg;
-    memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
-    cmd->bot_tag = cbw->Tag;
     tv_nexus = tpg->tpg_nexus;
     if (!tv_nexus) {
         pr_err("Missing nexus, ignoring command\n");
-        goto err;
+        return -ENODEV;
     }
+    cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
+    if (IS_ERR(cmd)) {
+        pr_err("usbg_get_cmd failed\n");
+        return -ENOMEM;
+    }
+    memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+    cmd->bot_tag = cbw->Tag;
     cmd->prio_attr = TCM_SIMPLE_TAG;
     cmd->unpacked_lun = cbw->Lun;
     cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -1239,9 +1240,6 @@ static int bot_submit_command(struct f_uas *fu,
     queue_work(tpg->workqueue, &cmd->work);
     return 0;
-err:
-    kfree(cmd);
-    return -EINVAL;
 }
 /* Start fabric.c code */
@@ -1282,20 +1280,14 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
     return 1;
 }
-static void usbg_cmd_release(struct kref *ref)
-{
-    struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
-            ref);
-    transport_generic_free_cmd(&cmd->se_cmd, 0);
-}
 static void usbg_release_cmd(struct se_cmd *se_cmd)
 {
     struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
             se_cmd);
+    struct se_session *se_sess = se_cmd->se_sess;
     kfree(cmd->data_buf);
-    kfree(cmd);
+    percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 static int usbg_shutdown_session(struct se_session *se_sess)
@@ -1579,55 +1571,48 @@ static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
     return ret;
 }
+static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
+                  struct se_session *se_sess, void *p)
+{
+    struct usbg_tpg *tpg = container_of(se_tpg,
+                struct usbg_tpg, se_tpg);
+    tpg->tpg_nexus = p;
+    return 0;
+}
 static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
 {
-    struct se_portal_group *se_tpg;
     struct tcm_usbg_nexus *tv_nexus;
-    int ret;
+    int ret = 0;
     mutex_lock(&tpg->tpg_mutex);
     if (tpg->tpg_nexus) {
         ret = -EEXIST;
         pr_debug("tpg->tpg_nexus already exists\n");
-        goto err_unlock;
+        goto out_unlock;
     }
-    se_tpg = &tpg->se_tpg;
-    ret = -ENOMEM;
     tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
-    if (!tv_nexus)
-        goto err_unlock;
-    tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
-    if (IS_ERR(tv_nexus->tvn_se_sess))
-        goto err_free;
-    /*
-     * Since we are running in 'demo mode' this call with generate a
-     * struct se_node_acl for the tcm_vhost struct se_portal_group with
-     * the SCSI Initiator port name of the passed configfs group 'name'.
-     */
-    tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-            se_tpg, name);
-    if (!tv_nexus->tvn_se_sess->se_node_acl) {
+    if (!tv_nexus) {
+        ret = -ENOMEM;
+        goto out_unlock;
+    }
+    tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+                         USB_G_DEFAULT_SESSION_TAGS,
+                         sizeof(struct usbg_cmd),
+                         TARGET_PROT_NORMAL, name,
+                         tv_nexus, usbg_alloc_sess_cb);
+    if (IS_ERR(tv_nexus->tvn_se_sess)) {
 #define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
         pr_debug(MAKE_NEXUS_MSG, name);
 #undef MAKE_NEXUS_MSG
-        goto err_session;
+        ret = PTR_ERR(tv_nexus->tvn_se_sess);
+        kfree(tv_nexus);
     }
-    /*
-     * Now register the TCM vHost virtual I_T Nexus as active.
-     */
-    transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-            tv_nexus->tvn_se_sess, tv_nexus);
-    tpg->tpg_nexus = tv_nexus;
-    mutex_unlock(&tpg->tpg_mutex);
-    return 0;
-err_session:
-    transport_free_session(tv_nexus->tvn_se_sess);
-err_free:
-    kfree(tv_nexus);
-err_unlock:
+out_unlock:
     mutex_unlock(&tpg->tpg_mutex);
     return ret;
 }
@@ -1735,11 +1720,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,
 static int usbg_check_stop_free(struct se_cmd *se_cmd)
 {
-    struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
-            se_cmd);
-    kref_put(&cmd->ref, usbg_cmd_release);
-    return 1;
+    return target_put_sess_cmd(se_cmd);
 }
 static const struct target_core_fabric_ops usbg_ops = {
...
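The tag-pool discipline introduced here is symmetric: every percpu_ida_alloc() on the submit path is matched by a percpu_ida_free() in ->release_cmd(), and the descriptor memory lives in the session's preallocated sess_cmd_map rather than the heap. Reduced to its core, the pairing looks like:

    /* Submit side: borrow a tag and its preallocated descriptor.
     * GFP_ATOMIC because USB completion context cannot sleep; a
     * negative return means the pool is exhausted. */
    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
    if (tag < 0)
        return ERR_PTR(-ENOMEM);
    cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
    memset(cmd, 0, sizeof(*cmd));
    cmd->se_cmd.map_tag = tag;

    /* Release side, in ->release_cmd(): return the tag, never kfree() */
    percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);

This removes the per-command kzalloc/kfree and the kref workaround the old code carried, at the cost of a fixed upper bound on in-flight commands per session.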
@@ -23,6 +23,8 @@ enum {
 #define USB_G_ALT_INT_BBB       0
 #define USB_G_ALT_INT_UAS       1
+#define USB_G_DEFAULT_SESSION_TAGS  128
 struct tcm_usbg_nexus {
     struct se_session *tvn_se_sess;
 };
...
@@ -1664,8 +1664,7 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
     mutex_unlock(&vhost_scsi_mutex);
 }
-static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
-                       struct se_session *se_sess)
+static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
 {
     struct vhost_scsi_cmd *tv_cmd;
     unsigned int i;
@@ -1721,98 +1720,82 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
     NULL,
 };
-static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
-                const char *name)
+static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
+        struct se_session *se_sess, void *p)
 {
-    struct se_portal_group *se_tpg;
-    struct se_session *se_sess;
-    struct vhost_scsi_nexus *tv_nexus;
     struct vhost_scsi_cmd *tv_cmd;
     unsigned int i;
-    mutex_lock(&tpg->tv_tpg_mutex);
-    if (tpg->tpg_nexus) {
-        mutex_unlock(&tpg->tv_tpg_mutex);
-        pr_debug("tpg->tpg_nexus already exists\n");
-        return -EEXIST;
-    }
-    se_tpg = &tpg->se_tpg;
-    tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
-    if (!tv_nexus) {
-        mutex_unlock(&tpg->tv_tpg_mutex);
-        pr_err("Unable to allocate struct vhost_scsi_nexus\n");
-        return -ENOMEM;
-    }
-    /*
-     * Initialize the struct se_session pointer and setup tagpool
-     * for struct vhost_scsi_cmd descriptors
-     */
-    tv_nexus->tvn_se_sess = transport_init_session_tags(
-                    VHOST_SCSI_DEFAULT_TAGS,
-                    sizeof(struct vhost_scsi_cmd),
-                    TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
-    if (IS_ERR(tv_nexus->tvn_se_sess)) {
-        mutex_unlock(&tpg->tv_tpg_mutex);
-        kfree(tv_nexus);
-        return -ENOMEM;
-    }
-    se_sess = tv_nexus->tvn_se_sess;
     for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
         tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
         tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
                     VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
         if (!tv_cmd->tvc_sgl) {
-            mutex_unlock(&tpg->tv_tpg_mutex);
             pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
             goto out;
         }
         tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
                     VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
         if (!tv_cmd->tvc_upages) {
-            mutex_unlock(&tpg->tv_tpg_mutex);
             pr_err("Unable to allocate tv_cmd->tvc_upages\n");
             goto out;
         }
         tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
                     VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
         if (!tv_cmd->tvc_prot_sgl) {
-            mutex_unlock(&tpg->tv_tpg_mutex);
             pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
             goto out;
         }
     }
+    return 0;
+out:
+    vhost_scsi_free_cmd_map_res(se_sess);
+    return -ENOMEM;
+}
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+                const char *name)
+{
+    struct se_portal_group *se_tpg;
+    struct vhost_scsi_nexus *tv_nexus;
+    mutex_lock(&tpg->tv_tpg_mutex);
+    if (tpg->tpg_nexus) {
+        mutex_unlock(&tpg->tv_tpg_mutex);
+        pr_debug("tpg->tpg_nexus already exists\n");
+        return -EEXIST;
+    }
+    se_tpg = &tpg->se_tpg;
+    tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+    if (!tv_nexus) {
+        mutex_unlock(&tpg->tv_tpg_mutex);
+        pr_err("Unable to allocate struct vhost_scsi_nexus\n");
+        return -ENOMEM;
+    }
     /*
      * Since we are running in 'demo mode' this call with generate a
      * struct se_node_acl for the vhost_scsi struct se_portal_group with
      * the SCSI Initiator port name of the passed configfs group 'name'.
      */
-    tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                se_tpg, (unsigned char *)name);
-    if (!tv_nexus->tvn_se_sess->se_node_acl) {
+    tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+                    VHOST_SCSI_DEFAULT_TAGS,
+                    sizeof(struct vhost_scsi_cmd),
+                    TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+                    (unsigned char *)name, tv_nexus,
+                    vhost_scsi_nexus_cb);
+    if (IS_ERR(tv_nexus->tvn_se_sess)) {
         mutex_unlock(&tpg->tv_tpg_mutex);
-        pr_debug("core_tpg_check_initiator_node_acl() failed"
-                " for %s\n", name);
-        goto out;
+        kfree(tv_nexus);
+        return -ENOMEM;
     }
-    /*
-     * Now register the TCM vhost virtual I_T Nexus as active.
-     */
-    transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-            tv_nexus->tvn_se_sess, tv_nexus);
     tpg->tpg_nexus = tv_nexus;
     mutex_unlock(&tpg->tv_tpg_mutex);
     return 0;
-out:
-    vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
-    transport_free_session(se_sess);
-    kfree(tv_nexus);
-    return -ENOMEM;
 }
 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
@@ -1853,7 +1836,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
            " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
-    vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
+    vhost_scsi_free_cmd_map_res(se_sess);
     /*
      * Release the SCSI I_T Nexus to the emulated vhost Target Port
      */
...
@@ -144,12 +144,6 @@ enum se_cmd_flags_table {
     SCF_USE_CPUID           = 0x00800000,
 };
-/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
-enum transport_lunflags_table {
-    TRANSPORT_LUNFLAGS_READ_ONLY        = 0x01,
-    TRANSPORT_LUNFLAGS_READ_WRITE       = 0x02,
-};
 /*
  * Used by transport_send_check_condition_and_sense()
  * to signal which ASC/ASCQ sense payload should be built.
@@ -633,11 +627,10 @@ struct se_lun_acl {
 };
 struct se_dev_entry {
-    /* See transport_lunflags_table */
     u64             mapped_lun;
     u64             pr_res_key;
     u64             creation_time;
-    u32             lun_flags;
+    bool            lun_access_ro;
     u32             attach_count;
     atomic_long_t   total_cmds;
     atomic_long_t   read_bytes;
@@ -711,7 +704,7 @@ struct se_lun {
     u64             unpacked_lun;
 #define SE_LUN_LINK_MAGIC   0xffff7771
     u32             lun_link_magic;
-    u32             lun_access;
+    bool            lun_access_ro;
     u32             lun_index;
     /* RELATIVE TARGET PORT IDENTIFER */
...
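Dropping the enum turns every access-mode check into a plain boolean test. An illustrative before/after (the deve variable and reject_write() helper are hypothetical):

    /* Before: flag masking against the now-removed enum */
    if (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)
        reject_write(cmd);

    /* After: a direct predicate, no flag arithmetic to get wrong */
    if (deve->lun_access_ro)
        reject_write(cmd);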
@@ -108,6 +108,12 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
 int target_depend_item(struct config_item *item);
 void target_undepend_item(struct config_item *item);
+struct se_session *target_alloc_session(struct se_portal_group *,
+        unsigned int, unsigned int, enum target_prot_op prot_op,
+        const char *, void *,
+        int (*callback)(struct se_portal_group *,
+                struct se_session *, void *));
 struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
         unsigned int);
...
@@ -41,6 +41,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
 struct tcmu_mailbox {
     __u16 version;
...
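A userspace TCMU handler should test this capability bit before completing commands out of order. A minimal sketch, assuming mb already points at the mmap'd mailbox at the start of the UIO region (the mapping setup itself is elided):

    #include <err.h>
    #include <stdbool.h>
    #include <linux/target_core_user.h>

    /* map_base is the hypothetical start of the mmap'd region */
    struct tcmu_mailbox *mb = (struct tcmu_mailbox *)map_base;

    if (mb->version != TCMU_MAILBOX_VERSION)
        errx(1, "unsupported TCMU mailbox version %u", mb->version);

    /* Only reorder completions if the kernel advertises the capability */
    bool allow_out_of_order = mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC;

Handlers running against older kernels will see the bit clear and must keep completing commands in ring order.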