Commit 5716af6e authored by Sagi Grimberg, committed by Roland Dreier

IB/iser: Rename ib_conn -> iser_conn

Two reasons why we choose to do this:

1. No point today calling struct iser_conn by another name ib_conn
2. In the next patches we will restructure iser control plane representation
   - struct iser_conn: connection logical representation
   - struct ib_conn: connection RDMA layout representation

This patch does not change any functionality.
Signed-off-by: Ariel Nahum <arieln@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent fe82dcec
......@@ -147,8 +147,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc)
{
struct iser_conn *ib_conn = task->conn->dd_data;
struct iser_device *device = ib_conn->device;
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
......@@ -162,7 +162,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->ib_conn = ib_conn;
iser_task->iser_conn = iser_conn;
return 0;
}
/**
......@@ -290,8 +290,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct iser_conn *ib_conn = task->conn->dd_data;
struct iser_device *device = ib_conn->device;
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->device;
ib_dma_unmap_single(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
......@@ -344,7 +344,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
struct iscsi_endpoint *ep;
int error;
......@@ -360,30 +360,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
(unsigned long long)transport_eph);
return -EINVAL;
}
ib_conn = ep->dd_data;
iser_conn = ep->dd_data;
mutex_lock(&ib_conn->state_mutex);
if (ib_conn->state != ISER_CONN_UP) {
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
error = -EINVAL;
iser_err("iser_conn %p state is %d, teardown started\n",
ib_conn, ib_conn->state);
iser_conn, iser_conn->state);
goto out;
}
error = iser_alloc_rx_descriptors(ib_conn, conn->session);
error = iser_alloc_rx_descriptors(iser_conn, conn->session);
if (error)
goto out;
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn);
iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);
conn->dd_data = ib_conn;
ib_conn->iscsi_conn = conn;
conn->dd_data = iser_conn;
iser_conn->iscsi_conn = conn;
out:
mutex_unlock(&ib_conn->state_mutex);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
......@@ -391,11 +391,11 @@ static int
iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
iscsi_conn = cls_conn->dd_data;
ib_conn = iscsi_conn->dd_data;
reinit_completion(&ib_conn->stop_completion);
iser_conn = iscsi_conn->dd_data;
reinit_completion(&iser_conn->stop_completion);
return iscsi_conn_start(cls_conn);
}
......@@ -404,18 +404,18 @@ static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *ib_conn = conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn);
iser_dbg("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);
iscsi_conn_stop(cls_conn, flag);
/*
* Userspace may have goofed up and not bound the connection or
* might have only partially setup the connection.
*/
if (ib_conn) {
if (iser_conn) {
conn->dd_data = NULL;
complete(&ib_conn->stop_completion);
complete(&iser_conn->stop_completion);
}
}
......@@ -447,7 +447,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct Scsi_Host *shost;
struct iser_conn *ib_conn = NULL;
struct iser_conn *iser_conn = NULL;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
......@@ -464,9 +464,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
* the leading conn's ep so this will be NULL;
*/
if (ep) {
ib_conn = ep->dd_data;
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
iser_conn = ep->dd_data;
if (iser_conn->pi_support) {
u32 sig_caps = iser_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
if (iser_pi_guard)
......@@ -476,8 +476,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
}
if (iscsi_host_add(shost,
ep ? ib_conn->device->ib_device->dma_device : NULL))
if (iscsi_host_add(shost, ep ?
iser_conn->device->ib_device->dma_device : NULL))
goto free_host;
if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
......@@ -577,17 +577,17 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct iser_conn *ib_conn = ep->dd_data;
struct iser_conn *iser_conn = ep->dd_data;
int len;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
if (!ib_conn || !ib_conn->cma_id)
if (!iser_conn || !iser_conn->cma_id)
return -ENOTCONN;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&ib_conn->cma_id->route.addr.dst_addr,
&iser_conn->cma_id->route.addr.dst_addr,
param, buf);
break;
default:
......@@ -602,24 +602,24 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
{
int err;
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
struct iscsi_endpoint *ep;
ep = iscsi_create_endpoint(0);
if (!ep)
return ERR_PTR(-ENOMEM);
ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL);
if (!ib_conn) {
iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
if (!iser_conn) {
err = -ENOMEM;
goto failure;
}
ep->dd_data = ib_conn;
ib_conn->ep = ep;
iser_conn_init(ib_conn);
ep->dd_data = iser_conn;
iser_conn->ep = ep;
iser_conn_init(iser_conn);
err = iser_connect(ib_conn, NULL, dst_addr, non_blocking);
err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
if (err)
goto failure;
......@@ -632,22 +632,22 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
static int
iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
int rc;
ib_conn = ep->dd_data;
rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion,
iser_conn = ep->dd_data;
rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */
if (rc == 0) {
mutex_lock(&ib_conn->state_mutex);
if (ib_conn->state == ISER_CONN_TERMINATING ||
ib_conn->state == ISER_CONN_DOWN)
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state == ISER_CONN_TERMINATING ||
iser_conn->state == ISER_CONN_DOWN)
rc = -1;
mutex_unlock(&ib_conn->state_mutex);
mutex_unlock(&iser_conn->state_mutex);
}
iser_info("ib conn %p rc = %d\n", ib_conn, rc);
iser_info("ib conn %p rc = %d\n", iser_conn, rc);
if (rc > 0)
return 1; /* success, this is the equivalent of POLLOUT */
......@@ -660,12 +660,14 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static void
iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
ib_conn = ep->dd_data;
iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
mutex_lock(&ib_conn->state_mutex);
iser_conn_terminate(ib_conn);
iser_conn = ep->dd_data;
iser_info("ep %p iser conn %p state %d\n",
ep, iser_conn, iser_conn->state);
mutex_lock(&iser_conn->state_mutex);
iser_conn_terminate(iser_conn);
/*
* if iser_conn and iscsi_conn are bound, we must wait for
......@@ -673,14 +675,14 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
* the iser resources. Otherwise we are safe to free resources
* immediately.
*/
if (ib_conn->iscsi_conn) {
INIT_WORK(&ib_conn->release_work, iser_release_work);
queue_work(release_wq, &ib_conn->release_work);
mutex_unlock(&ib_conn->state_mutex);
if (iser_conn->iscsi_conn) {
INIT_WORK(&iser_conn->release_work, iser_release_work);
queue_work(release_wq, &iser_conn->release_work);
mutex_unlock(&iser_conn->state_mutex);
} else {
ib_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn);
iser_conn->state = ISER_CONN_DOWN;
mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(iser_conn);
}
iscsi_destroy_endpoint(ep);
}
......@@ -843,7 +845,7 @@ static int __init iser_init(void)
static void __exit iser_exit(void)
{
struct iser_conn *ib_conn, *n;
struct iser_conn *iser_conn, *n;
int connlist_empty;
iser_dbg("Removing iSER datamover...\n");
......@@ -856,8 +858,9 @@ static void __exit iser_exit(void)
if (!connlist_empty) {
iser_err("Error cleanup stage completed but we still have iser "
"connections, destroying them anyway.\n");
list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) {
iser_conn_release(ib_conn);
list_for_each_entry_safe(iser_conn, n, &ig.connlist,
conn_list) {
iser_conn_release(iser_conn);
}
}
......
......@@ -179,7 +179,7 @@ struct iser_cm_hdr {
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
enum iser_ib_conn_state {
enum iser_conn_state {
ISER_CONN_INIT, /* descriptor allocd, no conn */
ISER_CONN_PENDING, /* in the process of being established */
ISER_CONN_UP, /* up and running */
......@@ -281,9 +281,9 @@ struct iser_device {
int cq_active_qps[ISER_MAX_CQ];
int cqs_used;
struct iser_cq_desc *cq_desc;
int (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
int (*iser_alloc_rdma_reg_res)(struct iser_conn *iser_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
void (*iser_free_rdma_reg_res)(struct iser_conn *iser_conn);
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
......@@ -320,7 +320,7 @@ struct fast_reg_descriptor {
struct iser_conn {
struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
enum iser_conn_state state; /* rdma connection state */
atomic_t refcount;
spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */
......@@ -363,7 +363,7 @@ struct iser_conn {
struct iscsi_iser_task {
struct iser_tx_desc desc;
struct iser_conn *ib_conn;
struct iser_conn *iser_conn;
enum iser_task_status status;
struct scsi_cmnd *sc;
int command_sent; /* set if command sent */
......@@ -419,25 +419,26 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
char *rx_data,
int rx_data_len);
void iser_conn_init(struct iser_conn *ib_conn);
void iser_conn_init(struct iser_conn *iser_conn);
void iser_conn_release(struct iser_conn *ib_conn);
void iser_conn_release(struct iser_conn *iser_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *iser_conn);
void iser_release_work(struct work_struct *work);
void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct iser_conn *ib_conn);
struct iser_conn *iser_conn);
void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_snd_completion(struct iser_tx_desc *desc,
struct iser_conn *iser_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
......@@ -449,12 +450,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
int iser_reg_page_vec(struct iser_conn *ib_conn,
int iser_reg_page_vec(struct iser_conn *iser_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg);
......@@ -463,9 +464,9 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
......@@ -476,11 +477,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *ib_conn);
int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct iser_conn *ib_conn);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *iser_conn);
int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct iser_conn *iser_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
#endif
......@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
struct ib_device *dev = iser_task->ib_conn->device->ib_device;
struct ib_device *dev = iser_task->iser_conn->device->ib_device;
struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
char *mem = NULL;
......@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
unsigned long cmd_data_len;
dev = iser_task->ib_conn->device->ib_device;
dev = iser_task->iser_conn->device->ib_device;
ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
......@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
dev = iser_task->ib_conn->device->ib_device;
dev = iser_task->iser_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
......@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
{
struct ib_device *dev;
dev = iser_task->ib_conn->device->ib_device;
dev = iser_task->iser_conn->device->ib_device;
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
......@@ -348,7 +348,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir,
int aligned_len)
{
struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;
struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
......@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf;
......@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
iser_page_vec_build(mem, iser_conn->fmr.page_vec, ibdev);
err = iser_reg_page_vec(iser_conn, iser_conn->fmr.page_vec,
&regd_buf->reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
......@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->fmr.page_vec->data_size,
ib_conn->fmr.page_vec->length,
ib_conn->fmr.page_vec->offset);
for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
iser_conn->fmr.page_vec->data_size,
iser_conn->fmr.page_vec->length,
iser_conn->fmr.page_vec->offset);
for (i = 0; i < iser_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long) ib_conn->fmr.page_vec->pages[i]);
(unsigned long long)iser_conn->fmr.page_vec->pages[i]);
}
if (err)
return err;
......@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{
struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx;
struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
......@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
else
wr->next = &sig_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
if (ret) {
iser_err("reg_sig_mr failed, ret:%d\n", ret);
goto err;
......@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_sge *sge)
{
struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
......@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
else
wr->next = &fastreg_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
if (ret) {
iser_err("fast registration failed, ret:%d\n", ret);
return ret;
......@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
......@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
spin_lock_irqsave(&ib_conn->lock, flags);
desc = list_first_entry(&ib_conn->fastreg.pool,
spin_lock_irqsave(&iser_conn->lock, flags);
desc = list_first_entry(&iser_conn->fastreg.pool,
struct fast_reg_descriptor, list);
list_del(&desc->list);
spin_unlock_irqrestore(&ib_conn->lock, flags);
spin_unlock_irqrestore(&iser_conn->lock, flags);
regd_buf->reg.mem_h = desc;
}
......@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
return 0;
err_reg:
if (desc) {
spin_lock_irqsave(&ib_conn->lock, flags);
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_irqrestore(&ib_conn->lock, flags);
spin_lock_irqsave(&iser_conn->lock, flags);
list_add_tail(&desc->list, &iser_conn->fastreg.pool);
spin_unlock_irqrestore(&iser_conn->lock, flags);
}
return err;
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment