Commit f3a2c3ee authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mthca: Don't execute QUERY_QP firmware command for QP in RESET state
  IB/ehca: Use proper GFP_ flags for get_zeroed_page()
  IB/mthca: Fix PRM compliance problem in atomic-send completions
  RDMA/ucma: Don't report events with invalid user context
  RDMA/ucma: Fix struct ucma_event leak when backlog is full
  RDMA/iwcm: iWARP connection timeouts shouldn't be reported as rejects
  IB/iser: Return error code when PDUs may not be sent
  IB/mthca: Fix off-by-one in FMR handling on memfree
parents 656829e2 f5e10529
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		*sin = iw_event->local_addr;
 		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
 		*sin = iw_event->remote_addr;
-		if (iw_event->status)
-			event.event = RDMA_CM_EVENT_REJECTED;
-		else
+		switch (iw_event->status) {
+		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
+			break;
+		case -ECONNRESET:
+		case -ECONNREFUSED:
+			event.event = RDMA_CM_EVENT_REJECTED;
+			break;
+		case -ETIMEDOUT:
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			break;
+		default:
+			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
+			break;
+		}
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
...
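Note: the iWARP change above replaces a two-way status check with a four-way mapping, so a connect attempt that simply times out is reported as RDMA_CM_EVENT_UNREACHABLE rather than as a reject. A minimal userspace sketch of that mapping follows; the enum values are illustrative stand-ins, not the kernel's rdma_cm definitions.

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the rdma_cm event codes. */
    enum cm_event {
            CM_EVENT_ESTABLISHED,
            CM_EVENT_REJECTED,
            CM_EVENT_UNREACHABLE,
            CM_EVENT_CONNECT_ERROR,
    };

    /* Map an iWARP connect status (0 or a negative errno) to a CM event. */
    static enum cm_event map_iw_status(int status)
    {
            switch (status) {
            case 0:
                    return CM_EVENT_ESTABLISHED;
            case -ECONNRESET:
            case -ECONNREFUSED:
                    return CM_EVENT_REJECTED;       /* peer actively refused */
            case -ETIMEDOUT:
                    return CM_EVENT_UNREACHABLE;    /* no answer at all */
            default:
                    return CM_EVENT_CONNECT_ERROR;
            }
    }

    int main(void)
    {
            printf("%d\n", map_iw_status(-ETIMEDOUT)); /* prints 2 (UNREACHABLE) */
            return 0;
    }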
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -EDQUOT;
+			kfree(uevent);
 			goto out;
 		}
 		ctx->backlog--;
+	} else if (!ctx->uid) {
+		/*
+		 * We ignore events for new connections until userspace has set
+		 * their context. This can only happen if an error occurs on a
+		 * new connection before the user accepts it. This is okay,
+		 * since the accept will just fail later.
+		 */
+		kfree(uevent);
+		goto out;
 	}
+
 	list_add_tail(&uevent->list, &ctx->file->event_list);
 	wake_up_interruptible(&ctx->file->poll_wait);
 out:
...
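Note: both ucma fixes share one pattern: once the handler decides an event cannot be queued (backlog exhausted, or no user context yet), the freshly allocated uevent must be freed before the early exit, otherwise it leaks. A small userspace sketch of that shape; the types and the list are hypothetical, only the free-before-early-return structure mirrors the patch.

    #include <stdlib.h>
    #include <errno.h>

    struct uevent { struct uevent *next; int type; };
    struct ctx    { struct uevent *event_list; int backlog; void *uid; };

    /* Queue an event unless the context cannot accept it; never leak ev. */
    static int queue_event(struct ctx *ctx, struct uevent *ev, int is_connect_req)
    {
            if (is_connect_req) {
                    if (!ctx->backlog) {
                            free(ev);               /* would otherwise leak */
                            return -EDQUOT;
                    }
                    ctx->backlog--;
            } else if (!ctx->uid) {
                    /* No user context yet: drop the event; the later accept
                     * will simply fail, as the kernel comment explains. */
                    free(ev);
                    return 0;
            }
            ev->next = ctx->event_list;             /* hand ownership to the list */
            ctx->event_list = ev;
            return 0;
    }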
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 					      ib_device);
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
 					      ib_device);
 	struct hipz_query_port *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
...
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
 	u64 *rblock;
 	unsigned long block_count;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
 		ret = -ENOMEM;
...
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
 int ehca_munmap(unsigned long addr, size_t len);
 
 #ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
...
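Note: the ehca changes thread a gfp_t argument through ehca_alloc_fw_ctrlblock() so each call site can state its allocation context: GFP_KERNEL where sleeping is allowed, GFP_ATOMIC in ehca_error_data(), which can run where sleeping is not. A compile-only userspace sketch of the pattern, using a hypothetical helper rather than the real ehca code.

    #include <stdlib.h>

    /* Userspace stand-in for gfp_t: the point is that the *caller* names
     * the allocation context instead of the helper hard-coding it. */
    typedef enum { ALLOC_MAY_SLEEP, ALLOC_ATOMIC } alloc_flags_t;

    static void *alloc_ctrlblock(alloc_flags_t flags)
    {
            /* A kernel helper would pass `flags` straight through to
             * get_zeroed_page()/kmem_cache_zalloc(); here we only model
             * the signature change. */
            (void)flags;
            return calloc(1, 4096);
    }

    static void free_ctrlblock(void *p)
    {
            free(p);
    }

    int main(void)
    {
            void *sleeping_ok = alloc_ctrlblock(ALLOC_MAY_SLEEP);  /* process context */
            void *no_sleep    = alloc_ctrlblock(ALLOC_ATOMIC);     /* e.g. error/IRQ path */

            free_ctrlblock(no_sleep);
            free_ctrlblock(sleeping_ok);
            return 0;
    }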
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
 #ifdef CONFIG_PPC_64K_PAGES
 static struct kmem_cache *ctblk_cache = NULL;
 
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
 	u64 h_ret;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_gen_err("Cannot allocate rblock memory.");
 		return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
 	int ret = 0;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
 								\
 	shca = dev->driver_data;				\
 								\
-	rblock = ehca_alloc_fw_ctrlblock();			\
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);		\
 	if (!rblock) {						\
 		dev_err(dev, "Can't allocate rblock memory.");	\
 		return 0;					\
...
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 	u32 i;
 	u64 *kpage;
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
...
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	unsigned long spl_flags = 0;
 
 	/* do query_qp to obtain current attr values */
-	mqpcb = ehca_alloc_fw_ctrlblock();
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!mqpcb) {
 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
 		return -EINVAL;
 	}
 
-	qpcb = ehca_alloc_fw_ctrlblock();
+	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!qpcb) {
 		ehca_err(qp->device,"Out of memory for qpcb "
 			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
...
@@ -54,6 +54,10 @@ enum {
 	MTHCA_CQ_ENTRY_SIZE = 0x20
 };
 
+enum {
+	MTHCA_ATOMIC_BYTE_LEN = 8
+};
+
 /*
  * Must be packed because start is 64 bits but only aligned to 32 bits.
  */
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 		break;
 	case MTHCA_OPCODE_ATOMIC_CS:
 		entry->opcode    = IB_WC_COMP_SWAP;
-		entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+		entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
 		break;
 	case MTHCA_OPCODE_ATOMIC_FA:
 		entry->opcode    = IB_WC_FETCH_ADD;
-		entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+		entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
 		break;
 	case MTHCA_OPCODE_BIND_MW:
 		entry->opcode    = IB_WC_BIND_MW;
...
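Note: per the change above, completions for atomic operations now report a fixed 8-byte length (MTHCA_ATOMIC_BYTE_LEN) instead of trusting the CQE's byte_cnt field, which is not defined for atomics; that is the PRM-compliance issue the commit title refers to. A userspace sketch of the poll path's length selection; the opcode values and the cqe layout here are made up for illustration.

    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl(), standing in for be32_to_cpu() */

    #define ATOMIC_BYTE_LEN 8       /* compare-and-swap / fetch-and-add payload */

    enum { OP_RDMA_READ = 1, OP_ATOMIC_CS = 2, OP_ATOMIC_FA = 3 };

    struct fake_cqe { uint32_t byte_cnt_be; uint8_t opcode; };

    /* Return the completed byte count for a work completion. */
    static uint32_t wc_byte_len(const struct fake_cqe *cqe)
    {
            switch (cqe->opcode) {
            case OP_ATOMIC_CS:
            case OP_ATOMIC_FA:
                    /* byte_cnt is undefined for atomics; length is always 8 */
                    return ATOMIC_BYTE_LEN;
            default:
                    return ntohl(cqe->byte_cnt_be); /* big-endian on the wire */
            }
    }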
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 
 	list_for_each_entry(chunk, &icm->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
-			if (chunk->mem[i].length >= offset) {
+			if (chunk->mem[i].length > offset) {
 				page = chunk->mem[i].page;
 				goto out;
 			}
...
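Note: the one-character change above fixes a boundary condition: a chunk of length N only covers offsets 0..N-1, so an offset equal to the chunk's length belongs to the next chunk and the comparison must be strict. A small standalone sketch of the same search over plain buffers.

    #include <stddef.h>
    #include <stdio.h>

    struct buf { size_t length; const char *data; };

    /* Find which buffer holds byte `offset`, walking buffers in order.
     * The strict '>' matters: offset == length means "first byte of the
     * NEXT buffer", which is exactly the off-by-one the patch fixes. */
    static const char *find_byte(const struct buf *bufs, size_t n, size_t offset)
    {
            for (size_t i = 0; i < n; ++i) {
                    if (bufs[i].length > offset)
                            return bufs[i].data + offset;
                    offset -= bufs[i].length;
            }
            return NULL;
    }

    int main(void)
    {
            struct buf bufs[] = { { 4, "abcd" }, { 4, "efgh" } };
            /* offset 4 must resolve to 'e', the first byte of the second buffer */
            printf("%c\n", *find_byte(bufs, 2, 4));
            return 0;
    }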
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	int err;
-	struct mthca_mailbox *mailbox;
+	int err = 0;
+	struct mthca_mailbox *mailbox = NULL;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *context;
 	int mthca_state;
 	u8 status;
 
+	if (qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		goto done;
+	}
+
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	mthca_state = be32_to_cpu(context->flags) >> 28;
 
 	qp_attr->qp_state            = to_ib_qp_state(mthca_state);
-	qp_attr->cur_qp_state        = qp_attr->qp_state;
 	qp_attr->path_mtu            = context->mtu_msgmax >> 5;
 	qp_attr->path_mig_state      =
 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
 	qp_attr->qp_access_flags     =
 		to_ib_qp_access_flags(be32_to_cpu(context->params2));
-	qp_attr->cap.max_send_wr     = qp->sq.max;
-	qp_attr->cap.max_recv_wr     = qp->rq.max;
-	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
-	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
 	if (qp->transport == RC || qp->transport == UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->retry_cnt           = (be32_to_cpu(context->params1) >> 16) & 0x7;
 	qp_attr->rnr_retry           = context->pri_path.rnr_retry >> 5;
 	qp_attr->alt_timeout         = context->alt_path.ackto >> 3;
-	qp_init_attr->cap            = qp_attr->cap;
+
+done:
+	qp_attr->cur_qp_state        = qp_attr->qp_state;
+	qp_attr->cap.max_send_wr     = qp->sq.max;
+	qp_attr->cap.max_recv_wr     = qp->rq.max;
+	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+	qp_init_attr->cap            = qp_attr->cap;
 
 out:
 	mthca_free_mailbox(dev, mailbox);
...
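Note: the mthca_query_qp() rework above adds an early exit: a QP in the RESET state has no firmware context, so the QUERY_QP firmware command must not be issued for it. The driver instead reports RESET and jumps to the done: label, where the software-tracked capabilities are now filled in for both paths. A sketch of that control flow; the structures and names are illustrative, not the mthca API.

    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTS };

    struct qp      { enum qp_state state; int max_send_wr, max_recv_wr; };
    struct qp_attr { enum qp_state state; int max_send_wr, max_recv_wr; };

    /* Pretend firmware query; only legal when the QP has a context. */
    static int fw_query(const struct qp *qp, struct qp_attr *attr)
    {
            attr->state = qp->state;
            return 0;
    }

    static int query_qp(const struct qp *qp, struct qp_attr *attr)
    {
            int err = 0;

            if (qp->state == QPS_RESET) {
                    /* No firmware context to query: report RESET directly. */
                    attr->state = QPS_RESET;
                    goto done;
            }

            err = fw_query(qp, attr);
            if (err)
                    return err;

    done:
            /* Software-tracked limits are valid in every state. */
            attr->max_send_wr = qp->max_send_wr;
            attr->max_recv_wr = qp->max_recv_wr;
            return err;
    }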
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
 	 * - if yes, the mtask is recycled at iscsi_complete_pdu
 	 * - if no, the mtask is recycled at iser_snd_completion
 	 */
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 
 	return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
 	error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
 
 iscsi_iser_ctask_xmit_exit:
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 	return error;
 }
...
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 
 static int
 iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
-	int rc = 0;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-	write_lock_bh(conn->recv_lock);
 	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
 	    ISER_QP_MAX_REQ_DTOS) {
-		iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
-		set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-		rc = -EAGAIN;
+		iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+		return -ENOBUFS;
 	}
-	write_unlock_bh(conn->recv_lock);
-	return rc;
+	return 0;
 }
 
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 	}
 
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	itt = ntohl(hdr->itt);
 	data_seg_len = ntoh24(hdr->dlength);
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
 	}
 
 	if (iser_check_xmit(conn,mtask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn      *conn = iser_conn->iscsi_conn;
 	struct iscsi_mgmt_task *mtask;
+	int resume_tx = 0;
 
 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
 
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	if (tx_desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, tx_desc);
 
+	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+	    ISER_QP_MAX_REQ_DTOS)
+		resume_tx = 1;
+
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	write_lock(conn->recv_lock);
-	if (conn->suspend_tx) {
+	if (resume_tx) {
 		iser_dbg("%ld resuming tx\n",jiffies);
-		clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 	}
-	write_unlock(conn->recv_lock);
 
 	if (tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
...
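Note: the iSER rework above separates "send queue full" from a real connection failure. iser_check_xmit() now returns -ENOBUFS when post_send_buf_count has reached ISER_QP_MAX_REQ_DTOS, the callers in the iscsi_iser glue no longer escalate that code to iscsi_conn_failure(), and the send-completion path requeues xmit work once the counter drops back below the limit. A userspace sketch of that back-pressure pattern; the counter handling is simplified and single-threaded, unlike the atomic_t used by the driver, and the check here also posts for brevity.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_POSTED 8    /* stand-in for ISER_QP_MAX_REQ_DTOS */

    static int posted;      /* sends currently outstanding on the QP */

    /* Called before posting a send: a full queue is back-pressure, not an error. */
    static int check_xmit(void)
    {
            if (posted == MAX_POSTED)
                    return -ENOBUFS;        /* caller retries later, no teardown */
            posted++;
            return 0;
    }

    /* Called from the send-completion handler. */
    static void snd_completion(void (*resume_tx)(void))
    {
            int was_full = (posted == MAX_POSTED);

            posted--;
            if (was_full)
                    resume_tx();    /* e.g. requeue the transmit work item */
    }

    static void resume(void) { puts("resuming tx"); }

    int main(void)
    {
            for (int i = 0; i < MAX_POSTED; i++)
                    check_xmit();
            printf("full: %d\n", check_xmit() == -ENOBUFS); /* prints 1 */
            snd_completion(resume);                         /* prints "resuming tx" */
            return 0;
    }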