Commit 13ce8317 authored by Steve Wise, committed by Doug Ledford

iw_cxgb4: allocate wait object for each cq object

Remove the local stack-allocated c4iw_wr_wait object in preparation for
correctly handling timeouts.

Also clean up some error-path unwind logic to make it more readable.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent a3f12da0
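
Background for the first change: the firmware work request's cookie points at the wait object, so a wait object on the caller's stack cannot safely outlive a request that times out. Tying the wait object's lifetime to the CQ (heap-allocating it in create and freeing it only at destroy) is what prepares the driver for the timeout handling mentioned above. The following is a minimal, hypothetical sketch of that allocation pattern, using stand-in types (wr_wait, cq_object) rather than the driver's real structures; it is illustrative only, not the driver's code.

#include <linux/slab.h>

/*
 * Simplified sketch (hypothetical types): the wait object is allocated
 * together with the CQ and freed only when the CQ is torn down, so it
 * can outlive any single firmware request.
 */
struct wr_wait {                        /* stands in for struct c4iw_wr_wait */
        int ret;
        int done;
};

struct cq_object {                      /* stands in for struct c4iw_cq */
        struct wr_wait *wr_waitp;       /* same lifetime as the CQ */
        /* ... queue state ... */
};

static struct cq_object *cq_object_alloc(void)
{
        struct cq_object *chp;

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return NULL;
        chp->wr_waitp = kzalloc(sizeof(*chp->wr_waitp), GFP_KERNEL);
        if (!chp->wr_waitp) {
                kfree(chp);
                return NULL;
        }
        return chp;
}

static void cq_object_free(struct cq_object *chp)
{
        kfree(chp->wr_waitp);           /* freed with the CQ, not left on a stack frame */
        kfree(chp);
}
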
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,12 +33,12 @@
 #include "iw_cxgb4.h"

 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+                      struct c4iw_wr_wait *wr_waitp)
 {
         struct fw_ri_res_wr *res_wr;
         struct fw_ri_res *res;
         int wr_len;
-        struct c4iw_wr_wait wr_wait;
         int ret;

         wr_len = sizeof *res_wr + sizeof *res;
@@ -50,16 +50,16 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                         FW_RI_RES_WR_NRES_V(1) |
                         FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-        res_wr->cookie = (uintptr_t)&wr_wait;
+        res_wr->cookie = (uintptr_t)wr_waitp;
         res = res_wr->res;
         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
         res->u.cq.op = FW_RI_RES_OP_RESET;
         res->u.cq.iqid = cpu_to_be32(cq->cqid);

-        c4iw_init_wr_wait(&wr_wait);
+        c4iw_init_wr_wait(wr_waitp);
         ret = c4iw_ofld_send(rdev, skb);
         if (!ret) {
-                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+                ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
         }

         kfree(cq->sw_queue);
@@ -71,13 +71,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 }

 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-                     struct c4iw_dev_ucontext *uctx)
+                     struct c4iw_dev_ucontext *uctx,
+                     struct c4iw_wr_wait *wr_waitp)
 {
         struct fw_ri_res_wr *res_wr;
         struct fw_ri_res *res;
         int wr_len;
         int user = (uctx != &rdev->uctx);
-        struct c4iw_wr_wait wr_wait;
         int ret;
         struct sk_buff *skb;

@@ -119,7 +119,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                         FW_RI_RES_WR_NRES_V(1) |
                         FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-        res_wr->cookie = (uintptr_t)&wr_wait;
+        res_wr->cookie = (uintptr_t)wr_waitp;
         res = res_wr->res;
         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
         res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +139,13 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         res->u.cq.iqsize = cpu_to_be16(cq->size);
         res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

-        c4iw_init_wr_wait(&wr_wait);
+        c4iw_init_wr_wait(wr_waitp);
         ret = c4iw_ofld_send(rdev, skb);
         if (ret)
                 goto err4;
-        pr_debug("wait_event wr_wait %p\n", &wr_wait);
-        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+        pr_debug("wait_event wr_wait %p\n", wr_waitp);
+        ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
         if (ret)
                 goto err4;

@@ -868,8 +868,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
                   : NULL;
         destroy_cq(&chp->rhp->rdev, &chp->cq,
                    ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
-                   chp->destroy_skb);
-        chp->destroy_skb = NULL;
+                   chp->destroy_skb, chp->wr_waitp);
+        kfree(chp->wr_waitp);
         kfree(chp);
         return 0;
 }
@@ -901,12 +901,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         chp = kzalloc(sizeof(*chp), GFP_KERNEL);
         if (!chp)
                 return ERR_PTR(-ENOMEM);
+        chp->wr_waitp = kzalloc(sizeof(*chp->wr_waitp), GFP_KERNEL);
+        if (!chp->wr_waitp) {
+                ret = -ENOMEM;
+                goto err_free_chp;
+        }
+        c4iw_init_wr_wait(chp->wr_waitp);

         wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
         chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
         if (!chp->destroy_skb) {
                 ret = -ENOMEM;
-                goto err1;
+                goto err_free_wr_wait;
         }

         if (ib_context)
@@ -947,9 +953,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         chp->cq.vector = vector;

         ret = create_cq(&rhp->rdev, &chp->cq,
-                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+                        chp->wr_waitp);
         if (ret)
-                goto err2;
+                goto err_free_skb;

         chp->rhp = rhp;
         chp->cq.size--;                 /* status page */
@@ -960,16 +967,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         init_waitqueue_head(&chp->wait);
         ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
         if (ret)
-                goto err3;
+                goto err_destroy_cq;

         if (ucontext) {
                 ret = -ENOMEM;
                 mm = kmalloc(sizeof *mm, GFP_KERNEL);
                 if (!mm)
-                        goto err4;
+                        goto err_remove_handle;
                 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                 if (!mm2)
-                        goto err5;
+                        goto err_free_mm;

                 uresp.qid_mask = rhp->rdev.cqmask;
                 uresp.cqid = chp->cq.cqid;
@@ -984,7 +991,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 ret = ib_copy_to_udata(udata, &uresp,
                                        sizeof(uresp) - sizeof(uresp.reserved));
                 if (ret)
-                        goto err6;
+                        goto err_free_mm2;

                 mm->key = uresp.key;
                 mm->addr = virt_to_phys(chp->cq.queue);
@@ -1000,19 +1007,21 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                  chp->cq.cqid, chp, chp->cq.size,
                  chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
         return &chp->ibcq;
-err6:
+err_free_mm2:
         kfree(mm2);
-err5:
+err_free_mm:
         kfree(mm);
-err4:
+err_remove_handle:
         remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
         destroy_cq(&chp->rhp->rdev, &chp->cq,
                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
-                   chp->destroy_skb);
-err2:
+                   chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
         kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+        kfree(chp->wr_waitp);
+err_free_chp:
         kfree(chp);
         return ERR_PTR(ret);
 }
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -425,6 +425,7 @@ struct c4iw_cq {
         spinlock_t comp_handler_lock;
         atomic_t refcnt;
         wait_queue_head_t wait;
+        struct c4iw_wr_wait *wr_waitp;
 };

 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
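
On the second point in the commit message, the numbered error labels (err1 through err6) in c4iw_create_cq become descriptive ones (err_free_chp, err_free_wr_wait, err_free_skb, ...), each named after the cleanup step it performs. A minimal sketch of this goto-unwind idiom follows, using hypothetical resources rather than the driver's own; it only illustrates the style the diff above adopts.

#include <linux/slab.h>

/*
 * Hypothetical illustration of descriptive-label unwinding: resources are
 * released in reverse order of acquisition, and each label names exactly
 * what it undoes, so inserting or removing a setup step stays readable.
 */
struct two_bufs {
        void *a;
        void *b;
};

static struct two_bufs *two_bufs_alloc(void)
{
        struct two_bufs *t;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                goto err_out;
        t->a = kzalloc(64, GFP_KERNEL);
        if (!t->a)
                goto err_free_t;
        t->b = kzalloc(64, GFP_KERNEL);
        if (!t->b)
                goto err_free_a;
        return t;

err_free_a:
        kfree(t->a);
err_free_t:
        kfree(t);
err_out:
        return NULL;
}
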