Commit cfe876d8 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/cxgb4: Remove kref accounting for sync operation

Ucontext allocation and release aren't async events and don't need kref
accounting. The common layer of RDMA subsystem ensures that dealloc
ucontext will be called after all other objects are released.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Raju Rangoju <rajur@chelsio.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent be56b07b
@@ -589,7 +589,6 @@ struct c4iw_ucontext {
 	u32 key;
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
-	struct kref kref;
 	bool is_32b_cqe;
 };
@@ -598,18 +597,6 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 	return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
-void _c4iw_free_ucontext(struct kref *kref);
-
-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
-{
-	kref_put(&ucontext->kref, _c4iw_free_ucontext);
-}
-
-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
-{
-	kref_get(&ucontext->kref);
-}
-
 struct c4iw_mm_entry {
 	struct list_head entry;
 	u64 addr;
......
@@ -58,28 +58,19 @@ static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
-void _c4iw_free_ucontext(struct kref *kref)
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct c4iw_ucontext *ucontext;
+	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
 	struct c4iw_dev *rhp;
 	struct c4iw_mm_entry *mm, *tmp;
 
-	ucontext = container_of(kref, struct c4iw_ucontext, kref);
+	pr_debug("context %p\n", context);
 	rhp = to_c4iw_dev(ucontext->ibucontext.device);
 
-	pr_debug("ucontext %p\n", ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
 	kfree(ucontext);
-}
-
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
-{
-	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
-
-	pr_debug("context %p\n", context);
-	c4iw_put_ucontext(ucontext);
 	return 0;
 }
@@ -102,7 +93,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
-	kref_init(&context->kref);
 
 	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
 		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
......
@@ -904,8 +904,6 @@ static void free_qp_work(struct work_struct *work)
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
 
-	if (ucontext)
-		c4iw_put_ucontext(ucontext);
 	c4iw_put_wr_wait(qhp->wr_waitp);
 	kfree(qhp);
 }
@@ -2338,7 +2336,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			insert_mmap(ucontext, ma_sync_key_mm);
 		}
 
-		c4iw_get_ucontext(ucontext);
 		qhp->ucontext = ucontext;
 	}
 	if (!attrs->srq) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.