Commit a61eb613 authored by Bryan Tan, committed by Jason Gunthorpe

RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t

refcount_t is the preferred type for refcounts. Change the
QP and CQ refcnt fields to use refcount_t.
Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1a9ecf8d
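For context, refcount_t (from <linux/refcount.h>) provides the same get/put semantics as an open-coded atomic_t counter, but it saturates and warns on overflow and on increment-from-zero instead of silently wrapping, which is why it is preferred for reference counts. Below is a minimal sketch of the one-to-one API mapping this conversion relies on; struct foo and the foo_* helpers are hypothetical stand-ins for the driver's CQ/QP objects, not actual driver code.

#include <linux/refcount.h>
#include <linux/completion.h>

/* Hypothetical object; stands in for pvrdma_cq / pvrdma_qp. */
struct foo {
	refcount_t refcnt;		/* was: atomic_t refcnt; */
	struct completion free;
};

static void foo_init(struct foo *f)
{
	refcount_set(&f->refcnt, 1);	/* was: atomic_set(&f->refcnt, 1); */
	init_completion(&f->free);
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);	/* was: atomic_inc(&f->refcnt); */
}

static void foo_put(struct foo *f)
{
	/* was: if (atomic_dec_and_test(&f->refcnt)) */
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->free);	/* signal the waiter in the destroy path */
}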
@@ -93,7 +93,7 @@ struct pvrdma_cq {
 	struct pvrdma_page_dir pdir;
 	u32 cq_handle;
 	bool is_kernel;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct completion free;
 };
@@ -196,7 +196,7 @@ struct pvrdma_qp {
 	u8 state;
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct completion free;
 };
@@ -177,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	else
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
-	atomic_set(&cq->refcnt, 1);
+	refcount_set(&cq->refcnt, 1);
 	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
@@ -229,7 +229,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	if (atomic_dec_and_test(&cq->refcnt))
+	if (refcount_dec_and_test(&cq->refcnt))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
 	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
 	if (qp)
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
 	if (qp && qp->ibqp.event_handler) {
@@ -346,7 +346,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		if (atomic_dec_and_test(&qp->refcnt))
+		if (refcount_dec_and_test(&qp->refcnt))
 			complete(&qp->free);
 	}
 }
@@ -359,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
 	if (cq)
-		atomic_inc(&cq->refcnt);
+		refcount_inc(&cq->refcnt);
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 	if (cq && cq->ibcq.event_handler) {
@@ -372,7 +372,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		if (atomic_dec_and_test(&cq->refcnt))
+		if (refcount_dec_and_test(&cq->refcnt))
 			complete(&cq->free);
 	}
 }
@@ -531,13 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
 		if (cq)
-			atomic_inc(&cq->refcnt);
+			refcount_inc(&cq->refcnt);
 		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			if (atomic_dec_and_test(&cq->refcnt))
+			if (refcount_dec_and_test(&cq->refcnt))
 				complete(&cq->free);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
@@ -245,7 +245,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 	mutex_init(&qp->mutex);
-	atomic_set(&qp->refcnt, 1);
+	refcount_set(&qp->refcnt, 1);
 	init_completion(&qp->free);
 	qp->state = IB_QPS_RESET;
@@ -427,7 +427,7 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-	if (atomic_dec_and_test(&qp->refcnt))
+	if (refcount_dec_and_test(&qp->refcnt))
 		complete(&qp->free);
 	wait_for_completion(&qp->free);
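The teardown discipline visible in the hunks above is unchanged by the conversion: each object starts life with a reference count of 1, the event and interrupt handlers take a short-lived extra reference while they look the object up in the device table, and the destroy path drops the initial reference and then blocks on the completion until the last holder signals it. Below is a simplified sketch of that lifetime pattern, reusing the hypothetical struct foo helpers from the earlier sketch; it is an illustration of the pattern, not the driver's actual code.

/* Event path: hold a temporary reference while the object is in use. */
static void foo_handle_event(struct foo *f)
{
	foo_get(f);		/* refcount_inc() taken under the table lock */
	/* ... invoke the event or completion handler ... */
	foo_put(f);		/* refcount_dec_and_test(); completes if last */
}

/* Destroy path: drop the initial reference, then wait for stragglers. */
static void foo_destroy(struct foo *f)
{
	foo_put(f);			/* drops the reference set at create time */
	wait_for_completion(&f->free);	/* returns once the last foo_put() ran */
	/* safe to release the object's resources here */
}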