Commit e3524b26 authored by Bryan Tan, committed by Jason Gunthorpe

RDMA/vmw_pvrdma: Avoid use after free due to QP/CQ/SRQ destroy

The use of wait queues in vmw_pvrdma to handle concurrent access to a
resource leaves a race condition that can cause a use-after-free bug:
the destroy path can observe the reference count hit zero and free the
QP, CQ or SRQ while an event or interrupt handler on another CPU is
still about to call wake_up() on the object's wait queue.

Fix this by using the pattern from other drivers: complete() protected
by dec_and_test() (to ensure complete() is called only once), followed
by wait_for_completion() in the destroy path.
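
For illustration only (not part of the patch), here is a minimal sketch of
the racy wait-queue teardown versus the dec_and_test()/complete() pattern.
The struct obj and the obj_put()/obj_destroy() helpers are hypothetical
stand-ins for the driver's QP/CQ/SRQ objects:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/wait.h>

/* Hypothetical object; the driver keeps the same fields (a refcnt and,
 * after this patch, a struct completion free) in its QP/CQ/SRQ structs. */
struct obj {
	atomic_t refcnt;		/* set to 1 at create time */
	wait_queue_head_t wait;		/* old scheme */
	struct completion free;		/* new scheme */
};

/* Old, racy teardown: another CPU can decrement refcnt to zero and then
 * call wake_up(&o->wait) after wait_event() has already observed zero
 * and the destroyer has freed the object: a use-after-free. */
static void obj_destroy_racy(struct obj *o)
{
	atomic_dec(&o->refcnt);
	wait_event(o->wait, !atomic_read(&o->refcnt));
	/* object freed here, possibly while wake_up() is still running */
}

/* New scheme: atomic_dec_and_test() guarantees exactly one caller sees
 * the count hit zero, so complete() runs exactly once. */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcnt))
		complete(&o->free);
}

static void obj_destroy(struct obj *o)
{
	obj_put(o);			/* drop the create-time reference */
	wait_for_completion(&o->free);	/* wait out in-flight handlers */
	/* now it is safe to free the object */
}

This is the same sequence the diff below installs in pvrdma_free_cq(),
pvrdma_free_qp() and pvrdma_free_srq(), with the wake_up() sites in the
event and interrupt handlers replaced by the complete()-on-last-reference
put.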

Fixes: 29c8d9eb ("IB: Add vmw_pvrdma driver")
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 30a366a9
@@ -94,7 +94,7 @@ struct pvrdma_cq {
 	u32 cq_handle;
 	bool is_kernel;
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
 	u32 srq_handle;
 	int npages;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_dev {
...
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
 	atomic_set(&cq->refcnt, 1);
-	init_waitqueue_head(&cq->wait);
+	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
 	memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (atomic_dec_and_test(&cq->refcnt))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
...
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
-			wake_up(&qp->wait);
+		if (atomic_dec_and_test(&qp->refcnt))
+			complete(&qp->free);
 	}
 }
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
-			wake_up(&cq->wait);
+		if (atomic_dec_and_test(&cq->refcnt))
+			complete(&cq->free);
 	}
 }
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
 	}
 	if (srq) {
 		if (refcount_dec_and_test(&srq->refcnt))
-			wake_up(&srq->wait);
+			complete(&srq->free);
 	}
 }
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			atomic_dec(&cq->refcnt);
-			if (atomic_read(&cq->refcnt))
-				wake_up(&cq->wait);
+			if (atomic_dec_and_test(&cq->refcnt))
+				complete(&cq->free);
 		}
 
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
 	}
...
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		spin_lock_init(&qp->rq.lock);
 		mutex_init(&qp->mutex);
 		atomic_set(&qp->refcnt, 1);
-		init_waitqueue_head(&qp->wait);
+		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
@@ -428,8 +428,9 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (atomic_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
 
 	if (!qp->is_kernel) {
 		if (qp->rumem)
...
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 	spin_lock_init(&srq->lock);
 	refcount_set(&srq->refcnt, 1);
-	init_waitqueue_head(&srq->wait);
+	init_completion(&srq->free);
 
 	dev_dbg(&dev->pdev->dev,
 		"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 	dev->srq_tbl[srq->srq_handle] = NULL;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-	if (!refcount_dec_and_test(&srq->refcnt))
-		wait_event(srq->wait, !refcount_read(&srq->refcnt));
+	if (refcount_dec_and_test(&srq->refcnt))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
 
 	/* There is no support for kernel clients, so this is safe. */
 	ib_umem_release(srq->umem);