Commit 5aef7cf2 authored by Bryan Tan, committed by Jason Gunthorpe

RDMA/vmw_pvrdma: Clarify QP and CQ is_kernel logic

Be more consistent in setting and checking the is_kernel
flag for QPs and CQs.
Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 926aae27
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	}
 
 	cq->ibcq.cqe = entries;
+	cq->is_kernel = !context;
 
-	if (context) {
+	if (!cq->is_kernel) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
 			ret = -EFAULT;
 			goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
 		npages = ib_umem_page_count(cq->umem);
 	} else {
-		cq->is_kernel = true;
-
 		/* One extra page for shared ring state */
 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
 			      PAGE_SIZE - 1) / PAGE_SIZE;
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
-	if (context) {
+	if (!cq->is_kernel) {
 		cq->uar = &(to_vucontext(context)->uar);
 
 		/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 err_page_dir:
 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
 err_umem:
-	if (context)
+	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
 err_cq:
 	atomic_dec(&dev->num_cqs);
@@ -249,8 +249,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
+		qp->is_kernel = !(pd->uobject && udata);
 
-		if (pd->uobject && udata) {
+		if (!qp->is_kernel) {
 			dev_dbg(&dev->pdev->dev,
 				"create queuepair from user space\n");
 
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 				qp->npages_recv = 0;
 			qp->npages = qp->npages_send + qp->npages_recv;
 		} else {
-			qp->is_kernel = true;
-
 			ret = pvrdma_set_sq_size(to_vdev(pd->device),
 						 &init_attr->cap, qp);
 			if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 err_pdir:
 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
 err_umem:
-	if (pd->uobject && udata) {
+	if (!qp->is_kernel) {
 		if (qp->rumem)
 			ib_umem_release(qp->rumem);
 		if (qp->sumem)
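
Note: both files follow the same refactoring pattern: the kernel-vs-user ownership flag is computed once when the object is created, and every later branch, including the error-unwind path, tests that single flag instead of re-evaluating the original context/udata condition. Below is a minimal, standalone sketch of that pattern in plain C; the names (resource_create, struct resource, umem) are hypothetical and this is only an illustration of the idea, not the driver code itself.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct resource {
	bool is_kernel;   /* decided once at create time */
	void *umem;       /* stands in for a user-memory mapping */
};

/*
 * Hypothetical constructor illustrating the pattern: the flag is
 * derived a single time from the creation context, and both the
 * setup branch and the error-unwind path test the flag rather
 * than re-evaluating the original condition.
 */
static struct resource *resource_create(void *ucontext, void *udata)
{
	struct resource *res = calloc(1, sizeof(*res));

	if (!res)
		return NULL;

	res->is_kernel = !(ucontext && udata);

	if (!res->is_kernel) {
		/* user-space caller: set up its (here: fake) mapping */
		res->umem = malloc(64);
		if (!res->umem)
			goto err;
	}
	/* kernel caller: nothing user-visible to map */

	/* ... later setup steps that can fail would also jump to err ... */
	return res;

err:
	/* the unwind keys off the same flag, so it cannot drift out
	 * of sync with the setup branch above */
	if (!res->is_kernel)
		free(res->umem);
	free(res);
	return NULL;
}

int main(void)
{
	struct resource *krn = resource_create(NULL, NULL);
	struct resource *usr = resource_create((void *)&krn, (void *)&krn);

	printf("kernel-owned: %d, user-owned: %d\n",
	       krn ? (int)krn->is_kernel : -1,
	       usr ? (int)usr->is_kernel : -1);

	if (usr)
		free(usr->umem);
	free(krn);
	free(usr);
	return 0;
}

The benefit shows up most clearly in the error path: because the cleanup code checks the stored flag rather than repeating the creation-time condition, it stays correct even if the condition that decides kernel versus user ownership changes later.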