Commit cc28f351 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Fix rxe_modify_srq

This patch corrects an error in rxe_modify_srq: when the caller
changes the srq size, the actual new value, which may be larger
than the one requested, is not returned to the caller.
Additionally it open codes the subroutine rcv_wqe_size(), which
adds very little value, and makes some whitespace changes.

Fixes: 8700e3e7 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20230620140142.9452-1-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 5993b75d
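For context before the diff: the size being computed in both rxe_srq_from_init() and rxe_srq_from_attr() is the per-WQE footprint, i.e. the fixed struct rxe_recv_wqe header plus one struct ib_sge per supported scatter/gather entry; after the queue is created or resized, the actual depth may be larger than requested, which is why the patch copies the resulting max_wr back to the caller. The following is a small standalone C sketch of that size arithmetic only, not kernel code: the struct layouts are simplified stand-ins for struct rxe_recv_wqe and struct ib_sge (the real definitions live in the kernel's rxe and RDMA headers), and the max_sge value is an arbitrary example, so the printed byte count is illustrative rather than the kernel's actual WQE size.

/*
 * Standalone sketch (not kernel code) of the receive WQE size formula
 * that the patch below open codes in rxe_srq.c.  The two structs are
 * simplified stand-ins, so the byte counts printed are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ib_sge_stub {                 /* stand-in for struct ib_sge */
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
};

struct rxe_recv_wqe_stub {           /* stand-in for struct rxe_recv_wqe */
        uint64_t wr_id;
        uint32_t reserved;
        uint32_t num_sge;
        /* the real WQE is followed by num_sge SGE slots */
};

/* same arithmetic as the removed rcv_wqe_size() helper */
static size_t rcv_wqe_size(int max_sge)
{
        return sizeof(struct rxe_recv_wqe_stub) +
               (size_t)max_sge * sizeof(struct ib_sge_stub);
}

int main(void)
{
        int max_sge = 4;   /* example: the value a caller would pass in attr.max_sge */

        printf("wqe_size for max_sge=%d: %zu bytes\n",
               max_sge, rcv_wqe_size(max_sge));
        return 0;
}

rxe_queue_init()/rxe_queue_resize() then allocate a queue of max_wr such elements, possibly rounding the count up, which is what the write-back of max_wr in the diff below makes visible to the caller.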
drivers/infiniband/sw/rxe/rxe_loc.h
@@ -136,12 +136,6 @@ static inline int qp_mtu(struct rxe_qp *qp)
 	return IB_MTU_4096;
 }
 
-static inline int rcv_wqe_size(int max_sge)
-{
-	return sizeof(struct rxe_recv_wqe) +
-		max_sge * sizeof(struct ib_sge);
-}
-
 void free_rd_atomic_resource(struct resp_res *res);
 
 static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
drivers/infiniband/sw/rxe/rxe_srq.c
@@ -45,40 +45,41 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp)
 {
-	int err;
-	int srq_wqe_size;
 	struct rxe_queue *q;
-	enum queue_type type;
+	int wqe_size;
+	int err;
 
 	srq->ibsrq.event_handler = init->event_handler;
 	srq->ibsrq.srq_context = init->srq_context;
 	srq->limit = init->attr.srq_limit;
 	srq->srq_num = srq->elem.index;
 	srq->rq.max_wr = init->attr.max_wr;
 	srq->rq.max_sge = init->attr.max_sge;
 
-	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+	wqe_size = sizeof(struct rxe_recv_wqe) +
+			srq->rq.max_sge*sizeof(struct ib_sge);
 
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
-	type = QUEUE_TYPE_FROM_CLIENT;
-	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
+	q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
 	if (!q) {
 		rxe_dbg_srq(srq, "Unable to allocate queue\n");
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_out;
 	}
 
-	srq->rq.queue = q;
-
 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
 			   q->buf_size, &q->ip);
 	if (err) {
-		vfree(q->buf);
-		kfree(q);
-		return err;
+		rxe_dbg_srq(srq, "Unable to init mmap info for caller\n");
+		goto err_free;
 	}
 
+	srq->rq.queue = q;
+	init->attr.max_wr = srq->rq.max_wr;
+
 	if (uresp) {
 		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
 				 sizeof(uresp->srq_num))) {
@@ -88,6 +89,12 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	}
 
 	return 0;
+
+err_free:
+	vfree(q->buf);
+	kfree(q);
+err_out:
+	return err;
 }
 
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
@@ -145,9 +152,10 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
 {
-	int err;
 	struct rxe_queue *q = srq->rq.queue;
 	struct mminfo __user *mi = NULL;
+	int wqe_size;
+	int err;
 
 	if (mask & IB_SRQ_MAX_WR) {
 		/*
@@ -156,12 +164,16 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		 */
 		mi = u64_to_user_ptr(ucmd->mmap_info_addr);
 
-		err = rxe_queue_resize(q, &attr->max_wr,
-				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
-				       &srq->rq.producer_lock,
+		wqe_size = sizeof(struct rxe_recv_wqe) +
+				srq->rq.max_sge*sizeof(struct ib_sge);
+
+		err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
+				       udata, mi, &srq->rq.producer_lock,
 				       &srq->rq.consumer_lock);
 		if (err)
-			goto err2;
+			goto err_free;
+
+		srq->rq.max_wr = attr->max_wr;
 	}
 
 	if (mask & IB_SRQ_LIMIT)
@@ -169,7 +181,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 
 	return 0;
 
-err2:
+err_free:
 	rxe_queue_cleanup(q);
 	srq->rq.queue = NULL;
 	return err;