Commit 692373d1 authored by Yunsheng Lin, committed by Jason Gunthorpe

RDMA/rxe: cleanup some error handling in rxe_verbs.c

Instead of 'goto and return', just return directly to
simplify the error handling and avoid some unnecessary
return value checks.

Link: https://lore.kernel.org/r/20221028075053.3990467-1-xuhaoyue1@hisilicon.com
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Haoyue Xu <xuhaoyue1@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b071850e
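
The pattern being removed is "store the error code, then goto a label that only returns it"; when the label performs no unwinding, returning the code directly is shorter and drops the extra return-value check. Below is a minimal userspace sketch of the before/after shape, using made-up names (demo_queue, queue_is_full, demo_post_*) rather than the actual rxe structures and helpers:

/*
 * Sketch only: demo_queue and queue_is_full are hypothetical stand-ins
 * for the rxe queue types used in the patch below.
 */
#include <errno.h>
#include <stdbool.h>

struct demo_queue {
	int depth;
	int count;
	int max_sge;
};

static bool queue_is_full(const struct demo_queue *q)
{
	return q->count >= q->depth;
}

/* Before: the error code is staged in 'err' and routed through a label. */
static int demo_post_old(struct demo_queue *q, int num_sge)
{
	int err;

	if (queue_is_full(q)) {
		err = -ENOMEM;
		goto err1;
	}

	if (num_sge > q->max_sge) {
		err = -EINVAL;
		goto err1;
	}

	q->count++;
	return 0;

err1:
	return err;
}

/* After: the label does no cleanup, so return the error code directly. */
static int demo_post_new(struct demo_queue *q, int num_sge)
{
	if (queue_is_full(q))
		return -ENOMEM;

	if (num_sge > q->max_sge)
		return -EINVAL;

	q->count++;
	return 0;
}

The same transformation only applies where the error label does nothing but return; labels that still unwind state (for example the rxe_cleanup() paths below) are kept and merely renumbered.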
@@ -238,7 +238,6 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 {
-	int err;
 	int i;
 	u32 length;
 	struct rxe_recv_wqe *recv_wqe;
@@ -246,15 +245,11 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	int full;
 
 	full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
-	if (unlikely(full)) {
-		err = -ENOMEM;
-		goto err1;
-	}
+	if (unlikely(full))
+		return -ENOMEM;
 
-	if (unlikely(num_sge > rq->max_sge)) {
-		err = -EINVAL;
-		goto err1;
-	}
+	if (unlikely(num_sge > rq->max_sge))
+		return -EINVAL;
 
 	length = 0;
 	for (i = 0; i < num_sge; i++)
@@ -275,9 +270,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
 
 	return 0;
-
-err1:
-	return err;
 }
 
 static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
@@ -343,10 +335,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	if (err)
 		return err;
 
-	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
-	if (err)
-		return err;
-
-	return 0;
+	return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
 }
 
 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
@@ -453,11 +442,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
 	if (err)
-		goto err1;
+		return err;
 
 	err = rxe_qp_from_attr(qp, attr, mask, udata);
 	if (err)
-		goto err1;
+		return err;
 
 	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
 		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
@@ -465,9 +454,6 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 						  qp->attr.dest_qp_num);
 
 	return 0;
-
-err1:
-	return err;
 }
 
 static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -501,24 +487,21 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	struct rxe_sq *sq = &qp->sq;
 
 	if (unlikely(num_sge > sq->max_sge))
-		goto err1;
+		return -EINVAL;
 
 	if (unlikely(mask & WR_ATOMIC_MASK)) {
 		if (length < 8)
-			goto err1;
+			return -EINVAL;
 
 		if (atomic_wr(ibwr)->remote_addr & 0x7)
-			goto err1;
+			return -EINVAL;
 	}
 
 	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
 		     (length > sq->max_inline)))
-		goto err1;
+		return -EINVAL;
 
 	return 0;
-
-err1:
-	return -EINVAL;
 }
 
 static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
@@ -735,14 +718,12 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
 		*bad_wr = wr;
-		err = -EINVAL;
-		goto err1;
+		return -EINVAL;
 	}
 
 	if (unlikely(qp->srq)) {
 		*bad_wr = wr;
-		err = -EINVAL;
-		goto err1;
+		return -EINVAL;
 	}
 
 	spin_lock_irqsave(&rq->producer_lock, flags);
@@ -761,7 +742,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	if (qp->resp.state == QP_STATE_ERROR)
 		rxe_sched_task(&qp->resp.task);
 
-err1:
 	return err;
 }
@@ -826,16 +806,9 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
 	if (err)
-		goto err1;
-
-	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
-	if (err)
-		goto err1;
-
-	return 0;
-
-err1:
-	return err;
+		return err;
+
+	return rxe_cq_resize_queue(cq, cqe, uresp, udata);
 }
 
 static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
@@ -921,26 +894,22 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	struct rxe_mr *mr;
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err2;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
 
 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
 	if (err)
-		goto err3;
+		goto err1;
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
 
-err3:
+err1:
 	rxe_cleanup(mr);
-err2:
 	return ERR_PTR(err);
 }
@@ -956,25 +925,22 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 		return ERR_PTR(-EINVAL);
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err1;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
 
 	err = rxe_mr_init_fast(max_num_sg, mr);
 	if (err)
-		goto err2;
+		goto err1;
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
 
-err2:
-	rxe_cleanup(mr);
 err1:
+	rxe_cleanup(mr);
 	return ERR_PTR(err);
 }