Commit e365b26c authored by Xi Wang and committed by Jason Gunthorpe

RDMA/hns: Optimize qp destroy flow

Wrap the duplicate code in the hip08 and hip06 qp destruction process into
hns_roce_qp_destroy() to simplify the qp destroy flow.

Link: https://lore.kernel.org/r/1582526258-13825-2-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 75c994e6
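For readers skimming the diff below, the shape of the change is one shared teardown helper that both hardware-specific destroy paths call. What follows is a minimal, self-contained userspace sketch of that pattern only; every type and function name in it (fake_qp, fake_qp_destroy, fake_v1_destroy_qp, fake_v2_destroy_qp) is a stand-in invented for illustration, and the real logic is the hns_roce_qp_destroy() added to hns_roce_qp.c further down.

/*
 * Illustrative stand-alone model of the refactor: two hw-specific destroy
 * paths delegate the shared resource-release steps to one common helper
 * instead of duplicating them. None of these names exist in the driver.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_qp {
	unsigned long qpn;
	bool is_gsi;	/* special QP: its QPN is not returned to the pool */
	void *sq_wrid;
	void *rq_wrid;
};

/* Common teardown shared by the "hip06" and "hip08" style callers. */
static void fake_qp_destroy(struct fake_qp *qp, bool user_mode)
{
	if (!qp->is_gsi)
		printf("release QPN 0x%06lx back to the pool\n", qp->qpn);

	if (!user_mode) {
		/* kernel-created QP: free the wrid arrays allocated for it */
		free(qp->sq_wrid);
		free(qp->rq_wrid);
	}

	free(qp);
}

/* hw_v1-style caller: only the hardware-specific steps stay inline. */
static int fake_v1_destroy_qp(struct fake_qp *qp, bool user_mode)
{
	printf("v1: move QP 0x%06lx to RESET and clean its CQs\n", qp->qpn);
	fake_qp_destroy(qp, user_mode);
	return 0;
}

/* hw_v2-style caller: likewise delegates the common cleanup. */
static int fake_v2_destroy_qp(struct fake_qp *qp, bool user_mode)
{
	printf("v2: move QP 0x%06lx to RESET and unmap its doorbells\n",
	       qp->qpn);
	fake_qp_destroy(qp, user_mode);
	return 0;
}

int main(void)
{
	struct fake_qp *qp = calloc(1, sizeof(*qp));
	struct fake_qp *user_qp = calloc(1, sizeof(*user_qp));

	if (!qp || !user_qp)
		return 1;
	qp->qpn = 0x42;
	qp->sq_wrid = malloc(16);
	qp->rq_wrid = malloc(16);
	user_qp->qpn = 0x43;	/* pretends to be a userspace-created QP */

	return fake_v1_destroy_qp(qp, false) | fake_v2_destroy_qp(user_qp, true);
}

In the real driver the common helper additionally handles the user doorbell unmap, MTR/MTT cleanup, umem release and inline-RQ buffer frees shown in the hns_roce_qp.c hunk below.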
@@ -1250,9 +1250,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
@@ -3618,26 +3618,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (send_cq && send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
hns_roce_unlock_cqs(send_cq, recv_cq);
hns_roce_qp_remove(hr_dev, hr_qp);
hns_roce_qp_free(hr_dev, hr_qp);
/* RC QP, release QPN */
if (hr_qp->ibqp.qp_type == IB_QPT_RC)
hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
ib_umem_release(hr_qp->umem);
if (!udata) {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
hns_roce_unlock_cqs(send_cq, recv_cq);
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
hns_roce_qp_destroy(hr_dev, hr_qp, udata);
kfree(hr_qp);
return 0;
}
@@ -5040,43 +5040,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
hns_roce_qp_free(hr_dev, hr_qp);
/* Not special_QP, free their QPN */
if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
(hr_qp->ibqp.qp_type == IB_QPT_UC) ||
(hr_qp->ibqp.qp_type == IB_QPT_UD))
hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
if (udata) {
struct hns_roce_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct hns_roce_ucontext,
ibucontext);
if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
hns_roce_db_unmap_user(context, &hr_qp->sdb);
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
hns_roce_db_unmap_user(context, &hr_qp->rdb);
} else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
if (hr_qp->rq.wqe_cnt)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
ib_umem_release(hr_qp->umem);
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hr_qp->rq.wqe_cnt) {
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
return ret;
}
@@ -5091,7 +5054,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp);
hns_roce_qp_destroy(hr_dev, hr_qp, udata);
return 0;
}
@@ -1087,6 +1087,47 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
return ret;
}
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
{
hns_roce_qp_free(hr_dev, hr_qp);
/* Not special_QP, free their QPN */
if (hr_qp->ibqp.qp_type != IB_QPT_GSI)
hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
if (udata) {
struct hns_roce_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct hns_roce_ucontext,
ibucontext);
if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
hns_roce_db_unmap_user(context, &hr_qp->sdb);
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
hns_roce_db_unmap_user(context, &hr_qp->rdb);
} else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
if (hr_qp->rq.wqe_cnt)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
ib_umem_release(hr_qp->umem);
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hr_qp->rq.wqe_cnt) {
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
kfree(hr_qp);
}
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)