Commit 487d6509 authored by Yixing Liu, committed by Jason Gunthorpe

RDMA/hns: Fix the gid problem caused by free mr

After the hns RoCE driver is loaded, modifying the MAC address of the
network port produces the following error:

   __ib_cache_gid_add: unable to add gid fe80:0000:0000:0000:4600:4dff:fe22:abb5 error=-28
    hns3 0000:7d:00.0 hns_0: attr path_mtu(1) invalid while modify qp

The error occurs because the GID is still in use, so it cannot be
modified. The GID is held by the loopback QPs used by free mr. When the
MAC address is modified, the GID changes; if a QP is still occupying the
GID at that point, the GID cannot be released and the modification
fails. The free mr QPs are created through the IB interface, which takes
a reference count on the GID, and that is what produces this error
scenario.
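
(For context, a minimal sketch of the mechanism, not part of this patch:
when the core verbs bring a QP up, the IB core resolves the source GID
into a struct ib_gid_attr and holds a reference for as long as the QP
uses it. The example_* wrappers below are hypothetical;
rdma_get_gid_attr() and rdma_put_gid_attr() are the in-kernel GID cache
helpers.)

    #include <rdma/ib_cache.h>

    /* Take a reference on a GID table entry, as the IB core does on behalf
     * of a QP that has been moved to RTR. While any reference is held the
     * entry cannot be released, so after a MAC change the stale default GID
     * lingers and inserting the new one can fail with -ENOSPC (-28), as in
     * the log above.
     */
    static const struct ib_gid_attr *example_pin_sgid(struct ib_device *ibdev,
                                                      u32 port, int gid_index)
    {
        return rdma_get_gid_attr(ibdev, port, gid_index); /* may be ERR_PTR */
    }

    /* Drop the reference so the stale entry can finally be reclaimed. */
    static void example_unpin_sgid(const struct ib_gid_attr *sgid_attr)
    {
        rdma_put_gid_attr(sgid_attr);
    }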

Since free mr only exists to work around a defect in HIP08 and does not
carry real service traffic, there is no need to go through the IB
interfaces.
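
The crux of the change, condensed as an illustrative sketch rather than
a literal excerpt from the diff below: the reserved loopback QPs are
driven through the driver's own hw ops instead of ib_modify_qp(), so the
IB core never resolves the address handle for them and never takes a GID
reference. (The helper name example_free_mr_to_rtr is hypothetical.)

    /* Move a reserved loopback QP to RTR via the driver's hw op. */
    static int example_free_mr_to_rtr(struct hns_roce_dev *hr_dev,
                                      struct hns_roce_qp *hr_qp,
                                      struct ib_qp_attr *attr, int mask)
    {
        /* Previously: ib_modify_qp(&hr_qp->ibqp, attr, mask), which pins
         * the source GID for the lifetime of the QP.
         */
        return hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask,
                                     IB_QPS_INIT, IB_QPS_RTR);
    }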

Fixes: 70f92521 ("RDMA/hns: Use the reserved loopback QPs to free MR before destroying MPT")
Link: https://lore.kernel.org/r/20221126102911.2921820-2-xuhaoyue1@hisilicon.com
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Haoyue Xu <xuhaoyue1@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent fb4907f4
@@ -2634,31 +2634,124 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
     spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
 }

-static void free_mr_exit(struct hns_roce_dev *hr_dev)
+static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
+{
+    struct hns_roce_v2_priv *priv = hr_dev->priv;
+    struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+    struct ib_device *ibdev = &hr_dev->ib_dev;
+    struct hns_roce_pd *hr_pd;
+    struct ib_pd *pd;
+
+    hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
+    if (ZERO_OR_NULL_PTR(hr_pd))
+        return NULL;
+    pd = &hr_pd->ibpd;
+    pd->device = ibdev;
+
+    if (hns_roce_alloc_pd(pd, NULL)) {
+        ibdev_err(ibdev, "failed to create pd for free mr.\n");
+        kfree(hr_pd);
+        return NULL;
+    }
+    free_mr->rsv_pd = to_hr_pd(pd);
+    free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
+    free_mr->rsv_pd->ibpd.uobject = NULL;
+    free_mr->rsv_pd->ibpd.__internal_mr = NULL;
+    atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
+
+    return pd;
+}
+
+static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
+{
+    struct hns_roce_v2_priv *priv = hr_dev->priv;
+    struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+    struct ib_device *ibdev = &hr_dev->ib_dev;
+    struct ib_cq_init_attr cq_init_attr = {};
+    struct hns_roce_cq *hr_cq;
+    struct ib_cq *cq;
+
+    cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
+
+    hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
+    if (ZERO_OR_NULL_PTR(hr_cq))
+        return NULL;
+
+    cq = &hr_cq->ib_cq;
+    cq->device = ibdev;
+
+    if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
+        ibdev_err(ibdev, "failed to create cq for free mr.\n");
+        kfree(hr_cq);
+        return NULL;
+    }
+    free_mr->rsv_cq = to_hr_cq(cq);
+    free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
+    free_mr->rsv_cq->ib_cq.uobject = NULL;
+    free_mr->rsv_cq->ib_cq.comp_handler = NULL;
+    free_mr->rsv_cq->ib_cq.event_handler = NULL;
+    free_mr->rsv_cq->ib_cq.cq_context = NULL;
+    atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
+
+    return cq;
+}
+
+static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
+                           struct ib_qp_init_attr *init_attr, int i)
 {
     struct hns_roce_v2_priv *priv = hr_dev->priv;
     struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+    struct ib_device *ibdev = &hr_dev->ib_dev;
+    struct hns_roce_qp *hr_qp;
+    struct ib_qp *qp;
     int ret;
+
+    hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+    if (ZERO_OR_NULL_PTR(hr_qp))
+        return -ENOMEM;
+
+    qp = &hr_qp->ibqp;
+    qp->device = ibdev;
+
+    ret = hns_roce_create_qp(qp, init_attr, NULL);
+    if (ret) {
+        ibdev_err(ibdev, "failed to create qp for free mr.\n");
+        kfree(hr_qp);
+        return ret;
+    }
+
+    free_mr->rsv_qp[i] = hr_qp;
+    free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
+    free_mr->rsv_qp[i]->ibqp.send_cq = cq;
+
+    return 0;
+}
+
+static void free_mr_exit(struct hns_roce_dev *hr_dev)
+{
+    struct hns_roce_v2_priv *priv = hr_dev->priv;
+    struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+    struct ib_qp *qp;
     int i;

     for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
         if (free_mr->rsv_qp[i]) {
-            ret = ib_destroy_qp(free_mr->rsv_qp[i]);
-            if (ret)
-                ibdev_err(&hr_dev->ib_dev,
-                          "failed to destroy qp in free mr.\n");
+            qp = &free_mr->rsv_qp[i]->ibqp;
+            hns_roce_v2_destroy_qp(qp, NULL);
+            kfree(free_mr->rsv_qp[i]);
             free_mr->rsv_qp[i] = NULL;
         }
     }

     if (free_mr->rsv_cq) {
-        ib_destroy_cq(free_mr->rsv_cq);
+        hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
+        kfree(free_mr->rsv_cq);
         free_mr->rsv_cq = NULL;
     }

     if (free_mr->rsv_pd) {
-        ib_dealloc_pd(free_mr->rsv_pd);
+        hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
+        kfree(free_mr->rsv_pd);
         free_mr->rsv_pd = NULL;
     }
 }
@@ -2667,55 +2760,46 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
 {
     struct hns_roce_v2_priv *priv = hr_dev->priv;
     struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
-    struct ib_device *ibdev = &hr_dev->ib_dev;
-    struct ib_cq_init_attr cq_init_attr = {};
     struct ib_qp_init_attr qp_init_attr = {};
     struct ib_pd *pd;
     struct ib_cq *cq;
-    struct ib_qp *qp;
     int ret;
     int i;

-    pd = ib_alloc_pd(ibdev, 0);
-    if (IS_ERR(pd)) {
-        ibdev_err(ibdev, "failed to create pd for free mr.\n");
-        return PTR_ERR(pd);
-    }
-    free_mr->rsv_pd = pd;
+    pd = free_mr_init_pd(hr_dev);
+    if (!pd)
+        return -ENOMEM;

-    cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
-    cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
-    if (IS_ERR(cq)) {
-        ibdev_err(ibdev, "failed to create cq for free mr.\n");
-        ret = PTR_ERR(cq);
-        goto create_failed;
+    cq = free_mr_init_cq(hr_dev);
+    if (!cq) {
+        ret = -ENOMEM;
+        goto create_failed_cq;
     }
-    free_mr->rsv_cq = cq;

     qp_init_attr.qp_type = IB_QPT_RC;
     qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-    qp_init_attr.send_cq = free_mr->rsv_cq;
-    qp_init_attr.recv_cq = free_mr->rsv_cq;
+    qp_init_attr.send_cq = cq;
+    qp_init_attr.recv_cq = cq;
     for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
         qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
         qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
         qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
         qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
-        qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
-        if (IS_ERR(qp)) {
-            ibdev_err(ibdev, "failed to create qp for free mr.\n");
-            ret = PTR_ERR(qp);
-            goto create_failed;
-        }
-
-        free_mr->rsv_qp[i] = qp;
+        ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
+        if (ret)
+            goto create_failed_qp;
     }

     return 0;

-create_failed:
-    free_mr_exit(hr_dev);
+create_failed_qp:
+    hns_roce_destroy_cq(cq, NULL);
+    kfree(cq);
+create_failed_cq:
+    hns_roce_dealloc_pd(pd, NULL);
+    kfree(pd);

     return ret;
 }
@@ -2731,14 +2815,17 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
     int mask;
     int ret;

-    hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
+    hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
     hr_qp->free_mr_en = 1;
+    hr_qp->ibqp.device = ibdev;
+    hr_qp->ibqp.qp_type = IB_QPT_RC;
     mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
     attr->qp_state = IB_QPS_INIT;
     attr->port_num = 1;
     attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
-    ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+    ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
+                                IB_QPS_INIT);
     if (ret) {
         ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
                   ret);

@@ -2759,7 +2846,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
     rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);

-    ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+    ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
+                                IB_QPS_RTR);
     hr_dev->loop_idc = loopback;
     if (ret) {
         ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",

@@ -2773,7 +2861,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
     attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
     attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
     attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
-    ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+    ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
+                                IB_QPS_RTS);
     if (ret)
         ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
                   ret);
@@ -3416,7 +3505,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
     mutex_lock(&free_mr->mutex);

     for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
-        hr_qp = to_hr_qp(free_mr->rsv_qp[i]);
+        hr_qp = free_mr->rsv_qp[i];

         ret = free_mr_post_send_lp_wqe(hr_qp);
         if (ret) {

@@ -3431,7 +3520,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
     end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
     while (cqe_cnt) {
-        npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
+        npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
         if (npolled < 0) {
             ibdev_err(ibdev,
                       "failed to poll cqe for free mr, remain %d cqe.\n",
@@ -5474,7 +5563,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
     return ret;
 }

-static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
     struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
     struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
...
@@ -1329,9 +1329,9 @@ struct hns_roce_link_table {
 #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)

 struct hns_roce_v2_free_mr {
-    struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
-    struct ib_cq *rsv_cq;
-    struct ib_pd *rsv_pd;
+    struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
+    struct hns_roce_cq *rsv_cq;
+    struct hns_roce_pd *rsv_pd;
     struct mutex mutex;
 };

@@ -1461,6 +1461,8 @@ struct hns_roce_sccc_clr_done {
     __le32 rsv[5];
 };

+int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
 static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
                                     void __iomem *dest)
 {
...