Commit aa84fa18 authored by Yangyang Li, committed by Jason Gunthorpe

RDMA/hns: Add SCC context clr support for hip08

This patch adds SCC context clear support for DCQCN to the kernel-space
driver.
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6a157f7d
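
At a high level, the new qp_flow_control_init() hook performs a three-step CMQ handshake: reset the clear-done flag, issue a per-QPN clear, then poll until the hardware reports completion. The standalone C sketch below models that control flow only; every name in it (cmq_send, the OPC_* constants) is a hypothetical stand-in, not the driver's or kernel's API.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's CMQ opcodes and send helper. */
enum { OPC_RESET_SCCC, OPC_CLR_SCCC, OPC_QUERY_SCCC };
#define SCC_CLR_DONE_CNT 5

static int cmq_send(int opcode, uint32_t qpn, uint32_t *clr_done)
{
	(void)qpn;
	/* Stub: pretend the hardware finishes the clear immediately. */
	if (opcode == OPC_QUERY_SCCC && clr_done)
		*clr_done = 1;
	return 0;
}

/* The three-step handshake the patch implements for one QP. */
static int scc_ctx_clear(uint32_t qpn)
{
	uint32_t done = 0;
	int i, ret;

	/* 1. Reset the device's "clear done" flag. */
	ret = cmq_send(OPC_RESET_SCCC, 0, NULL);
	if (ret)
		return ret;

	/* 2. Ask the hardware to clear the SCC context of this QP. */
	ret = cmq_send(OPC_CLR_SCCC, qpn, NULL);
	if (ret)
		return ret;

	/* 3. Poll until the hardware reports completion
	 * (the driver sleeps 20 ms between queries). */
	for (i = 0; i <= SCC_CLR_DONE_CNT; i++) {
		ret = cmq_send(OPC_QUERY_SCCC, qpn, &done);
		if (ret)
			return ret;
		if (done)
			return 0;
	}
	return -1;	/* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	printf("clear QP 42: %d\n", scc_ctx_clear(42));
	return 0;
}
```
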
@@ -202,6 +202,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
 	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
 	HNS_ROCE_CAP_FLAG_FRMR			= BIT(8),
+	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
 	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
 };
@@ -483,6 +484,7 @@ struct hns_roce_qp_table {
 	struct hns_roce_hem_table	irrl_table;
 	struct hns_roce_hem_table	trrl_table;
 	struct hns_roce_hem_table	sccc_table;
+	struct mutex			scc_mutex;
 };

 struct hns_roce_cq_table {
@@ -867,6 +869,8 @@ struct hns_roce_hw {
 			 int attr_mask, enum ib_qp_state cur_state,
 			 enum ib_qp_state new_state);
 	int (*destroy_qp)(struct ib_qp *ibqp);
+	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
+				    struct hns_roce_qp *hr_qp);
 	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			 const struct ib_send_wr **bad_wr);
 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
...
@@ -1436,7 +1436,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	if (hr_dev->pci_dev->revision == 0x21) {
 		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
-			       HNS_ROCE_CAP_FLAG_SRQ;
+			       HNS_ROCE_CAP_FLAG_SRQ |
+			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

 		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
 		caps->sccc_ba_pg_sz	= 0;
 		caps->sccc_buf_pg_sz	= 0;
@@ -4277,6 +4279,60 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
 	return 0;
 }

+static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+					    struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_sccc_clr_done *rst, *resp;
+	struct hns_roce_sccc_clr *clr;
+	struct hns_roce_cmq_desc desc;
+	int ret, i;
+
+	mutex_lock(&hr_dev->qp_table.scc_mutex);
+
+	/* set scc ctx clear done flag */
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+	rst = (struct hns_roce_sccc_clr_done *)desc.data;
+	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+	if (ret) {
+		dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
+		goto out;
+	}
+
+	/* clear scc context */
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
+	clr = (struct hns_roce_sccc_clr *)desc.data;
+	clr->qpn = cpu_to_le32(hr_qp->qpn);
+	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+	if (ret) {
+		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
+		goto out;
+	}
+
+	/* query scc context clear is done or not */
+	resp = (struct hns_roce_sccc_clr_done *)desc.data;
+	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
+		hns_roce_cmq_setup_basic_desc(&desc,
+					      HNS_ROCE_OPC_QUERY_SCCC, true);
+		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+		if (ret) {
+			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
+			goto out;
+		}
+
+		if (resp->clr_done)
+			goto out;
+
+		msleep(20);
+	}
+
+	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
+	ret = -ETIMEDOUT;
+
+out:
+	mutex_unlock(&hr_dev->qp_table.scc_mutex);
+	return ret;
+}
+
 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
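
Two implementation details of this function are worth noting. First, the whole sequence holds scc_mutex: the RESET_SCCC and QUERY_SCCC commands carry no QPN, so the clear-done flag appears to be per-device, and an interleaved reset from a concurrent QP's cleanup could re-arm it mid-poll. Second, the section sleeps in msleep(), which rules out a spinlock. The retry budget works out as follows (a worked sketch mirroring the patch's constants; the local macro names are illustrative):

```c
/* Worked arithmetic for the poll loop's worst-case wait. */
#define SCC_CLR_DONE_CNT	5	/* HNS_ROCE_CMQ_SCC_CLR_DONE_CNT */
#define QUERY_INTERVAL_MS	20	/* msleep(20) between queries */

enum {
	/* "i <= CNT" runs the query CNT + 1 times */
	SCC_CLR_QUERIES   = SCC_CLR_DONE_CNT + 1,		  /* 6 */
	SCC_CLR_BUDGET_MS = SCC_CLR_QUERIES * QUERY_INTERVAL_MS, /* 120 */
};

_Static_assert(SCC_CLR_BUDGET_MS == 120,
	       "worst case ~120 ms of sleep before -ETIMEDOUT");
```
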
@@ -5835,6 +5891,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
 	.modify_qp = hns_roce_v2_modify_qp,
 	.query_qp = hns_roce_v2_query_qp,
 	.destroy_qp = hns_roce_v2_destroy_qp,
+	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
 	.modify_cq = hns_roce_v2_modify_cq,
 	.post_send = hns_roce_v2_post_send,
 	.post_recv = hns_roce_v2_post_recv,
...
@@ -123,6 +123,8 @@
 #define HNS_ROCE_CMQ_EN_B		16
 #define HNS_ROCE_CMQ_ENABLE		BIT(HNS_ROCE_CMQ_EN_B)

+#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT	5
+
 #define check_whether_last_step(hop_num, step_idx) \
 	((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
 	 (step_idx == 1 && hop_num == 1) || \
@@ -232,6 +234,9 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_POST_MB			= 0x8504,
 	HNS_ROCE_OPC_QUERY_MB_ST		= 0x8505,
 	HNS_ROCE_OPC_CFG_BT_ATTR		= 0x8506,
+	HNS_ROCE_OPC_CLR_SCCC			= 0x8509,
+	HNS_ROCE_OPC_QUERY_SCCC			= 0x850a,
+	HNS_ROCE_OPC_RESET_SCCC			= 0x850b,
 	HNS_SWITCH_PARAMETER_CFG		= 0x1033,
 };
@@ -1757,4 +1762,14 @@ struct hns_roce_wqe_atomic_seg {
 	__le64	cmp_data;
 };

+struct hns_roce_sccc_clr {
+	__le32 qpn;
+	__le32 rsv[5];
+};
+
+struct hns_roce_sccc_clr_done {
+	__le32 clr_done;
+	__le32 rsv[5];
+};
+
 #endif
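
Both new payloads are six little-endian dwords (24 bytes): one meaningful word plus five reserved words of padding, presumably sized to fill the data area of a CMQ descriptor (an assumption about the descriptor layout, not something this diff states). A standalone compile-time check of that layout, with __le32 swapped for uint32_t so it builds outside the kernel:

```c
#include <stdint.h>

/* Userspace mirrors of the two payload structs added above. */
struct sccc_clr {
	uint32_t qpn;
	uint32_t rsv[5];
};

struct sccc_clr_done {
	uint32_t clr_done;
	uint32_t rsv[5];
};

/* Both commands carry 6 dwords = 24 bytes of payload. */
_Static_assert(sizeof(struct sccc_clr) == 24, "sccc_clr must be 24 bytes");
_Static_assert(sizeof(struct sccc_clr_done) == 24,
	       "sccc_clr_done must be 24 bytes");
```
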
@@ -811,6 +811,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		if (ret)
 			goto err_qp;
 	}

+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
+		if (ret)
+			goto err_qp;
+	}
+
 	hr_qp->event = hns_roce_ib_qp_event;

 	return 0;
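
The cap-flag guard matters because qp_flow_control_init is an optional hw op: only a backend that advertises HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL installs the pointer, so an unguarded call through the ops table would dereference NULL on hardware without the feature. A reduced, self-contained C sketch of that pattern (all names hypothetical, not the driver's types):

```c
#include <stddef.h>

#define CAP_QP_FLOW_CTRL (1u << 9)

/* Hypothetical reduction of a per-hardware ops table. */
struct hw_ops {
	int (*qp_flow_control_init)(void);	/* optional: may be NULL */
};

struct device {
	unsigned int cap_flags;
	const struct hw_ops *hw;
};

static int create_qp(struct device *dev)
{
	/* Call the optional hook only when the capability is advertised;
	 * backends lacking the feature leave the pointer NULL. */
	if (dev->cap_flags & CAP_QP_FLOW_CTRL)
		return dev->hw->qp_flow_control_init();
	return 0;
}
```
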
@@ -1152,6 +1159,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	int reserved_from_bot;
 	int ret;

+	mutex_init(&qp_table->scc_mutex);
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
...