Commit 926a01dc authored by Wei Hu (Xavier), committed by Doug Ledford

RDMA/hns: Add QP operations support for hip08 SoC

This patch implements the QP operations for the hip08 RoCE driver and
fixes some checkpatch warnings about print messages in the QP functions.
The QP operations include create QP, query QP, modify QP and destroy QP.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 93aa2187
drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -48,6 +48,10 @@ enum {
 	HNS_ROCE_CMD_DESTROY_QPC_BT1	= 0x9,
 	HNS_ROCE_CMD_DESTROY_QPC_BT2	= 0xa,
 
+	/* QPC operation */
+	HNS_ROCE_CMD_MODIFY_QPC		= 0x41,
+	HNS_ROCE_CMD_QUERY_QPC		= 0x42,
+
 	/* CQC BT commands */
 	HNS_ROCE_CMD_WRITE_CQC_BT0	= 0x10,
 	HNS_ROCE_CMD_WRITE_CQC_BT1	= 0x11,
...
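Note: these two opcodes extend the mailbox command set so QP context can be modified and queried through the command channel. As a rough illustration of how such a command could be posted through the driver's existing mailbox API (query_qpc_sketch is a hypothetical helper; the real v2 engine flow lives in the collapsed hns_roce_hw_v2.c diff):

/* Hypothetical helper, for illustration only: fetch the context of QP
 * "qpn" into a DMA-able buffer obtained from hns_roce_alloc_cmd_mailbox().
 */
static int query_qpc_sketch(struct hns_roce_dev *hr_dev, u32 qpn,
			    struct hns_roce_cmd_mailbox *mailbox)
{
	/* in_modifier carries the QPN; out_param is the DMA address the
	 * hardware writes the QP context to.
	 */
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
				 HNS_ROCE_CMD_QUERY_QPC,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}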
drivers/infiniband/hw/hns/hns_roce_device.h
@@ -304,6 +304,12 @@ struct hns_roce_wq {
 	void __iomem	*db_reg_l;
 };
 
+struct hns_roce_sge {
+	int	sge_cnt;	/* SGE num */
+	int	offset;
+	int	sge_shift;	/* SGE size */
+};
+
 struct hns_roce_buf_list {
 	void		*buf;
 	dma_addr_t	map;
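Note: the new struct describes a QP's extended SGE region: sge_cnt slots, each 1 << sge_shift bytes, starting at byte offset `offset` inside the QP buffer. A minimal sketch of the address math (sge_slot_offset is a hypothetical helper; the accessor this patch actually adds, get_send_extend_sge(), appears further down):

/* Hypothetical helper: byte offset of extended SGE slot n inside the QP
 * buffer.  With sge_shift == 4, each slot is 1 << 4 = 16 bytes, the size
 * of one hardware data segment.
 */
static inline u32 sge_slot_offset(const struct hns_roce_sge *sge, int n)
{
	return sge->offset + (n << sge->sge_shift);
}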
@@ -455,6 +461,9 @@ struct hns_roce_qp {
 	atomic_t		refcount;
 	struct completion	free;
+
+	struct hns_roce_sge	sge;
+	u32			next_sge;
 };
 
 struct hns_roce_sqp {
@@ -509,6 +518,7 @@ struct hns_roce_caps {
 	int		num_cqs;
 	int		max_cqes;
 	int		min_cqes;
+	u32		min_wqes;
 	int		reserved_cqs;
 	int		num_aeq_vectors;	/* 1 */
 	int		num_comp_vectors;	/* 32 ceq */
@@ -788,6 +798,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq);
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
...
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1475,6 +1475,7 @@ int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	caps->num_qps		= HNS_ROCE_V1_MAX_QP_NUM;
 	caps->max_wqes		= HNS_ROCE_V1_MAX_WQE_NUM;
+	caps->min_wqes		= HNS_ROCE_MIN_WQE_NUM;
 	caps->num_cqs		= HNS_ROCE_V1_MAX_CQ_NUM;
 	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
 	caps->max_cqes		= HNS_ROCE_V1_MAX_CQE_NUM;
...
[Two collapsed file diffs are not shown here.]
drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -286,20 +286,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 			return -EINVAL;
 		}
 
-		/* In v1 engine, parameter verification procession */
-		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
-			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+		if (hr_dev->caps.min_wqes)
+			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
+		else
+			max_cnt = cap->max_recv_wr;
+
 		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
 
 		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
-			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
 			return -EINVAL;
 		}
 
 		max_cnt = max(1U, cap->max_recv_sge);
 		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
-		/* WQE is fixed for 64B */
-		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+
+		if (hr_dev->caps.max_rq_sg <= 2)
+			hr_qp->rq.wqe_shift =
+				ilog2(hr_dev->caps.max_rq_desc_sz);
+		else
+			hr_qp->rq.wqe_shift =
+				ilog2(hr_dev->caps.max_rq_desc_sz
+					* hr_qp->rq.max_gs);
 	}
 
 	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
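Note: a worked example of the new RQ sizing, with hypothetical values:

/* Assume cap->max_recv_wr = 100, caps.min_wqes = 32, caps.max_rq_desc_sz = 16
 * and cap->max_recv_sge = 3 on a hip08-style device (max_rq_sg > 2):
 *
 *   max_cnt      = max(100, 32)             = 100
 *   rq.wqe_cnt   = roundup_pow_of_two(100)  = 128
 *   rq.max_gs    = roundup_pow_of_two(3)    = 4
 *   rq.wqe_shift = ilog2(16 * 4)            = 6   (64-byte RQ stride)
 */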
@@ -309,11 +316,13 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+				     struct ib_qp_cap *cap,
 				     struct hns_roce_qp *hr_qp,
 				     struct hns_roce_ib_create_qp *ucmd)
 {
 	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
 	u8 max_sq_stride = ilog2(roundup_sq_stride);
+	u32 max_cnt;
 
 	/* Sanity check SQ size before proceeding */
 	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
@@ -323,18 +332,61 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 		return -EINVAL;
 	}
 
+	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+			cap->max_send_sge);
+		return -EINVAL;
+	}
+
 	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
 	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
 
+	max_cnt = max(1U, cap->max_send_sge);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
+
+	if (hr_qp->sq.max_gs > 2)
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+							(hr_qp->sq.max_gs - 2));
+	hr_qp->sge.sge_shift = 4;
+
 	/* Get buf size, SQ and RQ are aligned to page_size */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
-					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					      hr_qp->sq.wqe_shift), PAGE_SIZE);
-	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+	if (hr_dev->caps.max_sq_sg <= 2) {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+		hr_qp->sq.offset = 0;
+		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+	} else {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+					      hr_qp->sge.sge_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+		hr_qp->sq.offset = 0;
+		if (hr_qp->sge.sge_cnt) {
+			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+						(hr_qp->sq.wqe_cnt <<
+						hr_qp->sq.wqe_shift),
+						PAGE_SIZE);
+			hr_qp->rq.offset = hr_qp->sge.offset +
+					   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+						hr_qp->sge.sge_shift),
+						PAGE_SIZE);
+		} else {
+			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+						(hr_qp->sq.wqe_cnt <<
+						hr_qp->sq.wqe_shift),
+						PAGE_SIZE);
+		}
+	}
 
 	return 0;
 }
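Note: on hip08 (max_sq_sg > 2) the user QP buffer now carries three page-aligned regions in the order SQ, extended SGE, RQ. A hypothetical layout with 4 KB pages:

/* sq.wqe_cnt = 128, sq.wqe_shift = 6, sge.sge_cnt = 512, sge.sge_shift = 4,
 * rq.wqe_cnt = 128, rq.wqe_shift = 6:
 *
 *   sq.offset  = 0
 *   sge.offset = ALIGN(128 << 6, 4096)          = 8192
 *   rq.offset  = 8192 + ALIGN(512 << 4, 4096)   = 16384
 *   buff_size  = 16384 + ALIGN(128 << 6, 4096)  = 24576
 */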
@@ -345,11 +397,12 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 {
 	struct device *dev = hr_dev->dev;
 	u32 max_cnt;
+	int size;
 
 	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
 	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
 	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+		dev_err(dev, "SQ WR or sge or inline data error!\n");
 		return -EINVAL;
 	}
 
@@ -357,27 +410,45 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->sq_max_wqes_per_wr = 1;
 	hr_qp->sq_spare_wqes = 0;
 
-	/* In v1 engine, parameter verification procession */
-	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
-		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+	if (hr_dev->caps.min_wqes)
+		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+	else
+		max_cnt = cap->max_send_wr;
+
 	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
 	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
 		return -EINVAL;
 	}
 
 	/* Get data_seg numbers */
 	max_cnt = max(1U, cap->max_send_sge);
-	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
 
-	/* Get buf size, SQ and RQ are aligned to page_size */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
-					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+	if (hr_qp->sq.max_gs > 2) {
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+							(hr_qp->sq.max_gs - 2));
+		hr_qp->sge.sge_shift = 4;
+	}
+
+	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
 	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+				 PAGE_SIZE);
+
+	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+		hr_qp->sge.offset = size;
+		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+					  hr_qp->sge.sge_shift, PAGE_SIZE);
+	}
+
+	hr_qp->rq.offset = size;
+	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+				  PAGE_SIZE);
+	hr_qp->buff_size = size;
 
 	/* Get wr and sge number which send */
 	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
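Note: the `sq.max_gs > 2` branch reflects that a hip08 send WQE carries two data segments inline; any further scatter/gather entries spill into the extended SGE region, hence sge_cnt = wqe_cnt * (max_gs - 2) rounded up to a power of two. A worked example with hypothetical values:

/* sq.wqe_cnt = 128, sq.max_gs = 6: two segments fit in each WQE, so up to
 * four per WQE overflow into the extended area:
 *
 *   sge.sge_cnt = roundup_pow_of_two(128 * (6 - 2)) = 512
 *   region size = 512 << 4                          = 8192 bytes
 */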
@@ -425,7 +496,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
-		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
+						&ucmd);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
 			goto err_out;
@@ -528,7 +600,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}
 
-	if ((init_attr->qp_type) == IB_QPT_GSI) {
+	if (init_attr->qp_type == IB_QPT_GSI &&
+	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+		/* In v1 engine, the GSI QP context is in the RoCE engine's registers */
 		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_qp_alloc failed!\n");
@@ -700,7 +774,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
 		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
 
-		if (attr->path_mtu > IB_MTU_2048 ||
+		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
+		    attr->path_mtu > IB_MTU_4096) ||
+		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
+		    attr->path_mtu > IB_MTU_2048) ||
 		    attr->path_mtu < IB_MTU_256 ||
 		    attr->path_mtu > active_mtu) {
 			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
@@ -724,9 +801,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ret = -EPERM;
-		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
-			new_state);
+		ret = 0;
 		goto out;
 	}
@@ -804,6 +879,13 @@ void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 }
 EXPORT_SYMBOL_GPL(get_send_wqe);
 
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+{
+	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
+					(n << hr_qp->sge.sge_shift));
+}
+EXPORT_SYMBOL_GPL(get_send_extend_sge);
+
 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq)
 {
...
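Note: a sketch of how a post-send path might consume the new accessor. This is not the real hip08 path (that lives in the collapsed hns_roce_hw_v2.c diff); the masking assumes sge_cnt is a power of two, which the sizing code above guarantees:

/* Fetch the next free extended-SGE slot for this QP.  next_sge advances
 * monotonically; masking by (sge_cnt - 1) wraps it inside the ring.
 */
void *dseg = get_send_extend_sge(hr_qp,
				 hr_qp->next_sge & (hr_qp->sge.sge_cnt - 1));
/* ...copy one 16-byte data segment here, then advance hr_qp->next_sge. */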