Commit fe15bcc6 authored by Jason Gunthorpe

Merge branch 'mlx5-packet-credit-fc' into rdma.git

Danit Goldberg says:

Packet based credit mode

Packet based credit mode is an alternative end-to-end credit mode for QPs,
set at QP creation time. Credits are transported from the responder to the
requester to optimize the use of the responder's receive resources. In
packet based credit mode, credits are issued on a per-packet basis.

The advantage of this feature comes when sending large RDMA messages
through switches that are short on memory.

The first commit exposes the QP creation flag and the HCA capability. The
second commit adds support for a new DV QP creation flag. The last commit
reports the packet based credit mode capability via the MLX5DV device
capabilities.
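
As an illustration of how this is expected to be consumed from userspace,
below is a minimal sketch using the mlx5 direct-verbs API. The
MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE and
MLX5DV_QP_CREATE_PACKET_BASED_CREDIT_MODE names are assumed to be the
rdma-core counterparts of the flags added here; they are not part of this
kernel series.

/*
 * Illustrative sketch only: probe the capability reported by the mlx5
 * provider and, if present, request packet based credit mode on an RC QP.
 * The MLX5DV_* flag names are assumed rdma-core definitions.
 */
#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_qp *create_rc_qp_packet_based_credits(struct ibv_context *ctx,
                                                        struct ibv_pd *pd,
                                                        struct ibv_cq *cq)
{
        struct mlx5dv_context dv = {};
        struct ibv_qp_init_attr_ex attr = {};
        struct mlx5dv_qp_init_attr dv_attr = {};

        /* Check that the provider reports packet based credit mode. */
        if (mlx5dv_query_device(ctx, &dv) ||
            !(dv.flags & MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE)) {
                fprintf(stderr, "packet based credit mode not supported\n");
                return NULL;
        }

        /* Ordinary RC QP attributes. */
        attr.qp_type = IBV_QPT_RC;
        attr.send_cq = cq;
        attr.recv_cq = cq;
        attr.pd = pd;
        attr.comp_mask = IBV_QP_INIT_ATTR_PD;
        attr.cap.max_send_wr = 64;
        attr.cap.max_recv_wr = 64;
        attr.cap.max_send_sge = 1;
        attr.cap.max_recv_sge = 1;

        /* mlx5-specific part: ask for per-packet end-to-end credits. */
        dv_attr.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
        dv_attr.create_flags = MLX5DV_QP_CREATE_PACKET_BASED_CREDIT_MODE;

        return mlx5dv_create_qp(ctx, &attr, &dv_attr);
}

A provider implementing this would translate the DV create flag into
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE in the create-QP command, which the
kernel below only accepts for RC QPs on devices that set the
qp_packet_based HCA capability.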

* branch 'mlx5-packet-credit-fc':
  IB/mlx5: Report packet based credit mode device capability
  IB/mlx5: Add packet based credit mode support
  net/mlx5: Expose packet based credit mode
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parents e7521d82 7e11b911
@@ -1018,6 +1018,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                 if (MLX5_CAP_GEN(mdev, cqe_128_always))
                         resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+                if (MLX5_CAP_GEN(mdev, qp_packet_based))
+                        resp.flags |=
+                                MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
         }
         if (field_avail(typeof(resp), sw_parsing_caps,
...
@@ -462,6 +462,7 @@ enum mlx5_ib_qp_flags {
         MLX5_IB_QP_UNDERLAY = 1 << 10,
         MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
         MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
+        MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,
 };
 struct mlx5_umr_wr {
...
@@ -1917,7 +1917,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                   MLX5_QP_FLAG_BFREG_INDEX |
                                   MLX5_QP_FLAG_TYPE_DCT |
                                   MLX5_QP_FLAG_TYPE_DCI |
-                                  MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
+                                  MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
+                                  MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
                         return -EINVAL;
                 err = get_qp_user_index(to_mucontext(pd->uobject->context),
@@ -1953,6 +1954,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                         qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
                 }
+                if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
+                        if (init_attr->qp_type != IB_QPT_RC ||
+                            !MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
+                                mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
+                                return -EOPNOTSUPP;
+                        }
+                        qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
+                }
                 if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
                         if (init_attr->qp_type != IB_QPT_UD ||
                             (MLX5_CAP_GEN(dev->mdev, port_type) !=
@@ -2049,7 +2059,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                 MLX5_SET(qpc, qpc, cd_slave_send, 1);
         if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
                 MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+        if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
+                MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
         if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                 configure_responder_scat_cqe(init_attr, qpc);
                 configure_requester_scat_cqe(dev, init_attr,
...
@@ -1057,7 +1057,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
         u8 vector_calc[0x1];
         u8 umr_ptr_rlky[0x1];
         u8 imaicl[0x1];
-        u8 reserved_at_232[0x4];
+        u8 qp_packet_based[0x1];
+        u8 reserved_at_233[0x3];
         u8 qkv[0x1];
         u8 pkv[0x1];
         u8 set_deth_sqpn[0x1];
@@ -2269,7 +2270,8 @@ struct mlx5_ifc_qpc_bits {
         u8 st[0x8];
         u8 reserved_at_10[0x3];
         u8 pm_state[0x2];
-        u8 reserved_at_15[0x3];
+        u8 reserved_at_15[0x1];
+        u8 req_e2e_credit_mode[0x2];
         u8 offload_type[0x4];
         u8 end_padding_mode[0x2];
         u8 reserved_at_1e[0x2];
...
@@ -48,6 +48,7 @@ enum {
         MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
         MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
         MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
+        MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
 };
 enum {
@@ -236,6 +237,7 @@ enum mlx5_ib_query_dev_resp_flags {
         /* Support 128B CQE compression */
         MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
         MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
+        MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
 };
 enum mlx5_ib_tunnel_offloads {
...