Commit 051f2630 authored by Leon Romanovsky, committed by Doug Ledford

IB/mlx5: Add driver cross-channel support

Add support for cross-channel functionality to the mlx5
driver. This includes the ability to ignore overrun for a CQ
intended for cross-channel use, exporting the device capability,
and configuring QPs to act as sync master/slave queues.

A cross-channel enabled QP supports a combination of
three possible properties:
* managed WQE processing on the receive queue of this QP (MANAGED_RECV)
* managed WQE processing on the send queue of this QP (MANAGED_SEND)
* control WQEs that drive other queues are supported on the send queue (CROSS_CHANNEL master)
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 8a06ce59
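
As an illustration of the three properties listed above, here is a minimal, hypothetical kernel-verbs sketch of a consumer creating a cross-channel master QP. It is not part of this commit; the QP type, queue depths, and helper name are placeholder assumptions, and only the IB_QP_CREATE_* flag comes from the verbs API this patch implements.

#include <rdma/ib_verbs.h>

/* Hypothetical consumer helper, for illustration only. */
static struct ib_qp *create_cc_master_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.qp_type      = IB_QPT_RC,
		.send_cq      = cq,
		.recv_cq      = cq,
		.cap	      = { .max_send_wr = 16, .max_recv_wr = 16,
				  .max_send_sge = 1, .max_recv_sge = 1 },
		/* master: its send queue may carry WQEs that drive other queues */
		.create_flags = IB_QP_CREATE_CROSS_CHANNEL,
	};

	return ib_create_qp(pd, &attr);
}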
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -778,7 +778,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	int eqn;
 	int err;
 
-	if (attr->flags)
+	if (check_cq_create_flags(attr->flags))
 		return ERR_PTR(-EINVAL);
 
 	if (entries < 0)
@@ -800,6 +800,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	spin_lock_init(&cq->lock);
 	cq->resize_buf = NULL;
 	cq->resize_umem = NULL;
+	cq->create_flags = attr->flags;
 
 	if (context) {
 		err = create_cq_user(dev, udata, context, cq, entries,
@@ -817,6 +818,10 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	cq->cqe_size = cqe_size;
 	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+
+	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+		cqb->ctx.cqe_sz_flags |= (1 << 1);
+
 	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
 	if (err)
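
The new IB_CQ_FLAGS_IGNORE_OVERRUN path above is driven through struct ib_cq_init_attr. A minimal sketch, assuming a kernel consumer with no completion callbacks and an illustrative CQ depth:

#include <rdma/ib_verbs.h>

static struct ib_cq *create_cc_cq(struct ib_device *ibdev, void *priv)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 256,	/* illustrative depth */
		.comp_vector = 0,
		/* lands in cq->create_flags in mlx5_ib_create_cq() above */
		.flags	     = IB_CQ_FLAGS_IGNORE_OVERRUN,
	};

	return ib_create_cq(ibdev, NULL, NULL, priv, &cq_attr);
}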
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -512,6 +512,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->odp_caps = dev->odp_caps;
 #endif
 
+	if (MLX5_CAP_GEN(mdev, cd))
+		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
+
 	return 0;
 }
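
Consumers are expected to test the exported capability before requesting any of the cross-channel create flags. A sketch using the ib_query_device() interface of this kernel generation; cc_supported() is a hypothetical helper name:

#include <rdma/ib_verbs.h>

static bool cc_supported(struct ib_device *ibdev)
{
	struct ib_device_attr attr;

	/* the HCA "cd" capability above surfaces as IB_DEVICE_CROSS_CHANNEL */
	if (ib_query_device(ibdev, &attr))
		return false;

	return !!(attr.device_cap_flags & IB_DEVICE_CROSS_CHANNEL);
}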
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -90,6 +90,10 @@ enum mlx5_ib_mad_ifc_flags {
 	MLX5_MAD_IFC_NET_VIEW	= 4,
 };
 
+enum {
+	MLX5_CROSS_CHANNEL_UUAR = 0,
+};
+
 struct mlx5_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct list_head	db_page_list;
@@ -247,6 +251,9 @@ struct mlx5_ib_cq_buf {
 enum mlx5_ib_qp_flags {
 	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
 	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
+	MLX5_IB_QP_CROSS_CHANNEL		= 1 << 2,
+	MLX5_IB_QP_MANAGED_SEND			= 1 << 3,
+	MLX5_IB_QP_MANAGED_RECV			= 1 << 4,
 };
 
 struct mlx5_umr_wr {
@@ -289,6 +296,7 @@ struct mlx5_ib_cq {
 	struct mlx5_ib_cq_buf	*resize_buf;
 	struct ib_umem		*resize_umem;
 	int			cqe_size;
+	u32			create_flags;
 };
 
 struct mlx5_ib_srq {
@@ -678,4 +686,12 @@ static inline int is_qp1(enum ib_qp_type qp_type)
 #define MLX5_MAX_UMR_SHIFT 16
 #define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
 
+static inline u32 check_cq_create_flags(u32 flags)
+{
+	/*
+	 * It returns non-zero value for unsupported CQ
+	 * create flags, otherwise it returns zero.
+	 */
+	return (flags & ~IB_CQ_FLAGS_IGNORE_OVERRUN);
+}
 #endif /* MLX5_IB_H */
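
Since check_cq_create_flags() is a pure bit mask, its accept/reject behavior can be shown directly. A demonstration-only sketch; cq_flags_selftest() is hypothetical and not part of the driver:

#include <linux/bitops.h>
#include <linux/bug.h>
#include "mlx5_ib.h"

static void cq_flags_selftest(void)
{
	/* the one supported flag is masked away: zero, CQ creation proceeds */
	WARN_ON(check_cq_create_flags(IB_CQ_FLAGS_IGNORE_OVERRUN) != 0);

	/* any unknown bit survives the mask: non-zero, create fails -EINVAL */
	WARN_ON(check_cq_create_flags(IB_CQ_FLAGS_IGNORE_OVERRUN | BIT(5)) == 0);
}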
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -616,18 +616,23 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	/*
 	 * TBD: should come from the verbs when we have the API
 	 */
-	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
-	if (uuarn < 0) {
-		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
-		mlx5_ib_dbg(dev, "reverting to medium latency\n");
-		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
-		if (uuarn < 0) {
-			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
-			mlx5_ib_dbg(dev, "reverting to high latency\n");
-			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
-			if (uuarn < 0) {
-				mlx5_ib_warn(dev, "uuar allocation failed\n");
-				return uuarn;
-			}
-		}
-	}
+	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
+		uuarn = MLX5_CROSS_CHANNEL_UUAR;
+	else {
+		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
+		if (uuarn < 0) {
+			mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
+			mlx5_ib_dbg(dev, "reverting to medium latency\n");
+			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
+			if (uuarn < 0) {
+				mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+				mlx5_ib_dbg(dev, "reverting to high latency\n");
+				uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+				if (uuarn < 0) {
+					mlx5_ib_warn(dev, "uuar allocation failed\n");
+					return uuarn;
+				}
+			}
+		}
+	}
@@ -881,6 +886,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (init_attr->create_flags &
+			(IB_QP_CREATE_CROSS_CHANNEL |
+			 IB_QP_CREATE_MANAGED_SEND |
+			 IB_QP_CREATE_MANAGED_RECV)) {
+		if (!MLX5_CAP_GEN(mdev, cd)) {
+			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
+			return -EINVAL;
+		}
+		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
+			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
+		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
+			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
+		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
+			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
+	}
+
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -955,6 +975,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
 		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
 
+	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
+	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
+		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
+	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
+		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
+
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
 		int rcqe_sz;
 		int scqe_sz;
@@ -3130,6 +3157,13 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
+	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
+		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
+	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
+		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
+
 	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
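
Putting the QP-side pieces together: a hedged sketch of pairing one managed (slave) QP with a master QP, so that the master's control WQEs gate the slave's send queue. Names, QP types, and sizes are assumptions for illustration; error unwinding is kept minimal:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int cc_setup(struct ib_pd *pd, struct ib_cq *cc_cq,
		    struct ib_qp **master, struct ib_qp **slave)
{
	struct ib_qp_init_attr slave_attr = {
		.qp_type      = IB_QPT_RC,
		.send_cq      = cc_cq,
		.recv_cq      = cc_cq,
		.cap	      = { .max_send_wr = 64, .max_recv_wr = 64,
				  .max_send_sge = 1, .max_recv_sge = 1 },
		/* send WQEs execute only when enabled by the master */
		.create_flags = IB_QP_CREATE_MANAGED_SEND,
	};
	struct ib_qp_init_attr master_attr = slave_attr;

	master_attr.create_flags = IB_QP_CREATE_CROSS_CHANNEL;

	*slave = ib_create_qp(pd, &slave_attr);
	if (IS_ERR(*slave))
		return PTR_ERR(*slave);

	*master = ib_create_qp(pd, &master_attr);
	if (IS_ERR(*master)) {
		ib_destroy_qp(*slave);
		return PTR_ERR(*master);
	}

	return 0;
}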
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -130,6 +130,9 @@ enum {
 	MLX5_QP_BIT_RWE		= 1 << 14,
 	MLX5_QP_BIT_RAE		= 1 << 13,
 	MLX5_QP_BIT_RIC		= 1 << 4,
+	MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
+	MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
+	MLX5_QP_BIT_CC_MASTER	= 1 << 0
 };
 
 enum {