Commit af1ba291 authored by Artemy Kovalyov, committed by Doug Ledford

{net, IB}/mlx5: Refactor internal SRQ API

Currently, the SRQ API uses the obsolete mlx5_*_srq_mbox_{in,out}
structs, which limit the ability to pass SRQ attributes between the
net and IB parts of the driver.

This patch changes the SRQ API to use the auto-generated structs and
provides a better way to pass attributes, which will be used by
upcoming features.
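
For illustration, a minimal sketch of the new create flow, drawn from the
mlx5_ib_create_srq() hunks in this patch (simplified; error handling, the
XRC/BASIC cases and the pas setup done by create_srq_user()/create_srq_kernel()
are omitted). The field names are those of the struct mlx5_srq_attr added below:

	struct mlx5_srq_attr in = {0};

	/* describe the SRQ with the shared attribute struct */
	in.type      = init_attr->srq_type;
	in.log_size  = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in.pd        = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;

	/* the core layer builds the firmware mailbox from these attributes */
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);

The caller no longer builds a mlx5_create_srq_mbox_in mailbox or tracks inlen;
only the in.pas page list is still allocated separately and released with
kvfree() once the command completes.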
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent e3353c26
@@ -74,14 +74,12 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
struct ib_udata *udata, int buf_size, int *inlen,
int is_xrc)
struct mlx5_srq_attr *in,
struct ib_udata *udata, int buf_size)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
size_t ucmdlen;
void *xsrqc;
int err;
int npages;
int page_shift;
@@ -104,7 +102,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata->inlen - sizeof(ucmd)))
return -EINVAL;
if (is_xrc) {
if (in->type == IB_SRQT_XRC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
@@ -130,14 +128,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_umem;
}
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!(*in)) {
in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
if (!in->pas) {
err = -ENOMEM;
goto err_umem;
}
mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
@@ -146,20 +143,16 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
}
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type == IB_SRQT_XRC)
in->user_index = uidx;
return 0;
err_in:
kvfree(*in);
kvfree(in->pas);
err_umem:
ib_umem_release(srq->umem);
@@ -168,15 +161,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
int *inlen, int is_xrc)
struct mlx5_srq_attr *in, int buf_size)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
int page_shift;
int npages;
void *xsrqc;
err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
@@ -204,13 +195,12 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
if (!in->pas) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&srq->buf, (*in)->pas);
mlx5_fill_page_array(&srq->buf, in->pas);
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
@@ -221,20 +211,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
}
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type == IB_SRQT_XRC)
in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
err_in:
kvfree(*in);
kvfree(in->pas);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@@ -267,10 +252,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int desc_size;
int buf_size;
int err;
struct mlx5_create_srq_mbox_in *uninitialized_var(in);
int uninitialized_var(inlen);
int is_xrc;
u32 flgs, xrcdn;
struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
/* Sanity check SRQ size before proceeding */
@@ -302,14 +284,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
is_xrc);
err = create_srq_user(pd, srq, &in, udata, buf_size);
else
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
is_xrc);
err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -317,23 +295,23 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
if (is_xrc) {
xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
in.type = init_attr->srq_type;
in.log_size = ilog2(srq->msrq.max);
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)
in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
if (init_attr->srq_type == IB_SRQT_XRC) {
in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
} else if (init_attr->srq_type == IB_SRQT_BASIC) {
xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
}
in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
in.pd = to_mpd(pd)->pdn;
in.db_record = srq->db.dma;
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
@@ -401,7 +379,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
struct mlx5_query_srq_mbox_out *out;
struct mlx5_srq_attr *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
@@ -411,7 +389,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (ret)
goto out_box;
srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
srq_attr->srq_limit = out->lwm;
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;
......
@@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
static int get_pas_size(void *srqc)
static int get_pas_size(struct mlx5_srq_attr *in)
{
u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
u32 log_page_size = in->log_page_size + 12;
u32 log_srq_size = in->log_size;
u32 log_rq_stride = in->wqe_shift;
u32 page_offset = in->page_offset;
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
@@ -78,57 +78,58 @@ static int get_pas_size(void *srqc)
return rq_num_pas * sizeof(u64);
}
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
if (srqc_to_rmpc) {
switch (MLX5_GET(srqc, srqc, state)) {
case MLX5_SRQC_STATE_GOOD:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
break;
case MLX5_SRQC_STATE_ERROR:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
break;
default:
pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
__LINE__, MLX5_GET(srqc, srqc, state));
MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
}
MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
} else {
switch (MLX5_GET(rmpc, rmpc, state)) {
case MLX5_RMPC_STATE_RDY:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
break;
case MLX5_RMPC_STATE_ERR:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
break;
default:
pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
__func__, __LINE__,
MLX5_GET(rmpc, rmpc, state));
MLX5_SET(srqc, srqc, state,
MLX5_GET(rmpc, rmpc, state));
}
MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
}
MLX5_SET(wq, wq, wq_signature, !!(in->flags
& MLX5_SRQ_FLAG_WQ_SIG));
MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
MLX5_SET(wq, wq, log_wq_sz, in->log_size);
MLX5_SET(wq, wq, page_offset, in->page_offset);
MLX5_SET(wq, wq, lwm, in->lwm);
MLX5_SET(wq, wq, pd, in->pd);
MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
& MLX5_SRQ_FLAG_WQ_SIG));
MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
MLX5_SET(srqc, srqc, page_offset, in->page_offset);
MLX5_SET(srqc, srqc, lwm, in->lwm);
MLX5_SET(srqc, srqc, pd, in->pd);
MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
MLX5_SET(srqc, srqc, xrcd, in->xrcd);
MLX5_SET(srqc, srqc, cqn, in->cqn);
}
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
if (MLX5_GET(wq, wq, wq_signature))
in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
in->log_size = MLX5_GET(wq, wq, log_wq_sz);
in->page_offset = MLX5_GET(wq, wq, page_offset);
in->lwm = MLX5_GET(wq, wq, lwm);
in->pd = MLX5_GET(wq, wq, pd);
in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}
static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
if (MLX5_GET(srqc, srqc, wq_signature))
in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
in->page_offset = MLX5_GET(srqc, srqc, page_offset);
in->lwm = MLX5_GET(srqc, srqc, lwm);
in->pd = MLX5_GET(srqc, srqc, pd);
in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
@@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
EXPORT_SYMBOL(mlx5_core_get_srq);
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen)
struct mlx5_srq_attr *in)
{
struct mlx5_create_srq_mbox_out out;
u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
void *create_in;
void *srqc;
void *pas;
int pas_size;
int inlen;
int err;
memset(&out, 0, sizeof(out));
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
set_srqc(srqc, in);
memcpy(pas, in->pas, pas_size);
err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
sizeof(out));
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
return err;
}
@@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
struct mlx5_destroy_srq_mbox_in in;
struct mlx5_destroy_srq_mbox_out out;
u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
MLX5_SET(destroy_srq_in, srq_in, opcode,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
(u32 *)(&out), sizeof(out));
return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
struct mlx5_arm_srq_mbox_in in;
struct mlx5_arm_srq_mbox_out out;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
/* arm_srq structs missing; using the identical xrc ones */
u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
in.hdr.opmod = cpu_to_be16(!!is_srq);
in.srqn = cpu_to_be32(srq->srqn);
in.lwm = cpu_to_be16(lwm);
MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
sizeof(in), (u32 *)(&out),
sizeof(out));
return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
struct mlx5_query_srq_mbox_in in;
u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
u32 *srq_out;
void *srqc;
int err;
memset(&in, 0, sizeof(in));
srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
if (!srq_out)
return -ENOMEM;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out,
MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
(u32 *)out, sizeof(*out));
srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
get_srqc(srqc, out);
if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(srq_out);
return err;
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in,
int srq_inlen)
struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
void *create_in;
void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
@@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
set_srqc(xrc_srqc, in);
MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
memcpy(pas, in->pas, pas_size);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
@@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
u32 *xrcsrq_out;
void *srqc;
void *xrc_srqc;
int err;
@@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
get_srqc(xrc_srqc, out);
if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(xrcsrq_out);
@@ -326,26 +353,27 @@
}
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int srq_inlen)
struct mlx5_srq_attr *in)
{
void *create_in;
void *rmpc;
void *srqc;
void *wq;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
@@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
u32 *rmp_out;
void *rmpc;
void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
@@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (err)
goto out;
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
rmpc_srqc_reformat(srqc, rmpc, false);
get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(rmp_out);
@@ -416,15 +444,14 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int create_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in,
int inlen, int is_xrc)
struct mlx5_srq_attr *in)
{
if (!dev->issi)
return create_srq_cmd(dev, srq, in, inlen);
return create_srq_cmd(dev, srq, in);
else if (srq->common.res == MLX5_RES_XSRQ)
return create_xrc_srq_cmd(dev, srq, in, inlen);
return create_xrc_srq_cmd(dev, srq, in);
else
return create_rmp_cmd(dev, srq, in, inlen);
return create_rmp_cmd(dev, srq, in);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
@@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc)
struct mlx5_srq_attr *in)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
if (in->type == IB_SRQT_XRC)
srq->common.res = MLX5_RES_XSRQ;
else
srq->common.res = MLX5_RES_SRQ;
err = create_srq_split(dev, srq, in, inlen, is_xrc);
err = create_srq_split(dev, srq, in);
if (err)
return err;
@@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
if (!dev->issi)
return query_srq_cmd(dev, srq, out);
......
@@ -46,6 +46,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
MLX5_RQ_BITMASK_VSD = 1 << 1,
@@ -772,11 +773,10 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc);
struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
......
@@ -35,6 +35,31 @@
#include <linux/mlx5/driver.h>
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
};
struct mlx5_srq_attr {
u32 type;
u32 flags;
u32 log_size;
u32 wqe_shift;
u32 log_page_size;
u32 wqe_cnt;
u32 srqn;
u32 xrcd;
u32 page_offset;
u32 cqn;
u32 pd;
u32 lwm;
u32 user_index;
u64 db_record;
u64 *pas;
};
struct mlx5_core_dev;
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
......