Commit 4d0b7ef9 authored by Aya Levin, committed by Saeed Mahameed

net/mlx5e: Allow CQ outside of channel context

In order to be able to create a CQ outside of a channel context, remove
the cq->channel direct pointer. This requires adding direct pointers to
the channel statistics, netdevice, priv and mlx5_core device, to keep
supporting CQs that are part of an mlx5e_channel.
In addition, parameters that were previously derived from the channel,
such as the NAPI struct, NUMA node, channel stats and index, are now
assembled in struct mlx5e_create_cq_param, which is passed to
mlx5e_open_cq() instead of the channel pointer. Generalizing
mlx5e_open_cq() allows opening a CQ outside of a channel context, which
will be used in the following patches of this patch-set.
Signed-off-by: Aya Levin <ayal@nvidia.com>
Signed-off-by: Eran Ben Elisha <eranbe@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent cdd3f236
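
To summarize the API change (assembled from the hunks below): instead of
passing a struct mlx5e_channel, callers now fill a struct
mlx5e_create_cq_param with the values mlx5e_open_cq() used to derive from
the channel. Inside a channel, the calling pattern becomes:

	struct mlx5e_create_cq_param ccp = {};

	/* All four fields were previously read off the channel inside
	 * mlx5e_alloc_cq(); the caller now supplies them explicitly.
	 */
	ccp.napi = &c->napi;
	ccp.ch_stats = c->stats;
	ccp.node = cpu_to_node(c->cpu);
	ccp.ix = c->ix;

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp,
			    &ccp, &c->rq.cq);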
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -282,10 +282,12 @@ struct mlx5e_cq {
 	u16                        event_ctr;
 	struct napi_struct        *napi;
 	struct mlx5_core_cq        mcq;
-	struct mlx5e_channel      *channel;
+	struct mlx5e_ch_stats     *ch_stats;
 
 	/* control */
+	struct net_device         *netdev;
 	struct mlx5_core_dev      *mdev;
+	struct mlx5e_priv         *priv;
 	struct mlx5_wq_ctrl        wq_ctrl;
 } ____cacheline_aligned_in_smp;
@@ -923,9 +925,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 		     struct mlx5e_xdpsq *sq, bool is_redirect);
 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
 
+struct mlx5e_create_cq_param {
+	struct napi_struct *napi;
+	struct mlx5e_ch_stats *ch_stats;
+	int node;
+	int ix;
+};
+
 struct mlx5e_cq_param;
-int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
-		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
+int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
+		  struct mlx5e_cq *cq);
 void mlx5e_close_cq(struct mlx5e_cq *cq);
 int mlx5e_open_locked(struct net_device *netdev);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
 
 int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
 {
-	struct mlx5e_priv *priv = cq->channel->priv;
 	u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
 	u8 hw_status;
 	void *cqc;
 	int err;
 
-	err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out);
+	err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
 	if (err)
 		return err;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -308,7 +308,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
 	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
 
-	netdev_err(cq->channel->netdev,
+	netdev_err(cq->netdev,
 		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
 		   cq->mcq.cqn, ci, qn,
 		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -48,9 +48,15 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 		   struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
 		   struct mlx5e_channel *c)
 {
+	struct mlx5e_create_cq_param ccp = {};
 	struct mlx5e_channel_param *cparam;
 	int err;
 
+	ccp.napi = &c->napi;
+	ccp.ch_stats = c->stats;
+	ccp.node = cpu_to_node(c->cpu);
+	ccp.ix = c->ix;
+
 	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
 		return -EINVAL;
@@ -60,7 +66,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 
 	mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
 
-	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq);
+	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+			    &c->xskrq.cq);
 	if (unlikely(err))
 		goto err_free_cparam;
@@ -68,7 +75,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 	if (unlikely(err))
 		goto err_close_rx_cq;
 
-	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq);
+	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+			    &c->xsksq.cq);
 	if (unlikely(err))
 		goto err_close_rq;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1515,10 +1515,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
 	mlx5e_free_xdpsq(sq);
 }
 
-static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 				 struct mlx5e_cq_param *param,
 				 struct mlx5e_cq *cq)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 	int eqn_not_used;
 	unsigned int irqn;
@@ -1551,25 +1552,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 	}
 
 	cq->mdev = mdev;
+	cq->netdev = priv->netdev;
+	cq->priv = priv;
 
 	return 0;
 }
 
-static int mlx5e_alloc_cq(struct mlx5e_channel *c,
+static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
 			  struct mlx5e_cq_param *param,
+			  struct mlx5e_create_cq_param *ccp,
 			  struct mlx5e_cq *cq)
 {
-	struct mlx5_core_dev *mdev = c->priv->mdev;
 	int err;
 
-	param->wq.buf_numa_node = cpu_to_node(c->cpu);
-	param->wq.db_numa_node = cpu_to_node(c->cpu);
-	param->eq_ix = c->ix;
+	param->wq.buf_numa_node = ccp->node;
+	param->wq.db_numa_node = ccp->node;
+	param->eq_ix = ccp->ix;
 
-	err = mlx5e_alloc_cq_common(mdev, param, cq);
+	err = mlx5e_alloc_cq_common(priv, param, cq);
 
-	cq->napi = &c->napi;
-	cq->channel = c;
+	cq->napi = ccp->napi;
+	cq->ch_stats = ccp->ch_stats;
 
 	return err;
 }
@@ -1633,13 +1636,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
 }
 
-int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
-		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
+int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
+		  struct mlx5e_cq *cq)
 {
-	struct mlx5_core_dev *mdev = c->mdev;
+	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	err = mlx5e_alloc_cq(c, param, cq);
+	err = mlx5e_alloc_cq(priv, param, ccp, cq);
 	if (err)
 		return err;
@@ -1665,14 +1669,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
 
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
+			     struct mlx5e_create_cq_param *ccp,
 			     struct mlx5e_channel_param *cparam)
 {
 	int err;
 	int tc;
 
 	for (tc = 0; tc < c->num_tc; tc++) {
-		err = mlx5e_open_cq(c, params->tx_cq_moderation,
-				    &cparam->txq_sq.cqp, &c->sq[tc].cq);
+		err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
+				    ccp, &c->sq[tc].cq);
 		if (err)
 			goto err_close_tx_cqs;
 	}
@@ -1812,30 +1817,40 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 			     struct mlx5e_channel_param *cparam)
 {
 	struct dim_cq_moder icocq_moder = {0, 0};
+	struct mlx5e_create_cq_param ccp = {};
 	int err;
 
-	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq);
+	ccp.napi = &c->napi;
+	ccp.ch_stats = c->stats;
+	ccp.node = cpu_to_node(c->cpu);
+	ccp.ix = c->ix;
+
+	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+			    &c->async_icosq.cq);
 	if (err)
 		return err;
 
-	err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq);
+	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+			    &c->icosq.cq);
 	if (err)
 		goto err_close_async_icosq_cq;
 
-	err = mlx5e_open_tx_cqs(c, params, cparam);
+	err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
 	if (err)
 		goto err_close_icosq_cq;
 
-	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
+	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+			    &c->xdpsq.cq);
 	if (err)
 		goto err_close_tx_cqs;
 
-	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
+	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+			    &c->rq.cq);
 	if (err)
 		goto err_close_xdp_tx_cqs;
 
-	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
-				     &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
+	err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
+				     &ccp, &c->rq_xdpsq.cq) : 0;
 	if (err)
 		goto err_close_rx_cq;
@@ -3221,14 +3236,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
 	return 0;
 }
 
-static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
 			       struct mlx5e_cq *cq,
 			       struct mlx5e_cq_param *param)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
+
 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 	param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 
-	return mlx5e_alloc_cq_common(mdev, param, cq);
+	return mlx5e_alloc_cq_common(priv, param, cq);
 }
 
 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3242,7 +3259,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
 
 	mlx5e_build_drop_rq_param(priv, &rq_param);
 
-	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
+	err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
 	if (err)
 		return err;
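Both mlx5e_open_xsk() and mlx5e_open_queues() above fill struct
mlx5e_create_cq_param with the same four channel-derived values. A small
helper could consolidate that initialization; a sketch (the helper name is
hypothetical and not part of this patch):

	/* Hypothetical helper: derive a CQ creation context from a channel. */
	static void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp,
						struct mlx5e_channel *c)
	{
		*ccp = (struct mlx5e_create_cq_param) {
			.napi = &c->napi,
			.ch_stats = c->stats,
			.node = cpu_to_node(c->cpu),
			.ix = c->ix,
		};
	}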
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -670,13 +670,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 		sqcc += wi->num_wqebbs;
 
 		if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-			netdev_WARN_ONCE(cq->channel->netdev,
+			netdev_WARN_ONCE(cq->netdev,
 					 "Bad OP in ICOSQ CQE: 0x%x\n",
 					 get_cqe_opcode(cqe));
 			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 					     (struct mlx5_err_cqe *)cqe);
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-				queue_work(cq->channel->priv->wq, &sq->recover_work);
+				queue_work(cq->priv->wq, &sq->recover_work);
 			break;
 		}
@@ -697,7 +697,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 			break;
 #endif
 		default:
-			netdev_WARN_ONCE(cq->channel->netdev,
+			netdev_WARN_ONCE(cq->netdev,
 					 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
 					 wi->wqe_type);
 		}
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -805,8 +805,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 						     (struct mlx5_err_cqe *)cqe);
 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-				queue_work(cq->channel->priv->wq,
-					   &sq->recover_work);
+				queue_work(cq->priv->wq, &sq->recover_work);
 			}
 
 			stats->cqe_err++;
 		}
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
 
 	napi_schedule(cq->napi);
 	cq->event_ctr++;
-	cq->channel->stats->events++;
+	cq->ch_stats->events++;
 }
 
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
 {
 	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
-	struct mlx5e_channel *c = cq->channel;
-	struct net_device *netdev = c->netdev;
+	struct net_device *netdev = cq->netdev;
 
 	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
 		   __func__, mcq->cqn, event);
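
With cq->channel gone, nothing in the CQ path requires a channel anymore:
any context that can supply a NAPI instance, a stats block, a NUMA node
and an EQ index can open a CQ. An illustrative caller for the follow-up
patches mentioned in the commit message (the "ptp" names and the
moderation value are hypothetical placeholders, not code from this
series):

	/* Hypothetical non-channel context (e.g. a special-purpose queue). */
	struct mlx5e_create_cq_param ccp = {
		.napi = &ptp->napi,		/* assumed: caller-owned NAPI */
		.ch_stats = ptp->stats,		/* assumed: caller-owned stats */
		.node = dev_to_node(mlx5_core_dma_dev(priv->mdev)),
		.ix = 0,
	};

	err = mlx5e_open_cq(priv, ptp_moder, &cq_param, &ccp, &ptp->cq);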