Commit 80743c4f authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Add TX max rate support for MQPRIO channel mode

Add driver max_rate support for the MQPRIO bw_rlimit shaper
in channel mode.
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent e0ee6891
...@@ -251,6 +251,9 @@ struct mlx5e_params { ...@@ -251,6 +251,9 @@ struct mlx5e_params {
u16 mode; u16 mode;
u8 num_tc; u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
struct mlx5e_mqprio_rl *rl;
} channel;
} mqprio; } mqprio;
bool rx_cqe_compress_def; bool rx_cqe_compress_def;
bool tunneled_offload_en; bool tunneled_offload_en;
...@@ -877,6 +880,7 @@ struct mlx5e_priv { ...@@ -877,6 +880,7 @@ struct mlx5e_priv {
#endif #endif
struct mlx5e_scratchpad scratchpad; struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb; struct mlx5e_htb htb;
struct mlx5e_mqprio_rl *mqprio_rl;
}; };
struct mlx5e_rx_handlers { struct mlx5e_rx_handlers {
......
...@@ -7,6 +7,21 @@ ...@@ -7,6 +7,21 @@
#define BYTES_IN_MBIT 125000 #define BYTES_IN_MBIT 125000
/* Validate a user-supplied TX rate against the minimum the HW shaper can
 * represent (1 Mbit/sec, i.e. BYTES_IN_MBIT Bytes/sec).
 * Returns 0 when @nbytes (Bytes/sec) is acceptable, -EINVAL otherwise.
 */
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
	if (nbytes >= BYTES_IN_MBIT)
		return 0;

	qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
		 nbytes, BYTES_IN_MBIT);
	return -EINVAL;
}
/* Convert a rate from Bytes/sec to whole Mbits/sec, truncating any
 * remainder. NOTE(review): the u64 quotient is implicitly narrowed to
 * u32 on return; rates above ~4e9 Mbit/sec would truncate — presumably
 * unreachable for this HW, but worth confirming.
 */
static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
{
	u64 mbits = div_u64(nbytes, BYTES_IN_MBIT);

	return mbits;
}
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{ {
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev)); return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
...@@ -980,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce ...@@ -980,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
return err; return err;
} }
/* MQPRIO TX rate-limit context: one HW QoS root node plus one leaf
 * (rate-limited) node per traffic class. Created by
 * mlx5e_mqprio_rl_init(), torn down by mlx5e_mqprio_rl_cleanup().
 */
struct mlx5e_mqprio_rl {
	struct mlx5_core_dev *mdev;	/* device owning the QoS nodes */
	u32 root_id;			/* HW id of the root scheduling node */
	u32 *leaves_id;			/* HW ids of per-TC leaf nodes; num_tc entries */
	u8 num_tc;			/* number of traffic classes (leaves) */
};
/* Allocate a zero-initialized MQPRIO rate-limit context.
 * Returns NULL on allocation failure. Initialize with
 * mlx5e_mqprio_rl_init() and release with mlx5e_mqprio_rl_free().
 */
struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
{
	struct mlx5e_mqprio_rl *rl;

	/* sizeof(*rl) rather than sizeof(struct ...): ties the allocation
	 * size to the pointee type (kernel checkpatch preference).
	 */
	rl = kvzalloc(sizeof(*rl), GFP_KERNEL);
	return rl;
}
/* Release a context obtained from mlx5e_mqprio_rl_alloc().
 * Callers run mlx5e_mqprio_rl_cleanup() first when the context was
 * initialized. kvfree(NULL) is a no-op, so a NULL @rl is safe.
 */
void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
{
	kvfree(rl);
}
/* Build the HW QoS hierarchy for MQPRIO channel-mode rate limiting:
 * one root scheduling node plus one leaf node per traffic class, each
 * leaf capped at the corresponding max_rate.
 *
 * @rl:       context from mlx5e_mqprio_rl_alloc()
 * @mdev:     device to create the QoS nodes on
 * @num_tc:   number of traffic classes (leaves to create)
 * @max_rate: per-TC max rate in Bytes/sec, converted to Mbits/sec for HW
 *
 * Returns 0 on success or a negative errno. On failure all nodes and
 * allocations made so far are unwound, leaving @rl uninitialized.
 */
int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
	 u64 max_rate[])
{
	int err;
	int tc;
	if (!mlx5_qos_is_supported(mdev)) {
		qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
		return -EOPNOTSUPP;
	}
	/* HW exposes a bounded number of leaf nodes; reject excess TCs. */
	if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
		return -EINVAL;
	rl->mdev = mdev;
	rl->num_tc = num_tc;
	rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
	if (!rl->leaves_id)
		return -ENOMEM;
	err = mlx5_qos_create_root_node(mdev, &rl->root_id);
	if (err)
		goto err_free_leaves;
	qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
	for (tc = 0; tc < num_tc; tc++) {
		u32 max_average_bw;
		/* max_rate[tc] == 0 yields max_average_bw == 0 — presumably
		 * "no limit" in HW; confirm against mlx5_qos_create_leaf_node.
		 */
		max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
		err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
						&rl->leaves_id[tc]);
		if (err)
			goto err_destroy_leaves;
		qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
			tc, rl->leaves_id[tc], max_average_bw);
	}
	return 0;

err_destroy_leaves:
	/* Unwind only the leaves created so far (tc is the failed index),
	 * then the root, then the id array.
	 */
	while (--tc >= 0)
		mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
	mlx5_qos_destroy_node(mdev, rl->root_id);
err_free_leaves:
	kvfree(rl->leaves_id);
	return err;
}
/* Tear down the HW QoS hierarchy built by mlx5e_mqprio_rl_init():
 * destroy every per-TC leaf node, then the root, then release the
 * leaf-id array. The context struct itself is freed separately by
 * mlx5e_mqprio_rl_free().
 */
void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
{
	int i;

	for (i = 0; i < rl->num_tc; i++)
		mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[i]);

	mlx5_qos_destroy_node(rl->mdev, rl->root_id);
	kvfree(rl->leaves_id);
}
/* Look up the HW scheduling-node id for traffic class @tc.
 * On success writes the id to @hw_id and returns 0; returns -EINVAL
 * when @tc is outside [0, num_tc).
 */
int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
{
	/* Reject negative @tc too: the original upper-bound-only check let
	 * tc < 0 slip through to an out-of-bounds leaves_id[tc] read.
	 */
	if (tc < 0 || tc >= rl->num_tc)
		return -EINVAL;

	*hw_id = rl->leaves_id[tc];
	return 0;
}
...@@ -12,6 +12,7 @@ struct mlx5e_priv; ...@@ -12,6 +12,7 @@ struct mlx5e_priv;
struct mlx5e_channels; struct mlx5e_channels;
struct mlx5e_channel; struct mlx5e_channel;
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv); int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
...@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, ...@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack); struct netlink_ext_ack *extack);
/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
u64 max_rate[]);
void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
#endif #endif
...@@ -1705,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) ...@@ -1705,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
mlx5e_close_cq(&c->sq[tc].cq); mlx5e_close_cq(&c->sq[tc].cq);
} }
/* Map TX queue index @txq back to its traffic class by scanning the
 * per-TC [offset, offset + count) ranges in @tc_to_txq.
 * Returns the matching TC, or -ENOENT (after a WARN) when no range
 * contains @txq.
 */
static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
{
	int tc;
	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
		/* Unsigned trick: when txq < offset the subtraction wraps to
		 * a huge value and fails the < count test, so this single
		 * comparison implements offset <= txq < offset + count.
		 */
		if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
			return tc;
	WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
	return -ENOENT;
}
/* Resolve the QoS queue-group HW id for TX queue @txq_ix.
 * When MQPRIO channel-mode rate limiting is not active, *hw_id is set
 * to 0 (no QoS group) and 0 is returned. Otherwise the queue is mapped
 * to its TC and the TC's leaf-node id is looked up.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
					u32 *hw_id)
{
	bool rl_active = params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL &&
			 params->mqprio.channel.rl;
	int tc;

	if (!rl_active) {
		*hw_id = 0;
		return 0;
	}

	tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
	if (tc < 0)
		return tc;

	return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c, static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_channel_param *cparam) struct mlx5e_channel_param *cparam)
...@@ -1713,9 +1743,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, ...@@ -1713,9 +1743,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels; int txq_ix = c->ix + tc * params->num_channels;
u32 qos_queue_group_id;
err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
if (err)
goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc, 0, params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix].sq[tc]); &c->priv->channel_stats[c->ix].sq[tc]);
if (err) if (err)
goto err_close_sqs; goto err_close_sqs;
...@@ -2341,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) ...@@ -2341,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs; goto err_txqs;
} }
if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
if (priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
mlx5e_mqprio_rl_free(priv->mqprio_rl);
}
priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
}
return 0; return 0;
...@@ -2902,15 +2945,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc) ...@@ -2902,15 +2945,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{ {
params->mqprio.mode = TC_MQPRIO_MODE_DCB; params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc; params->mqprio.num_tc = num_tc;
params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels); params->num_channels);
} }
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
struct tc_mqprio_qopt *qopt) struct tc_mqprio_qopt *qopt,
struct mlx5e_mqprio_rl *rl)
{ {
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc; params->mqprio.num_tc = qopt->num_tc;
params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
} }
...@@ -2970,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, ...@@ -2970,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
netdev_err(netdev, "Min tx rate is not supported\n"); netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL; return -EINVAL;
} }
if (mqprio->max_rate[i]) { if (mqprio->max_rate[i]) {
netdev_err(netdev, "Max tx rate is not supported\n"); int err;
return -EINVAL;
err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
if (err)
return err;
} }
if (mqprio->qopt.offset[i] != agg_count) { if (mqprio->qopt.offset[i] != agg_count) {
...@@ -2991,11 +3041,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, ...@@ -2991,11 +3041,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0; return 0;
} }
/* Return true iff any traffic class in the MQPRIO request carries a
 * non-zero max_rate, i.e. TX rate limiting was asked for.
 */
static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
{
	int i;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		if (mqprio->max_rate[i])
			return true;
	}

	return false;
}
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio) struct tc_mqprio_qopt_offload *mqprio)
{ {
mlx5e_fp_preactivate preactivate; mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params; struct mlx5e_params new_params;
struct mlx5e_mqprio_rl *rl;
bool nch_changed; bool nch_changed;
int err; int err;
...@@ -3003,13 +3064,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, ...@@ -3003,13 +3064,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err) if (err)
return err; return err;
rl = NULL;
if (mlx5e_mqprio_rate_limit(mqprio)) {
rl = mlx5e_mqprio_rl_alloc();
if (!rl)
return -ENOMEM;
err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
mqprio->max_rate);
if (err) {
mlx5e_mqprio_rl_free(rl);
return err;
}
}
new_params = priv->channels.params; new_params = priv->channels.params;
mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt); mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx : preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx; mlx5e_update_netdev_queues_ctx;
return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
if (err && rl) {
mlx5e_mqprio_rl_cleanup(rl);
mlx5e_mqprio_rl_free(rl);
}
return err;
} }
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
...@@ -4809,6 +4889,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) ...@@ -4809,6 +4889,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb.qos_sq_stats[i]); kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats); kvfree(priv->htb.qos_sq_stats);
if (priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
mlx5e_mqprio_rl_free(priv->mqprio_rl);
}
memset(priv, 0, sizeof(*priv)); memset(priv, 0, sizeof(*priv));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment