Commit bbfeba26 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'mlx5-misc-patches-2024-08-08'

Tariq Toukan says:

====================
mlx5 misc patches 2024-08-08

This patchset contains multiple enhancements from the team to the mlx5
core and Eth drivers.

Patch #1 by Chris bumps a defined value to permit more devices doing TC
offloads.

Patch #2 by Jianbo adds an IPsec fast-path optimization to replace the
slow async handling.

Patches #3 and #4 by Jianbo add TC offload support for complicated rules
to overcome firmware limitation.

Patch #5 by Gal unifies the access macro to advertised/supported link
modes.

Patches #6 to #9 by Gal add extack messages in ethtool ops to replace
prints to the kernel log.

Patch #10 by Cosmin switches to using 'update' verb instead of 'replace'
to better reflect the operation.

Patch #11 by Cosmin exposes an update connection tracking operation to
replace the assumed delete+add implementation.
====================

Link: https://patch.msgid.link/20240808055927.2059700-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 1862923b 6b5662b7
...@@ -1172,14 +1172,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, ...@@ -1172,14 +1172,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param, struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param); struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param); struct ethtool_ringparam *param,
struct netlink_ext_ack *extack);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch); struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch); struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal); struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal, struct kernel_ethtool_coalesce *kernel_coal,
......
...@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops { ...@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops {
struct mlx5_flow_attr *attr, struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule); struct flow_rule *flow_rule);
void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule); void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr);
size_t priv_size; size_t priv_size;
}; };
......
...@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru ...@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(dmfs_rule); kfree(dmfs_rule);
} }
static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
{
struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
struct mlx5_ct_fs_dmfs_rule,
fs_rule);
struct mlx5e_priv *priv = netdev_priv(fs->netdev);
struct mlx5_flow_handle *rule;
rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(rule))
return PTR_ERR(rule);
mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr);
dmfs_rule->rule = rule;
dmfs_rule->attr = attr;
return 0;
}
static struct mlx5_ct_fs_ops dmfs_ops = { static struct mlx5_ct_fs_ops dmfs_ops = {
.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add, .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del, .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
.ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update,
.init = mlx5_ct_fs_dmfs_init, .init = mlx5_ct_fs_dmfs_init,
.destroy = mlx5_ct_fs_dmfs_destroy, .destroy = mlx5_ct_fs_dmfs_destroy,
......
...@@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru ...@@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(smfs_rule); kfree(smfs_rule);
} }
static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
{
struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
struct mlx5_ct_fs_smfs_rule,
fs_rule);
struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. */
struct mlx5dr_rule *rule;
actions[0] = smfs_rule->count_action;
actions[1] = attr->modify_hdr->action.dr_action;
actions[2] = fs_smfs->fwd_action;
rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
ARRAY_SIZE(actions), actions, spec->flow_context.flow_source);
if (!rule)
return -EINVAL;
mlx5_smfs_rule_destroy(smfs_rule->rule);
smfs_rule->rule = rule;
return 0;
}
static struct mlx5_ct_fs_ops fs_smfs_ops = { static struct mlx5_ct_fs_ops fs_smfs_ops = {
.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add, .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del, .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
.ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update,
.init = mlx5_ct_fs_smfs_init, .init = mlx5_ct_fs_smfs_init,
.destroy = mlx5_ct_fs_smfs_destroy, .destroy = mlx5_ct_fs_smfs_destroy,
......
...@@ -876,15 +876,14 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, ...@@ -876,15 +876,14 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
} }
static int static int
mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule, struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry, struct mlx5_ct_entry *entry,
bool nat, u8 zone_restore_id) bool nat, u8 zone_restore_id)
{ {
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr; struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
struct mlx5e_mod_hdr_handle *mh; struct mlx5e_mod_hdr_handle *mh;
struct mlx5_ct_fs_rule *rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
int err; int err;
...@@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, ...@@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id, err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
nat, mlx5_tc_ct_entry_in_ct_nat_table(entry)); nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) { if (err) {
ct_dbg("Failed to create ct entry mod hdr"); ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
goto err_mod_hdr; goto err_mod_hdr;
} }
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule); mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule); err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
if (IS_ERR(rule)) { if (err) {
err = PTR_ERR(rule); ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
goto err_rule; goto err_rule;
} }
ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
zone_rule->rule = rule;
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh); mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
zone_rule->mh = mh; zone_rule->mh = mh;
mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id); mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
kfree(old_attr); kfree(old_attr);
kvfree(spec); kvfree(spec);
ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone); ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
return 0; return 0;
...@@ -1141,23 +1137,23 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, ...@@ -1141,23 +1137,23 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
} }
static int static int
mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule, struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry, struct mlx5_ct_entry *entry,
u8 zone_restore_id) u8 zone_restore_id)
{ {
int err = 0; int err = 0;
if (mlx5_tc_ct_entry_in_ct_table(entry)) { if (mlx5_tc_ct_entry_in_ct_table(entry)) {
err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
zone_restore_id); zone_restore_id);
if (err) if (err)
return err; return err;
} }
if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) { if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
zone_restore_id); zone_restore_id);
if (err && mlx5_tc_ct_entry_in_ct_table(entry)) if (err && mlx5_tc_ct_entry_in_ct_table(entry))
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
} }
...@@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, ...@@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
} }
static int static int
mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule, mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry, unsigned long cookie) struct mlx5_ct_entry *entry, unsigned long cookie)
{ {
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
int err; int err;
err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id); err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
if (!err) if (!err)
return 0; return 0;
...@@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, ...@@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->restore_cookie = meta_action->ct_metadata.cookie; entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock); spin_unlock_bh(&ct_priv->ht_lock);
err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie); err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry); mlx5_tc_ct_entry_put(entry);
return err; return err;
} }
......
...@@ -109,6 +109,7 @@ struct mlx5e_tc_flow { ...@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done; struct completion init_done;
struct completion del_hw_done; struct completion del_hw_done;
struct mlx5_flow_attr *attr; struct mlx5_flow_attr *attr;
struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs; struct list_head attrs;
u32 chain_mapping; u32 chain_mapping;
}; };
......
...@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, ...@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt, MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
attrs->lft.hard_packet_limit); attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1); MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
} }
if (attrs->lft.soft_packet_limit != XFRM_INF) { if (attrs->lft.soft_packet_limit != XFRM_INF) {
......
...@@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER]; ...@@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
({ \ ({ \
struct ptys2ethtool_config *cfg; \ struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \ const unsigned int modes[] = { __VA_ARGS__ }; \
unsigned int i, bit, idx; \ unsigned int i; \
cfg = &ptys2##table##_ethtool_table[reg_]; \ cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \ bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \ __ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \ bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \ __ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
bit = modes[i] % 64; \ bitmap_set(cfg->supported, modes[i], 1); \
idx = modes[i] / 64; \ bitmap_set(cfg->advertised, modes[i], 1); \
__set_bit(bit, &cfg->supported[idx]); \
__set_bit(bit, &cfg->advertised[idx]); \
} \ } \
}) })
...@@ -354,35 +352,25 @@ static void mlx5e_get_ringparam(struct net_device *dev, ...@@ -354,35 +352,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
} }
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param) struct ethtool_ringparam *param,
struct netlink_ext_ack *extack)
{ {
struct mlx5e_params new_params; struct mlx5e_params new_params;
u8 log_rq_size; u8 log_rq_size;
u8 log_sq_size; u8 log_sq_size;
int err = 0; int err = 0;
if (param->rx_jumbo_pending) {
netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_mini_pending) {
netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n", NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
__func__, param->rx_pending, param->rx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL; return -EINVAL;
} }
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n", NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)",
__func__, param->tx_pending, param->tx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL; return -EINVAL;
} }
...@@ -418,7 +406,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, ...@@ -418,7 +406,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_ringparam(priv, param); return mlx5e_ethtool_set_ringparam(priv, param, extack);
} }
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
...@@ -557,12 +545,15 @@ static int mlx5e_set_channels(struct net_device *dev, ...@@ -557,12 +545,15 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal) struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{ {
struct dim_cq_moder *rx_moder, *tx_moder; struct dim_cq_moder *rx_moder, *tx_moder;
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) {
NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
rx_moder = &priv->channels.params.rx_cq_moderation; rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec; coal->rx_coalesce_usecs = rx_moder->usec;
...@@ -586,7 +577,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, ...@@ -586,7 +577,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
} }
static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
...@@ -708,26 +699,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, ...@@ -708,26 +699,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
int err = 0; int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation) || if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
!MLX5_CAP_GEN(mdev, cq_period_mode_modify)) !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", NL_SET_ERR_MSG_FMT_MOD(
__func__, MLX5E_MAX_COAL_TIME); extack,
"Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)",
MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs,
coal->rx_coalesce_usecs);
return -ERANGE; return -ERANGE;
} }
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", NL_SET_ERR_MSG_FMT_MOD(
__func__, MLX5E_MAX_COAL_FRAMES); extack,
"Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)",
MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames,
coal->rx_max_coalesced_frames);
return -ERANGE; return -ERANGE;
} }
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) { !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device"); NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -1299,7 +1298,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) ...@@ -1299,7 +1298,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0; u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
if (*ptys2legacy_ethtool_table[i].advertised == 0) if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS))
continue; continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised, if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes, link_modes,
...@@ -1313,18 +1313,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) ...@@ -1313,18 +1313,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{ {
u32 i, ptys_modes = 0; u32 i, ptys_modes = 0;
unsigned long modes[2]; __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
if (ptys2ext_ethtool_table[i].advertised[0] == 0 && if (bitmap_empty(ptys2ext_ethtool_table[i].advertised,
ptys2ext_ethtool_table[i].advertised[1] == 0) __ETHTOOL_LINK_MODE_MASK_NBITS))
continue; continue;
memset(modes, 0, sizeof(modes)); bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS); link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] && if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised,
modes[1] == ptys2ext_ethtool_table[i].advertised[1]) __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i); ptys_modes |= MLX5E_PROT_MASK(i);
} }
return ptys_modes; return ptys_modes;
...@@ -2015,8 +2015,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, ...@@ -2015,8 +2015,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read == -EINVAL) if (size_read == -EINVAL)
return -EINVAL; return -EINVAL;
if (size_read < 0) { if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n", NL_SET_ERR_MSG_FMT_MOD(
__func__, size_read); extack,
"Query module eeprom by page failed, read %u bytes, err %d\n",
i, size_read);
return i; return i;
} }
......
...@@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev, ...@@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev,
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_ringparam(priv, param); return mlx5e_ethtool_set_ringparam(priv, param, extack);
} }
static void mlx5e_rep_get_channels(struct net_device *dev, static void mlx5e_rep_get_channels(struct net_device *dev,
...@@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev, ...@@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
} }
static int mlx5e_rep_set_coalesce(struct net_device *netdev, static int mlx5e_rep_set_coalesce(struct net_device *netdev,
......
...@@ -1739,11 +1739,119 @@ has_encap_dests(struct mlx5_flow_attr *attr) ...@@ -1739,11 +1739,119 @@ has_encap_dests(struct mlx5_flow_attr *attr)
return false; return false;
} }
static int
extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
bool int_dest = false, ext_dest = false;
struct mlx5_esw_flow_attr *esw_attr;
int i;
if (flow->attr != attr ||
!list_is_first(&attr->list, &flow->attrs))
return 0;
if (flow_flag_test(flow, SLOW))
return 0;
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
return 0;
if (esw_attr->dest_int_port &&
(esw_attr->dests[esw_attr->split_count].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
return esw_attr->split_count + 1;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
ext_dest = true;
else
int_dest = true;
if (ext_dest && int_dest)
return esw_attr->split_count;
}
return 0;
}
static int
extra_split_attr_dests(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr, int split_count)
{
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *attr2;
int i, j, err;
if (IS_ERR(post_act))
return PTR_ERR(post_act);
attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
parse_attr2 = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!attr2 || !parse_attr2) {
err = -ENOMEM;
goto err_free;
}
attr2->parse_attr = parse_attr2;
handle = mlx5e_tc_post_act_add(post_act, attr2);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto err_free;
}
esw_attr = attr->esw_attr;
esw_attr2 = attr2->esw_attr;
esw_attr2->in_rep = esw_attr->in_rep;
parse_attr = attr->parse_attr;
parse_attr2->filter_dev = parse_attr->filter_dev;
for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
esw_attr2->dests[j] = esw_attr->dests[i];
esw_attr2->out_count = j;
attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_tc_post_act_offload(post_act, handle);
if (err)
goto err_post_act_offload;
err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
&parse_attr->mod_hdr_acts);
if (err)
goto err_post_act_set_handle;
esw_attr->out_count = split_count;
attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
flow->extra_split_attr = attr2;
attr2->post_act_handle = handle;
return 0;
err_post_act_set_handle:
mlx5e_tc_post_act_unoffload(post_act, handle);
err_post_act_offload:
mlx5e_tc_post_act_del(post_act, handle);
err_free:
kvfree(parse_attr2);
kfree(attr2);
return err;
}
static int static int
post_process_attr(struct mlx5e_tc_flow *flow, post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr, struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
int extra_split;
bool vf_tun; bool vf_tun;
int err = 0; int err = 0;
...@@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow, ...@@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out; goto err_out;
} }
extra_split = extra_split_attr_dests_needed(flow, attr);
if (extra_split > 0) {
err = extra_split_attr_dests(flow, attr, extra_split);
if (err)
goto err_out;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr); err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err) if (err)
...@@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, ...@@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow); mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow); free_flow_post_acts(flow);
if (flow->extra_split_attr) {
mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
kvfree(flow->extra_split_attr->parse_attr);
kfree(flow->extra_split_attr);
}
mlx5_free_flow_attr_actions(flow, attr); mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr); kvfree(attr->esw_attr->rx_tun_attr);
......
...@@ -86,6 +86,7 @@ struct mlx5_flow_attr { ...@@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain; u32 dest_chain;
struct mlx5_flow_table *ft; struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft; struct mlx5_flow_table *dest_ft;
struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level; u8 inner_match_level;
u8 outer_match_level; u8 outer_match_level;
u8 tun_ip_version; u8 tun_ip_version;
...@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr { ...@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
#define MLX5E_TC_MAX_INT_PORT_NUM (8) #define MLX5E_TC_MAX_INT_PORT_NUM (32)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
......
...@@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest, ...@@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} }
} }
if (attr->extra_split_ft) {
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = attr->extra_split_ft;
(*i)++;
}
out: out:
return err; return err;
} }
......
...@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev, ...@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev,
{ {
struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5e_priv *priv = mlx5i_epriv(dev);
return mlx5e_ethtool_set_ringparam(priv, param); return mlx5e_ethtool_set_ringparam(priv, param, extack);
} }
static void mlx5i_get_ringparam(struct net_device *dev, static void mlx5i_get_ringparam(struct net_device *dev,
...@@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev, ...@@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{ {
struct mlx5e_priv *priv = mlx5i_epriv(netdev); struct mlx5e_priv *priv = mlx5i_epriv(netdev);
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
} }
static int mlx5i_get_ts_info(struct net_device *netdev, static int mlx5i_get_ts_info(struct net_device *netdev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment