Commit 1cd5ea44 authored by David S. Miller

Merge tag 'mlx5-updates-2022-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2022-08-22

Roi Dayan Says:
===============
Add support for SF tunnel offload

The mlx5 driver currently supports tunnel offload only for VFs.
To add support for SF tunnel offload the driver needs to:
1. Add send-to-vport metadata matching rules for SFs, like is already
   done for VFs (see the sketch after this list).
2. Set an indirect table for the SF vport, same as for a VF vport.
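
A minimal sketch of such a metadata matching rule; the wrapper function
below is illustrative, while the metadata helpers and the flow API are
the existing mlx5 ones:

	/* Illustrative only: the wrapper name is hypothetical. */
	static struct mlx5_flow_handle *
	sketch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *fdb,
					   u16 vport_num)
	{
		struct mlx5_flow_destination dest = {};
		MLX5_DECLARE_FLOW_ACT(flow_act);
		struct mlx5_flow_handle *rule;
		struct mlx5_flow_spec *spec;

		spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
		if (!spec)
			return ERR_PTR(-ENOMEM);

		/* Match the per-vport metadata the eswitch writes to reg_c_0 */
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
		MLX5_SET(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

		/* Forward matching packets to the (VF or SF) vport */
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = vport_num;

		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
		kvfree(spec);
		return rule;
	}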

The series refactors the code into smaller sub functions for better
maintainability.

It also moves the creation of the send-to-vport meta rules from the esw
init phase to the representor load phase. SFs can be created after the
esw is initialized, and thus the send-to-vport meta rules would not be
created for those SFs. By moving the creation of the rules to the
representor load phase we ensure the rules are also created for SFs
created later, as sketched below.
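
A hypothetical sketch of the new placement; both sketch_* names below
are illustrative, not the series' actual symbols:

	/* Hypothetical: create the meta rule per representor at load
	 * time, so SF representors created after esw init are covered.
	 */
	static int sketch_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
	{
		struct mlx5_flow_handle *rule;

		rule = sketch_add_send_to_vport_meta_rule(esw,
							  sketch_get_fdb(esw),
							  vport_num);
		if (IS_ERR(rule))
			return PTR_ERR(rule);

		/* ... remainder of representor load ... */
		return 0;
	}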

===============

Lama Kayal Says:
================
Decouple the flow steering API from mlx5e_priv, in a manner that
introduces more readable and maintainable modules.

Make the TC table private, let the mlx5e_flow_steering struct be
dynamically allocated, and introduce a setter/getter API so the code is
maintained through accessors instead of publicly exposing the struct
(see the sketch below).
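
For instance (illustrative fragment; the mlx5e_fs_* accessors are the
prototypes added in en/fs.h below):

	struct mlx5e_flow_steering *fs = priv->fs; /* now an opaque pointer */

	/* was: priv->fs->tc = tc; */
	mlx5e_fs_set_tc(fs, tc);
	/* was: chains = mlx5e_nic_chains(priv->fs->tc); */
	chains = mlx5e_nic_chains(mlx5e_fs_get_tc(fs));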

Introduce flow steering debug macros to provide an elegant finish to the
decoupled flow steering API; errors related to flow steering shall be
reported via them (example below).
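
For example, code that previously needed priv->netdev just to call
netdev_err() now reports through the fs object (the fs_err()/fs_dbg()
macros are defined in en/fs.h below); an illustrative call:

	err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
	if (err)
		fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
		       __func__, fs_udp2tt(i), err);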

All flow steering related files drop any coupling to mlx5e_priv;
instead they take the relevant members as input. Among these are
fs_tt_redirect, fs_tc, and arfs (see the illustrative caller below).
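
E.g., the aRFS entry points now take the fs object, the RX resources and
an ntuple flag instead of mlx5e_priv (prototypes in en/fs.h below); an
illustrative caller:

	/* was: err = mlx5e_arfs_create_tables(priv); */
	err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
				       !!(priv->netdev->hw_features & NETIF_F_NTUPLE));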
================
parents fef5de75 72e0bcd1
......@@ -856,11 +856,6 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
enum {
MLX5E_TC_PRIO = 0,
MLX5E_NIC_PRIO
};
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
......
......@@ -8,6 +8,7 @@
#include "lib/fs_ttc.h"
struct mlx5e_post_act;
struct mlx5e_tc_table;
enum {
MLX5E_TC_FT_LEVEL = 0,
......@@ -15,6 +16,11 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
enum {
MLX5E_TC_PRIO = 0,
MLX5E_NIC_PRIO
};
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
......@@ -83,54 +89,28 @@ enum {
#endif
};
struct mlx5e_priv;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
};
#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4
struct mlx5e_ethtool_steering {
struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
struct list_head rules;
int tot_num_rules;
};
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
struct mlx5e_flow_steering;
struct mlx5e_rx_res;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables;
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res, bool ntuple);
void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple);
int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs);
int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
static inline int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res, bool ntuple)
{ return 0; }
static inline void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple) {}
static inline int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{ return -EOPNOTSUPP; }
static inline int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{ return -EOPNOTSUPP; }
#endif
#ifdef CONFIG_MLX5_EN_TLS
......@@ -142,54 +122,63 @@ struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
bool state_destroy;
bool vlan_strip_disable;
struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
struct mlx5_ttc_table *ttc;
struct mlx5_ttc_table *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
struct mlx5e_fs_udp *udp;
struct mlx5e_fs_any *any;
struct mlx5e_ptp_fs *ptp_fs;
};
void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
const struct mlx5e_profile *profile,
struct net_device *netdev);
void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
const struct mlx5e_profile *profile);
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner);
void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner);
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs);
#endif
struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs);
struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
#endif
void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy);
void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs, bool vlan_strip_disable);
struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
......@@ -198,5 +187,18 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#define fs_err(fs, fmt, ...) \
mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#define fs_dbg(fs, fmt, ...) \
mlx5_core_dbg(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#define fs_warn(fs, fmt, ...) \
mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#define fs_warn_once(fs, fmt, ...) \
mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#endif /* __MLX5E_FLOW_STEER_H__ */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
#ifndef __MLX5E_FS_ETHTOOL_H__
#define __MLX5E_FS_ETHTOOL_H__
struct mlx5e_priv;
struct mlx5e_ethtool_steering;
#ifdef CONFIG_MLX5_EN_RXNFC
int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{ return 0; }
static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif
#endif
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include "en/fs_tt_redirect.h"
#include "fs_core.h"
#include "mlx5_core.h"
enum fs_udp_type {
FS_IPV4_UDP,
......@@ -74,17 +74,17 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
}
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
enum fs_udp_type type = tt2fs_udp(ttc_type);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_fs_udp *fs_udp;
int err;
if (type == FS_UDP_NUM_TYPES)
......@@ -94,7 +94,6 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
......@@ -106,31 +105,30 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
__func__, fs_udp_type2str(type), err);
fs_err(fs, "%s: add %s rule failed, err %d\n",
__func__, fs_udp_type2str(type), err);
}
return rule;
}
static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5e_flow_table *fs_udp_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5e_fs_udp *fs_udp;
int err;
fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev,
"%s: add default rule failed, fs type=%d, err %d\n",
__func__, type, err);
fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
__func__, type, err);
return err;
}
......@@ -206,33 +204,36 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
return err;
}
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
ft = &fs_udp->tables[type];
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
fs_udp_type2str(type), ft->t->id, ft->t->level);
mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
fs_udp_type2str(type), ft->t->id, ft->t->level);
err = fs_udp_create_groups(ft, type);
if (err)
goto err;
err = fs_udp_add_default_rule(priv, type);
err = fs_udp_add_default_rule(fs, type);
if (err)
goto err;
......@@ -253,17 +254,17 @@ static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
fs_udp->tables[i].t = NULL;
}
static int fs_udp_disable(struct mlx5e_priv *priv)
static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, fs_udp2tt(i), err);
fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, fs_udp2tt(i), err);
return err;
}
}
......@@ -271,30 +272,31 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
return 0;
}
static int fs_udp_enable(struct mlx5e_priv *priv)
static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
dest.ft = priv->fs->udp->tables[i].t;
dest.ft = udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, fs_udp2tt(i), err);
fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, fs_udp2tt(i), err);
return err;
}
}
return 0;
}
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
int i;
if (!fs_udp)
......@@ -303,48 +305,50 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
if (--fs_udp->ref_cnt)
return;
fs_udp_disable(priv);
fs_udp_disable(fs);
for (i = 0; i < FS_UDP_NUM_TYPES; i++)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
priv->fs->udp = NULL;
mlx5e_fs_set_udp(fs, NULL);
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
int i, err;
if (priv->fs->udp) {
priv->fs->udp->ref_cnt++;
if (udp) {
udp->ref_cnt++;
return 0;
}
priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
if (!priv->fs->udp)
udp = kzalloc(sizeof(*udp), GFP_KERNEL);
if (!udp)
return -ENOMEM;
mlx5e_fs_set_udp(fs, udp);
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
err = fs_udp_create_table(priv, i);
err = fs_udp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
err = fs_udp_enable(priv);
err = fs_udp_enable(fs);
if (err)
goto err_destroy_tables;
priv->fs->udp->ref_cnt = 1;
udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
fs_udp_destroy_table(priv->fs->udp, i);
fs_udp_destroy_table(udp, i);
kfree(priv->fs->udp);
priv->fs->udp = NULL;
kfree(udp);
mlx5e_fs_set_udp(fs, NULL);
return err;
}
......@@ -356,22 +360,21 @@ static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_typ
}
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type)
{
struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_fs_any *fs_any;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
......@@ -383,31 +386,29 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
__func__, err);
fs_err(fs, "%s: add ANY rule failed, err %d\n",
__func__, err);
}
return rule;
}
static int fs_any_add_default_rule(struct mlx5e_priv *priv)
static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5e_flow_table *fs_any_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5e_fs_any *fs_any;
int err;
fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev,
"%s: add default rule failed, fs type=ANY, err %d\n",
__func__, err);
fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
__func__, err);
return err;
}
......@@ -472,9 +473,11 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
return err;
}
static int fs_any_create_table(struct mlx5e_priv *priv)
static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5e_flow_table *ft = &fs_any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -484,21 +487,21 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
ft->t->id, ft->t->level);
mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
ft->t->id, ft->t->level);
err = fs_any_create_groups(ft);
if (err)
goto err;
err = fs_any_add_default_rule(priv);
err = fs_any_add_default_rule(fs);
if (err)
goto err;
......@@ -509,35 +512,38 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
return err;
}
static int fs_any_disable(struct mlx5e_priv *priv)
static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, MLX5_TT_ANY, err);
fs_err(fs,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, MLX5_TT_ANY, err);
return err;
}
return 0;
}
static int fs_any_enable(struct mlx5e_priv *priv)
static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs->any->table.t;
dest.ft = any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, MLX5_TT_ANY, err);
fs_err(fs,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, MLX5_TT_ANY, err);
return err;
}
return 0;
......@@ -553,9 +559,9 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
fs_any->table.t = NULL;
}
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
struct mlx5e_fs_any *fs_any = priv->fs->any;
struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
if (!fs_any)
return;
......@@ -563,43 +569,45 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
if (--fs_any->ref_cnt)
return;
fs_any_disable(priv);
fs_any_disable(fs);
fs_any_destroy_table(fs_any);
kfree(fs_any);
priv->fs->any = NULL;
mlx5e_fs_set_any(fs, NULL);
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
int err;
if (priv->fs->any) {
priv->fs->any->ref_cnt++;
if (fs_any) {
fs_any->ref_cnt++;
return 0;
}
priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
if (!priv->fs->any)
fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
if (!fs_any)
return -ENOMEM;
mlx5e_fs_set_any(fs, fs_any);
err = fs_any_create_table(priv);
err = fs_any_create_table(fs);
if (err)
return err;
err = fs_any_enable(priv);
err = fs_any_enable(fs);
if (err)
goto err_destroy_table;
priv->fs->any->ref_cnt = 1;
fs_any->ref_cnt = 1;
return 0;
err_destroy_table:
fs_any_destroy_table(priv->fs->any);
fs_any_destroy_table(fs_any);
kfree(priv->fs->any);
priv->fs->any = NULL;
kfree(fs_any);
mlx5e_fs_set_any(fs, NULL);
return err;
}
......@@ -4,23 +4,22 @@
#ifndef __MLX5E_FS_TT_REDIRECT_H__
#define __MLX5E_FS_TT_REDIRECT_H__
#include "en.h"
#include "en/fs.h"
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs);
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs);
/* ANY traffic type redirect*/
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type);
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs);
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs);
#endif
......@@ -622,37 +622,39 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
if (!ptp_fs->valid)
return;
mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
mlx5e_fs_tt_redirect_any_destroy(priv);
mlx5e_fs_tt_redirect_any_destroy(fs);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
mlx5e_fs_tt_redirect_udp_destroy(priv);
mlx5e_fs_tt_redirect_udp_destroy(fs);
ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5e_flow_steering *fs = priv->fs;
struct mlx5_flow_handle *rule;
struct mlx5e_ptp_fs *ptp_fs;
int err;
ptp_fs = mlx5e_fs_get_ptp(fs);
if (ptp_fs->valid)
return 0;
err = mlx5e_fs_tt_redirect_udp_create(priv);
err = mlx5e_fs_tt_redirect_udp_create(fs);
if (err)
goto out_free;
rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -660,7 +662,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -668,11 +670,11 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v6_rule = rule;
err = mlx5e_fs_tt_redirect_any_create(priv);
err = mlx5e_fs_tt_redirect_any_create(fs);
if (err)
goto out_destroy_udp_v6_rule;
rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto out_destroy_fs_any;
......@@ -683,13 +685,13 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
return 0;
out_destroy_fs_any:
mlx5e_fs_tt_redirect_any_destroy(priv);
mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
mlx5e_fs_tt_redirect_udp_destroy(priv);
mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
return err;
}
......@@ -797,29 +799,31 @@ int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
return 0;
}
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile)
{
struct mlx5e_ptp_fs *ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
if (!ptp_fs)
return -ENOMEM;
mlx5e_fs_set_ptp(fs, ptp_fs);
priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return;
mlx5e_ptp_rx_unset_fs(priv);
mlx5e_ptp_rx_unset_fs(fs);
kfree(ptp_fs);
}
......@@ -845,6 +849,6 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
return -EINVAL;
}
mlx5e_ptp_rx_unset_fs(priv);
mlx5e_ptp_rx_unset_fs(priv->fs);
return 0;
}
......@@ -74,8 +74,10 @@ void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile);
void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
......
......@@ -12,6 +12,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
......@@ -21,7 +22,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
......
......@@ -230,12 +230,12 @@ static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
err = mlx5e_add_vlan_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
err = mlx5e_add_mac_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
......@@ -256,10 +256,10 @@ static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
{
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
mlx5e_remove_vlan_trap(priv);
mlx5e_remove_vlan_trap(priv->fs);
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
mlx5e_remove_mac_trap(priv);
mlx5e_remove_mac_trap(priv->fs);
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include <mlx5_core.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"
......@@ -71,13 +71,13 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
struct mlx5e_flow_table *ft = NULL;
struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
......@@ -86,19 +86,17 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (sk->sk_family) {
case AF_INET:
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
&inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_sport,
&inet_sk(sk)->inet_daddr,
inet_sk(sk)->inet_dport);
fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
&inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_sport,
&inet_sk(sk)->inet_daddr,
inet_sk(sk)->inet_dport);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
......@@ -140,34 +138,32 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
PTR_ERR(flow));
fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
out:
kvfree(spec);
return flow;
}
static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
enum accel_fs_tcp_type type)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_flow_table *accel_fs_t;
struct mlx5_flow_destination dest;
struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
int err = 0;
fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev,
"%s: add default rule failed, accel_fs type=%d, err %d\n",
__func__, type, err);
fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
__func__, type, err);
return err;
}
......@@ -265,9 +261,11 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
return err;
}
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -277,21 +275,21 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
ft->t->id, ft->t->level);
fs_dbg(fs, "Created fs accel table id %u level %u\n",
ft->t->id, ft->t->level);
err = accel_fs_tcp_create_groups(ft, type);
if (err)
goto err;
err = accel_fs_tcp_add_default_rule(priv, type);
err = accel_fs_tcp_add_default_rule(fs, type);
if (err)
goto err;
......@@ -301,17 +299,18 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
return err;
}
static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, fs_accel2tt(i), err);
fs_err(fs,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, fs_accel2tt(i), err);
return err;
}
}
......@@ -319,32 +318,32 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
return 0;
}
static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
dest.ft = priv->fs->accel_tcp->tables[i].t;
dest.ft = accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, fs_accel2tt(i), err);
fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
__func__, fs_accel2tt(i), err);
return err;
}
}
return 0;
}
static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
......@@ -353,40 +352,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
fs_tcp->tables[i].t = NULL;
}
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
int i;
if (!priv->fs->accel_tcp)
if (!accel_tcp)
return;
accel_fs_tcp_disable(priv);
accel_fs_tcp_disable(fs);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
accel_fs_tcp_destroy_table(fs, i);
kfree(priv->fs->accel_tcp);
priv->fs->accel_tcp = NULL;
kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
struct mlx5e_accel_fs_tcp *accel_tcp;
int i, err;
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
if (!priv->fs->accel_tcp)
accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
if (!accel_tcp)
return -ENOMEM;
mlx5e_fs_set_accel_tcp(fs, accel_tcp);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
err = accel_fs_tcp_create_table(priv, i);
err = accel_fs_tcp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
err = accel_fs_tcp_enable(priv);
err = accel_fs_tcp_enable(fs);
if (err)
goto err_destroy_tables;
......@@ -394,9 +396,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
kfree(priv->fs->accel_tcp);
priv->fs->accel_tcp = NULL;
accel_fs_tcp_destroy_table(fs, i);
kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
......@@ -4,19 +4,19 @@
#ifndef __MLX5E_ACCEL_FS_TCP_H__
#define __MLX5E_ACCEL_FS_TCP_H__
#include "en.h"
#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_TLS
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
......
......@@ -174,6 +174,8 @@ static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
......@@ -182,15 +184,14 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
......@@ -205,7 +206,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
......@@ -230,6 +231,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
......@@ -249,7 +251,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
......@@ -260,6 +262,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
......@@ -271,7 +274,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
......
......@@ -118,9 +118,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable)
err = mlx5e_accel_fs_tcp_create(priv);
err = mlx5e_accel_fs_tcp_create(priv->fs);
else
mlx5e_accel_fs_tcp_destroy(priv);
mlx5e_accel_fs_tcp_destroy(priv->fs);
mutex_unlock(&priv->state_lock);
return err;
......@@ -138,7 +138,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
err = mlx5e_accel_fs_tcp_create(priv);
err = mlx5e_accel_fs_tcp_create(priv->fs);
if (err) {
destroy_workqueue(priv->tls->rx_wq);
return err;
......@@ -154,7 +154,7 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
mlx5e_accel_fs_tcp_destroy(priv);
mlx5e_accel_fs_tcp_destroy(priv->fs);
destroy_workqueue(priv->tls->rx_wq);
}
......
......@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
mlx5e_tir_get_tirn(&priv_rx->tir),
MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
......
......@@ -114,47 +114,49 @@ static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
}
}
static int arfs_disable(struct mlx5e_priv *priv)
static int arfs_disable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, arfs_get_tt(i), err);
fs_err(fs,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
__func__, arfs_get_tt(i), err);
return err;
}
}
return 0;
}
static void arfs_del_rules(struct mlx5e_priv *priv);
static void arfs_del_rules(struct mlx5e_flow_steering *fs);
int mlx5e_arfs_disable(struct mlx5e_priv *priv)
int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
arfs_del_rules(priv);
arfs_del_rules(fs);
return arfs_disable(priv);
return arfs_disable(fs);
}
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
dest.ft = arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
__func__, arfs_get_tt(i), err);
arfs_disable(priv);
fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
__func__, arfs_get_tt(i), err);
arfs_disable(fs);
return err;
}
}
......@@ -167,31 +169,37 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
int i;
arfs_del_rules(priv);
destroy_workqueue(priv->fs->arfs->wq);
arfs_del_rules(fs);
destroy_workqueue(arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&arfs->arfs_tables[i]);
}
}
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
if (!ntuple)
return;
_mlx5e_cleanup_tables(priv);
kvfree(priv->fs->arfs);
_mlx5e_cleanup_tables(fs);
mlx5e_fs_set_arfs(fs, NULL);
kvfree(arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct arfs_table *arfs_t = &arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
......@@ -200,23 +208,21 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
__func__, type);
fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
return -EINVAL;
}
/* FIXME: Must use mlx5_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
}
return err;
......@@ -318,10 +324,12 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
return err;
}
static int arfs_create_table(struct mlx5e_priv *priv,
static int arfs_create_table(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -332,7 +340,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -343,7 +351,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
if (err)
goto err;
err = arfs_add_default_rule(priv, type);
err = arfs_add_default_rule(fs, rx_res, type);
if (err)
goto err;
......@@ -353,35 +361,40 @@ static int arfs_create_table(struct mlx5e_priv *priv,
return err;
}
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res, bool ntuple)
{
struct mlx5e_arfs_tables *arfs;
int err = -ENOMEM;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
if (!ntuple)
return 0;
priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
if (!priv->fs->arfs)
arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
if (!arfs)
return -ENOMEM;
spin_lock_init(&priv->fs->arfs->arfs_lock);
INIT_LIST_HEAD(&priv->fs->arfs->rules);
priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs->arfs->wq)
spin_lock_init(&arfs->arfs_lock);
INIT_LIST_HEAD(&arfs->rules);
arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!arfs->wq)
goto err;
mlx5e_fs_set_arfs(fs, arfs);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
err = arfs_create_table(fs, rx_res, i);
if (err)
goto err_des;
}
return 0;
err_des:
_mlx5e_cleanup_tables(priv);
_mlx5e_cleanup_tables(fs);
err:
kvfree(priv->fs->arfs);
mlx5e_fs_set_arfs(fs, NULL);
kvfree(arfs);
return err;
}
......@@ -389,6 +402,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
HLIST_HEAD(del_list);
......@@ -396,8 +410,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
spin_lock_bh(&priv->fs->arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
......@@ -408,7 +422,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
......@@ -417,20 +431,21 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
}
}
static void arfs_del_rules(struct mlx5e_priv *priv)
static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct hlist_node *htmp;
struct arfs_rule *rule;
HLIST_HEAD(del_list);
int i;
int j;
spin_lock_bh(&priv->fs->arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
......@@ -474,7 +489,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
......@@ -588,13 +603,15 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
struct mlx5e_arfs_tables *arfs;
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&priv->fs->arfs->arfs_lock);
spin_lock_bh(&arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
spin_unlock_bh(&arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
......@@ -620,6 +637,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *rule;
struct arfs_tuple *tuple;
......@@ -647,7 +665,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
......@@ -691,11 +709,12 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct mlx5e_arfs_tables *arfs;
struct arfs_rule *arfs_rule;
struct arfs_table *arfs_t;
struct flow_keys fk;
arfs = mlx5e_fs_get_arfs(priv->fs);
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
......@@ -725,7 +744,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
queue_work(arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
......
......@@ -38,6 +38,7 @@
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
......@@ -494,14 +495,14 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
mlx5e_arfs_disable(priv);
mlx5e_arfs_disable(priv->fs);
/* Switch to new channels, set new parameters and close old ones */
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv);
int err2 = mlx5e_arfs_enable(priv->fs);
if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
......
......@@ -36,10 +36,38 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"
struct mlx5e_flow_steering {
struct work_struct set_rx_mode_work;
bool state_destroy;
bool vlan_strip_disable;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *ethtool;
#endif
struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
struct mlx5_ttc_table *ttc;
struct mlx5_ttc_table *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
struct mlx5e_fs_udp *udp;
struct mlx5e_fs_any *any;
struct mlx5e_ptp_fs *ptp_fs;
};
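Moving this definition out of en/fs.h and into en_fs.c makes the container opaque everywhere else; other files must go through the getters and setters added further down in this patch. A minimal sketch of an external user, assuming only those accessors:

static void example_fs_user(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);

	/* fs members are invisible here; even the mdev needs a getter */
	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "ttc ft id %#x\n",
		      mlx5_get_ttc_flow_table(ttc)->id);
}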
static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
......@@ -148,9 +176,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
mlx5_core_warn(fs->mdev,
"netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
list_size, max_list_size);
fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
list_size, max_list_size);
list_size = max_list_size;
}
......@@ -167,8 +194,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
err);
fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
err);
kvfree(vlans);
return err;
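The fs_err()/fs_warn() conversions in this hunk and the ones below use the new flow steering debug macros. Their definitions are not part of this hunk, but a plausible form, consistent with every call site (fs handle first, then printf-style arguments), would be:

#define fs_err(fs, fmt, ...) \
	mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#define fs_warn(fs, fmt, ...) \
	mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#define fs_warn_once(fs, fmt, ...) \
	mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)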
......@@ -249,7 +276,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
fs_err(fs, "%s: add rule failed\n", __func__);
}
return err;
......@@ -351,78 +378,78 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
return rule;
}
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->fs->vlan->trap_rule = NULL;
mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
fs->vlan->trap_rule = NULL;
fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
return err;
}
priv->fs->vlan->trap_rule = rule;
fs->vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
if (priv->fs->vlan->trap_rule) {
mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
priv->fs->vlan->trap_rule = NULL;
if (fs->vlan->trap_rule) {
mlx5_del_flow_rules(fs->vlan->trap_rule);
fs->vlan->trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->fs->l2.trap_rule = NULL;
mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
__func__, err);
fs->l2.trap_rule = NULL;
fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
__func__, err);
return err;
}
priv->fs->l2.trap_rule = rule;
fs->l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
if (priv->fs->l2.trap_rule) {
mlx5_del_flow_rules(priv->fs->l2.trap_rule);
priv->fs->l2.trap_rule = NULL;
if (fs->l2.trap_rule) {
mlx5_del_flow_rules(fs->l2.trap_rule);
fs->l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
if (!priv->fs->vlan->cvlan_filter_disabled)
if (!fs->vlan->cvlan_filter_disabled)
return;
priv->fs->vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
fs->vlan->cvlan_filter_disabled = false;
if (promisc)
return;
mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
if (priv->fs->vlan->cvlan_filter_disabled)
if (fs->vlan->cvlan_filter_disabled)
return;
priv->fs->vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
fs->vlan->cvlan_filter_disabled = true;
if (promisc)
return;
mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
......@@ -462,7 +489,7 @@ int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
{
if (!fs->vlan) {
mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
......@@ -479,7 +506,7 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
__be16 proto, u16 vid)
{
if (!fs->vlan) {
mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
......@@ -512,28 +539,28 @@ static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
mlx5e_fs_add_any_vid_rules(fs);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
WARN_ON_ONCE(priv->fs->state_destroy);
WARN_ON_ONCE(fs->state_destroy);
mlx5e_remove_vlan_trap(priv);
mlx5e_remove_vlan_trap(fs);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
if (priv->fs->vlan->cvlan_filter_disabled)
mlx5e_fs_del_any_vid_rules(priv->fs);
if (fs->vlan->cvlan_filter_disabled)
mlx5e_fs_del_any_vid_rules(fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
......@@ -568,8 +595,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
}
if (l2_err)
mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
action == MLX5E_ACTION_ADD ? "add" : "del",
mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
......@@ -640,9 +668,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
size++;
if (size > max_size) {
mlx5_core_warn(fs->mdev,
"mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
is_uc ? "UC" : "MC", size, max_size);
fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
......@@ -658,9 +685,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
mlx5_core_err(fs->mdev,
"Failed to modify vport %s list err(%d)\n",
is_uc ? "UC" : "MC", err);
fs_err(fs, "Failed to modify vport %s list err(%d)\n",
is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
......@@ -730,7 +756,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
......@@ -750,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
fs_err(fs, "fail to create promisc table err=%d\n", err);
return err;
}
......@@ -807,8 +833,8 @@ void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
if (err)
enable_promisc = false;
if (!fs->vlan_strip_disable && !err)
mlx5_core_warn_once(fs->mdev,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
fs_warn_once(fs,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
......@@ -856,14 +882,15 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
ft->t = NULL;
}
static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
......@@ -872,13 +899,14 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
mlx5e_rx_res_get_tirn_rss_inner(rx_res,
tt);
}
}
void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel)
{
......@@ -886,7 +914,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
......@@ -895,19 +923,19 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
mlx5e_rx_res_get_tirn_rss(rx_res, tt);
}
ttc_params->inner_ttc = tunnel;
if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
mlx5_get_ttc_flow_table(fs->inner_ttc);
}
}
......@@ -959,8 +987,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
......@@ -1044,14 +1071,14 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
mlx5e_destroy_flow_table(&priv->fs->l2.ft);
mlx5e_destroy_flow_table(&fs->l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_table *l2_table = &priv->fs->l2;
struct mlx5e_l2_table *l2_table = &fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -1062,7 +1089,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -1221,126 +1248,128 @@ static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
return err;
}
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
mlx5e_del_vlan_rules(fs);
mlx5e_destroy_flow_table(&fs->vlan->ft);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
mlx5_destroy_ttc_table(priv->fs->inner_ttc);
mlx5_destroy_ttc_table(fs->inner_ttc);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5_destroy_ttc_table(fs->ttc);
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return 0;
mlx5e_set_inner_ttc_params(priv, &ttc_params);
priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
&ttc_params);
if (IS_ERR(priv->fs->inner_ttc))
return PTR_ERR(priv->fs->inner_ttc);
mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
&ttc_params);
if (IS_ERR(fs->inner_ttc))
return PTR_ERR(fs->inner_ttc);
return 0;
}
int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
mlx5e_set_ttc_params(priv, &ttc_params, true);
priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
if (IS_ERR(priv->fs->ttc))
return PTR_ERR(priv->fs->ttc);
mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
if (IS_ERR(fs->ttc))
return PTR_ERR(fs->ttc);
return 0;
}
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
const struct mlx5e_profile *profile,
struct net_device *netdev)
{
struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
int err;
priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs->ns)
if (!ns)
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
mlx5e_fs_set_ns(fs, ns, false);
err = mlx5e_arfs_create_tables(fs, rx_res,
!!(netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
err = mlx5e_create_inner_ttc_table(fs, rx_res);
if (err) {
mlx5_core_err(priv->fs->mdev,
"Failed to create inner ttc table, err=%d\n", err);
fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
err = mlx5e_create_ttc_table(fs, rx_res);
if (err) {
mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
err);
fs_err(fs, "Failed to create ttc table, err=%d\n", err);
goto err_destroy_inner_ttc_table;
}
err = mlx5e_create_l2_table(priv);
err = mlx5e_create_l2_table(fs);
if (err) {
mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
err);
fs_err(fs, "Failed to create l2 table, err=%d\n", err);
goto err_destroy_ttc_table;
}
err = mlx5e_fs_create_vlan_table(priv->fs);
err = mlx5e_fs_create_vlan_table(fs);
if (err) {
mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
err);
fs_err(fs, "Failed to create vlan table, err=%d\n", err);
goto err_destroy_l2_table;
}
err = mlx5e_ptp_alloc_rx_fs(priv);
err = mlx5e_ptp_alloc_rx_fs(fs, profile);
if (err)
goto err_destroy_vlan_table;
mlx5e_ethtool_init_steering(priv);
mlx5e_ethtool_init_steering(fs);
return 0;
err_destroy_vlan_table:
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv);
mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(priv);
mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
const struct mlx5e_profile *profile)
{
mlx5e_ptp_free_rx_fs(priv);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
mlx5e_ptp_free_rx_fs(fs, profile);
mlx5e_destroy_vlan_table(fs);
mlx5e_destroy_l2_table(fs);
mlx5e_destroy_ttc_table(fs);
mlx5e_destroy_inner_ttc_table(fs);
mlx5e_arfs_destroy_tables(fs, ntuple);
mlx5e_ethtool_cleanup_steering(fs);
}
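Since the container no longer reaches into priv for the netdev, RX resources, or profile, callers hand those in explicitly. Condensed from the nic profile call sites later in this patch:

/* setup (mlx5e_init_nic_rx) */
err = mlx5e_create_flow_steering(priv->fs, priv->rx_res,
				 priv->profile, priv->netdev);

/* teardown (mlx5e_cleanup_nic_rx); the caller computes the ntuple
 * state because fs cannot look at the netdev itself
 */
mlx5e_destroy_flow_steering(priv->fs,
			    !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
			    priv->profile);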
static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
......@@ -1356,6 +1385,11 @@ static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
kvfree(fs->vlan);
}
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
{
return fs->vlan;
}
static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
fs->tc = mlx5e_tc_table_alloc();
......@@ -1369,6 +1403,32 @@ static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
mlx5e_tc_table_free(fs->tc);
}
struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
{
return fs->tc;
}
#ifdef CONFIG_MLX5_EN_RXNFC
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{
return mlx5e_ethtool_alloc(&fs->ethtool);
}
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
{
mlx5e_ethtool_free(fs->ethtool);
}
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
{
return fs->ethtool;
}
#else
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{ return 0; }
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy)
......@@ -1394,7 +1454,13 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
goto err_free_vlan;
}
err = mlx5e_fs_ethtool_alloc(fs);
if (err)
goto err_free_tc;
return fs;
err_free_tc:
mlx5e_fs_tc_free(fs);
err_free_vlan:
mlx5e_fs_vlan_free(fs);
err_free_fs:
kvfree(fs);
......@@ -1405,7 +1471,109 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
kvfree(fs);
}
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
{
return &fs->l2;
}
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
{
return egress ? fs->egress_ns : fs->ns;
}
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
{
if (!egress)
fs->ns = ns;
else
fs->egress_ns = ns;
}
struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
{
return inner ? fs->inner_ttc : fs->ttc;
}
void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
{
if (!inner)
fs->ttc = ttc;
else
fs->inner_ttc = ttc;
}
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
{
return fs->arfs;
}
void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
{
fs->arfs = arfs;
}
#endif
struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
{
return fs->ptp_fs;
}
void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
{
fs->ptp_fs = ptp_fs;
}
struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
{
return fs->any;
}
void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
{
fs->any = any;
}
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
{
return fs->accel_tcp;
}
void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
{
fs->accel_tcp = accel_tcp;
}
#endif
void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
{
fs->state_destroy = state_destroy;
}
void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
bool vlan_strip_disable)
{
fs->vlan_strip_disable = vlan_strip_disable;
}
struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
{
return fs->udp;
}
void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
{
fs->udp = udp;
}
struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
{
return fs->mdev;
}
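The setter/getter pairs let each sub-module keep owning its private state while parking the pointer in the container. A minimal sketch of the intended pairing, using aRFS as the example (hypothetical condensed flow inside en_arfs.c, where the struct is a complete type; error handling elided):

static int example_arfs_attach(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);

	if (!arfs)
		return -ENOMEM;
	mlx5e_fs_set_arfs(fs, arfs);	/* publish into the container */
	return 0;
}

static void example_arfs_detach(struct mlx5e_flow_steering *fs)
{
	kvfree(mlx5e_fs_get_arfs(fs));
	mlx5e_fs_set_arfs(fs, NULL);
}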
......@@ -34,6 +34,22 @@
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/fs_ethtool.h"
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
};
#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4
struct mlx5e_ethtool_steering {
struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
struct list_head rules;
int tot_num_rules;
};
static int flow_type_to_traffic_type(u32 flow_type);
......@@ -66,6 +82,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
......@@ -81,18 +98,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
eth_ft = &ethtool->l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
eth_ft = &ethtool->l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
eth_ft = &priv->fs->ethtool.l2_ft[prio];
eth_ft = &ethtool->l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
......@@ -382,15 +399,16 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct list_head *head = &ethtool->rules;
struct mlx5e_ethtool_rule *iter;
struct list_head *head = &priv->fs->ethtool.rules;
list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
priv->fs->ethtool.tot_num_rules++;
ethtool->tot_num_rules++;
list_add(&rule->list, head);
}
......@@ -499,15 +517,16 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
static void del_ethtool_rule(struct mlx5e_priv *priv,
static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_ethtool_rule *eth_rule)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
priv->fs->ethtool.tot_num_rules--;
ethtool->tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
......@@ -515,9 +534,10 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *iter;
list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
......@@ -531,7 +551,7 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
del_ethtool_rule(priv, eth_rule);
del_ethtool_rule(priv->fs, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
......@@ -754,7 +774,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
return 0;
del_ethtool_rule:
del_ethtool_rule(priv, eth_rule);
del_ethtool_rule(priv->fs, eth_rule);
return err;
}
......@@ -774,7 +794,7 @@ mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
goto out;
}
del_ethtool_rule(priv, eth_rule);
del_ethtool_rule(priv->fs, eth_rule);
out:
return err;
}
......@@ -783,12 +803,13 @@ static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
list_for_each_entry(eth_rule, &ethtool->rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
......@@ -826,18 +847,34 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
return err;
}
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
*ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
if (!*ethtool)
return -ENOMEM;
return 0;
}
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
{
kvfree(ethtool);
}
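With struct mlx5e_ethtool_steering private to this file, its size is only known here, so allocation must go through mlx5e_ethtool_alloc(). A sketch of the expected call order; both callers appear elsewhere in this patch:

/* at fs init time (mlx5e_fs_ethtool_alloc() in en_fs.c) */
err = mlx5e_ethtool_alloc(&fs->ethtool);	/* kvzalloc of the opaque struct */

/* at profile rx init time */
mlx5e_ethtool_init_steering(fs);		/* INIT_LIST_HEAD of the rules list */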
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
del_ethtool_rule(fs, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
INIT_LIST_HEAD(&priv->fs->ethtool.rules);
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
INIT_LIST_HEAD(&ethtool->rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
......@@ -959,11 +996,12 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs->ethtool.tot_num_rules;
info->rule_cnt = ethtool->tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
......
......@@ -2738,7 +2738,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_rep_activate_channels(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
......@@ -2752,7 +2752,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
mlx5e_rep_deactivate_channels(priv);
/* The results of ndo_select_queue are unreliable, while netdev config
* is being changed (real_num_tx_queues, num_tc). Stop all queues to
......@@ -3669,9 +3669,11 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
mlx5e_enable_cvlan_filter(priv);
mlx5e_enable_cvlan_filter(priv->fs,
!!(priv->netdev->flags & IFF_PROMISC));
else
mlx5e_disable_cvlan_filter(priv);
mlx5e_disable_cvlan_filter(priv->fs,
!!(priv->netdev->flags & IFF_PROMISC));
return 0;
}
......@@ -3778,7 +3780,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
priv->fs->vlan_strip_disable = !enable;
mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
priv->channels.params.vlan_strip_disable = !enable;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
......@@ -3786,7 +3788,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
if (err) {
priv->fs->vlan_strip_disable = enable;
mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
priv->channels.params.vlan_strip_disable = enable;
}
unlock:
......@@ -3824,9 +3826,9 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
int err;
if (enable)
err = mlx5e_arfs_enable(priv);
err = mlx5e_arfs_enable(priv->fs);
else
err = mlx5e_arfs_disable(priv);
err = mlx5e_arfs_disable(priv->fs);
return err;
}
......@@ -3910,12 +3912,14 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
if (!priv->fs->vlan ||
!bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
if (!vlan ||
!bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
......@@ -5103,7 +5107,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
err = mlx5e_create_flow_steering(priv);
err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
priv->netdev);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_rx_res;
......@@ -5126,7 +5131,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
......@@ -5142,7 +5148,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
......@@ -5518,7 +5525,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
......@@ -5579,7 +5587,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
cancel_work_sync(&priv->update_stats_work);
return err;
}
......@@ -5590,7 +5599,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (profile->disable)
profile->disable(priv);
......
......@@ -56,6 +56,7 @@
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
......@@ -397,7 +398,8 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
return err;
}
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
static int
mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
......@@ -451,7 +453,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
return err;
}
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
static void
mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
......@@ -460,6 +463,53 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
static int
mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_group *g;
int err = 0;
g = esw->fdb_table.offloads.send_to_vport_meta_grp;
if (!g)
return 0;
flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
}
rpriv->send_to_vport_meta_rule = flow_rule;
out:
return err;
}
static void
mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
if (rpriv->send_to_vport_meta_rule)
mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
}
void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
{
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_rep_add_meta_tunnel_rule(priv);
}
void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
{
mlx5e_rep_del_meta_tunnel_rule(priv);
mlx5e_remove_sqs_fwd_rules(priv);
}
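These two wrappers are what move the send-to-vport meta rule from eswitch init to representor load: each rep, VF or SF, installs its own rule when its channels are activated, so SFs created after the eswitch was initialized are covered as well. Teardown mirrors setup (sketch of the resulting ordering at the call sites shown earlier):

/* activate:   SQ forwarding rules first, then the meta tunnel rule */
if (mlx5e_is_vport_rep(priv))
	mlx5e_rep_activate_channels(priv);

/* deactivate: meta tunnel rule first, then the SQ forwarding rules */
if (mlx5e_is_vport_rep(priv))
	mlx5e_rep_deactivate_channels(priv);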
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
......@@ -745,19 +795,20 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
mlx5e_fs_set_ns(priv->fs,
mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
if (IS_ERR(priv->fs->ttc)) {
err = PTR_ERR(priv->fs->ttc);
mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
......@@ -777,7 +828,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
return 0;
}
......@@ -885,14 +936,14 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_root_ft;
mlx5e_ethtool_init_steering(priv);
mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
......@@ -906,10 +957,10 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
mlx5e_ethtool_cleanup_steering(priv);
mlx5e_ethtool_cleanup_steering(priv->fs);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
......
......@@ -111,6 +111,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
struct mlx5_flow_handle *send_to_vport_meta_rule;
struct rhashtable tc_ht;
};
......@@ -241,8 +242,8 @@ int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_rep_activate_channels(struct mlx5e_priv *priv);
void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
......@@ -256,8 +257,8 @@ static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
static inline void mlx5e_rep_activate_channels(struct mlx5e_priv *priv) {}
static inline void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
......
......@@ -311,6 +311,7 @@ mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
......@@ -322,7 +323,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
return priv->fs->tc->ct;
return tc->ct;
}
static struct mlx5e_tc_psample *
......@@ -345,6 +346,7 @@ get_sample_priv(struct mlx5e_priv *priv)
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
......@@ -356,7 +358,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
return priv->fs->tc->post_act;
return tc->post_act;
}
struct mlx5_flow_handle *
......@@ -607,11 +609,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs->tc->mod_hdr;
&tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
......@@ -810,6 +813,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
struct mlx5_ttc_table *ttc;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
......@@ -827,9 +831,10 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
goto err_create_ttc_table;
}
ttc = mlx5e_fs_get_ttc(priv->fs, false);
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
mlx5_get_ttc_flow_table(ttc)->id);
return 0;
......@@ -916,10 +921,11 @@ static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hash_for_each_possible(tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
......@@ -933,11 +939,12 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
/* no more hairpin flows for us, release the hairpin pair */
if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
mutex_unlock(&tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
......@@ -993,6 +1000,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
......@@ -1021,10 +1029,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
mutex_lock(&tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
mutex_unlock(&tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
......@@ -1036,7 +1044,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
mutex_unlock(&tc->hairpin_tbl_lock);
return -ENOMEM;
}
......@@ -1048,9 +1056,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
mutex_unlock(&tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
......@@ -1126,8 +1134,9 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
......@@ -1163,7 +1172,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
}
dest_ix++;
}
......@@ -1191,7 +1200,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
rule = ERR_CAST(priv->fs->tc->t);
rule = ERR_CAST(tc->t);
goto err_ft_get;
}
}
......@@ -1293,8 +1302,10 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_fs_chains *nic_chains;
nic_chains = mlx5e_nic_chains(tc);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
......@@ -1309,8 +1320,8 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
......@@ -1322,13 +1333,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
mutex_lock(&priv->fs->tc->t_lock);
mutex_lock(&tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs->tc->t = NULL;
tc->t = NULL;
}
mutex_unlock(&priv->fs->tc->t_lock);
mutex_unlock(&tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
......@@ -1494,8 +1505,11 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
route_mdev->coredev_type != MLX5_COREDEV_VF)
if (out_mdev->coredev_type != MLX5_COREDEV_PF)
return false;
if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
route_mdev->coredev_type != MLX5_COREDEV_SF)
return false;
return mlx5e_same_hw_devs(out_priv, route_priv);
......@@ -4058,13 +4072,14 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
return &priv->fs->tc->ht;
return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
......@@ -4772,6 +4787,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
struct mlx5e_hairpin_entry *hpe, *tmp;
LIST_HEAD(init_wait_list);
......@@ -4783,11 +4799,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
mutex_lock(&tc->hairpin_tbl_lock);
hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
mutex_unlock(&tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
......@@ -4841,7 +4857,8 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_table **ft = &tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
......@@ -4863,12 +4880,14 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
mlx5_destroy_flow_table(priv->fs->tc->miss_t);
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
......@@ -4909,7 +4928,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
attr.default_ft = priv->fs->tc->miss_t;
attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
......@@ -4958,7 +4977,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
......@@ -5163,13 +5182,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
struct mlx5e_tc_table *tc;
int err;
reg_b = be32_to_cpu(cqe->ft_metadata);
tc = mlx5e_fs_get_tc(priv->fs);
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
......
......@@ -54,6 +54,7 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
......
......@@ -78,12 +78,16 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_core_dev *dest_mdev)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
bool vf_sf_vport;
vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||
mlx5_esw_is_sf_vport(esw, vport_num);
/* Use indirect table for all IP traffic from UL to VF or SF with
* vport destination when source rewrite flag is set.
*/
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
mlx5_eswitch_is_vf_vport(esw, vport_num) &&
vf_sf_vport &&
esw->dev == dest_mdev &&
attr->ip_version &&
attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
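A worked example of the extended predicate (sketch; only the destination vport type changes across rows):

/*   in_rep             dest vport   src-rewrite   ip_version   indirect table?
 *   MLX5_VPORT_UPLINK  VF           set           4 or 6       yes
 *   MLX5_VPORT_UPLINK  SF           set           4 or 6       yes (new in this patch)
 *   MLX5_VPORT_UPLINK  PF           set           4            no
 *   VF                 VF           set           4            no (source not uplink)
 */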
......
......@@ -1360,7 +1360,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
esw_offloads_del_send_to_vport_meta_rules(esw);
devl_rate_nodes_destroy(devlink);
}
......
......@@ -244,6 +244,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_flow_group *vport_rx_drop_group;
struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
......@@ -344,7 +346,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
......
......@@ -70,6 +70,8 @@
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
......@@ -1057,52 +1059,23 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
int i = 0, num_vfs = esw->esw_funcs.num_vfs;
if (!num_vfs || !flows)
return;
for (i = 0; i < num_vfs; i++)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
/* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0,
* meta rules could be freed again. So set it to NULL.
*/
esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
}
void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
mlx5_eswitch_del_send_to_vport_meta_rules(esw);
if (rule)
mlx5_del_flow_rules(rule);
}
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
unsigned long i;
u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
if (!flows)
return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto alloc_err;
}
if (!spec)
return ERR_PTR(-ENOMEM);
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
......@@ -1115,34 +1088,18 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
vport_num = vport->vport;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
rule_idx, PTR_ERR(flow_rule));
goto rule_err;
}
flows[rule_idx++] = flow_rule;
}
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
kvfree(spec);
return 0;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
vport_num, PTR_ERR(flow_rule));
rule_err:
while (--rule_idx >= 0)
mlx5_del_flow_rules(flows[rule_idx]);
kvfree(spec);
alloc_err:
kvfree(flows);
return err;
return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
......@@ -1667,18 +1624,200 @@ esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
#endif
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}
static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}
static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}
static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}
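
The four helpers above advance a shared running flow index through the in/out *ix parameter. A sketch (illustrative function, not driver code) of how the slots add up; the sum matches the table_size formula in esw_create_offloads_fdb_tables() below:

/* Sketch only: walks the same arithmetic the helpers apply to *ix. */
static int example_fdb_index_layout(struct mlx5_eswitch *esw)
{
	int ix = 0;

	/* send-to-vport SQ rules */
	ix += MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	/* send-to-vport meta rules: one slot per vport, so SFs are covered */
	ix += esw->total_vports;
	/* peer eswitch miss rules (merged eswitch): one slot per vport */
	ix += esw->total_vports;
	/* multicast/unicast miss rules at the end */
	ix += MLX5_ESW_MISS_FLOWS;

	return ix;
}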
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
......@@ -1712,7 +1851,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
	 * total vports of the peer (currently it also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
......@@ -1753,139 +1892,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
goto fdb_chains_err;
}
	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
......@@ -1912,7 +1941,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
......@@ -1930,7 +1958,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
int nvports;
......@@ -1955,7 +1983,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
......@@ -1984,7 +2013,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
......@@ -2014,6 +2043,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
	 * for the drop rule, which is placed at the end of the table.
	 * So return the total number of vport and int_port steering source
	 * ports as the drop rule's index.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}
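
A worked example with made-up numbers, as a comment-style sketch:

/* Illustration (numbers assumed): if the eswitch exposes 16 steering source
 * ports (vports plus internal ports), the vport rx rules occupy FTE indices
 * 0..15 and the drop rule sits alone at index 16 -- the one extra entry
 * (MLX5_ESW_FT_OFFLOADS_DROP_RULE) added to ft_attr.max_fte above.
 */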
static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int flow_index;
	int err = 0;

	flow_index = esw_create_vport_rx_drop_rule_index(esw);

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_drop_group = g;
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_group)
		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
......@@ -2062,6 +2137,32 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
return flow_rule;
}
static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
					&flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
			 PTR_ERR(flow_rule));
		return PTR_ERR(flow_rule);
	}

	esw->offloads.vport_rx_drop_rule = flow_rule;

	return 0;
}
static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_rule)
		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}
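
The drop group reserves the final FTE of ft_offloads, and the NULL-spec rule placed in it acts as a match-all drop for traffic that missed every vport rx rule. A minimal sketch, with an illustrative helper name, of pairing the two in the order used at init time:

/* Sketch only: helper name is assumed; mirrors the ordering in
 * esw_offloads_steering_init() below.
 */
static int example_setup_vport_rx_drop(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_create_vport_rx_drop_group(esw);	/* reserve last FTE */
	if (err)
		return err;

	err = esw_create_vport_rx_drop_rule(esw);	/* NULL spec = match all */
	if (err) {
		esw_destroy_vport_rx_drop_group(esw);
		return err;
	}
	return 0;
}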
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
......@@ -3062,8 +3163,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
......@@ -3081,6 +3194,8 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
......
......@@ -32,6 +32,7 @@
#include "en.h"
#include "ipoib.h"
#include "en/fs_ethtool.h"
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
......
......@@ -35,6 +35,7 @@
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
#include "en/fs_ethtool.h"
#define IB_DEFAULT_Q_KEY 0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
......@@ -320,43 +321,47 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
	struct mlx5_flow_namespace *ns =
		mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	int err;

	if (!ns)
		return -EINVAL;

	mlx5e_fs_set_ns(priv->fs, ns, false);
	err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
				       !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv->fs,
				  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));

	return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv->fs);
	mlx5e_arfs_destroy_tables(priv->fs,
				  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
	mlx5e_ethtool_cleanup_steering(priv->fs);
}
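
A minimal sketch of the decoupled flow-steering pattern used above: callers receive struct mlx5e_flow_steering rather than reaching into mlx5e_priv. The meaning of mlx5e_fs_set_ns()'s third argument as an egress/ingress selector is an assumption for this sketch:

/* Sketch only: demonstrates the setter-based namespace wiring; the bool
 * argument is assumed to select the egress (true) vs ingress (false)
 * namespace slot.
 */
static int example_fs_namespace_setup(struct mlx5e_flow_steering *fs,
				      struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!ns)
		return -EINVAL;

	mlx5e_fs_set_ns(fs, ns, false);	/* assumed: false = ingress */
	return 0;
}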
static int mlx5i_init_rx(struct mlx5e_priv *priv)
......