Commit f6755b80 authored by Vu Pham, committed by Saeed Mahameed

net/mlx5e: Dynamic alloc arfs table for netdev when needed

Dynamic allocate arfs table in mlx5e_priv for EN netdev
when needed. Don't allocate it for representor netdev.
Signed-off-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent bb569657
...@@ -200,31 +200,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev, ...@@ -200,31 +200,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
#endif /* CONFIG_MLX5_EN_RXNFC */ #endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS #ifdef CONFIG_MLX5_EN_ARFS
#define ARFS_HASH_SHIFT BITS_PER_BYTE struct mlx5e_arfs_tables;
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES,
};
struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
...@@ -260,7 +236,7 @@ struct mlx5e_flow_steering { ...@@ -260,7 +236,7 @@ struct mlx5e_flow_steering {
struct mlx5e_ttc_table ttc; struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc; struct mlx5e_ttc_table inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS #ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables arfs; struct mlx5e_arfs_tables *arfs;
#endif #endif
#ifdef CONFIG_MLX5_EN_TLS #ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp; struct mlx5e_accel_fs_tcp *accel_tcp;
......
...@@ -36,6 +36,32 @@ ...@@ -36,6 +36,32 @@
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include "en.h" #include "en.h"
/* aRFS rule hashing: an 8-bit hash selects one of BIT(BITS_PER_BYTE) == 256
 * buckets in each per-protocol table (see arfs_hash_bucket() users below).
 */
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
/* One aRFS steering table for a single protocol/IP-version combination
 * (see enum arfs_type).
 */
struct arfs_table {
struct mlx5e_flow_table ft; /* HW flow table holding this type's aRFS rules */
struct mlx5_flow_handle *default_rule; /* catch-all rule installed by arfs_add_default_rule() */
struct hlist_head rules_hash[ARFS_HASH_SIZE]; /* buckets of active arfs_rule entries */
};
/* Index of each per-protocol aRFS table; one table exists per value. */
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES, /* count only — sizes the arfs_tables[] array */
};
/* Top-level aRFS state. Since this commit it is allocated on demand
 * (kvzalloc) in mlx5e_arfs_create_tables() and freed in
 * mlx5e_arfs_destroy_tables(), so representor netdevs never pay for it.
 */
struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES]; /* one table per arfs_type */
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules; /* all active aRFS rules, for expiry/teardown walks */
int last_filter_id; /* next filter id, wraps at RPS_NO_FILTER */
struct workqueue_struct *wq; /* "mlx5e_arfs" singlethread wq running arfs_handle_work */
};
struct arfs_tuple { struct arfs_tuple {
__be16 etype; __be16 etype;
u8 ip_proto; u8 ip_proto;
...@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) ...@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) { for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs.arfs.arfs_tables[i].ft.t; dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */ /* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest); err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
if (err) { if (err) {
...@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t) ...@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft); mlx5e_destroy_flow_table(&arfs_t->ft);
} }
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
{ {
int i; int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;
arfs_del_rules(priv); arfs_del_rules(priv);
destroy_workqueue(priv->fs.arfs.wq); destroy_workqueue(priv->fs.arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) { for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t)) if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]); arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
} }
} }
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;
_mlx5e_cleanup_tables(priv);
kvfree(priv->fs.arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv, static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type) enum arfs_type type)
{ {
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir; struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {}; struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act); MLX5_DECLARE_FLOW_ACT(flow_act);
...@@ -290,7 +322,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -290,7 +322,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
static int arfs_create_table(struct mlx5e_priv *priv, static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type) enum arfs_type type)
{ {
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
int err; int err;
...@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) ...@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0; return 0;
spin_lock_init(&priv->fs.arfs.arfs_lock); priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
INIT_LIST_HEAD(&priv->fs.arfs.rules); if (!priv->fs.arfs)
priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs.wq)
return -ENOMEM; return -ENOMEM;
spin_lock_init(&priv->fs.arfs->arfs_lock);
INIT_LIST_HEAD(&priv->fs.arfs->rules);
priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) { for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i); err = arfs_create_table(priv, i);
if (err) if (err)
goto err; goto err_des;
} }
return 0; return 0;
err_des:
_mlx5e_cleanup_tables(priv);
err: err:
mlx5e_arfs_destroy_tables(priv); kvfree(priv->fs.arfs);
return err; return err;
} }
...@@ -358,8 +397,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) ...@@ -358,8 +397,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int j; int j;
HLIST_HEAD(del_list); HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock); spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) && if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev, rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id, arfs_rule->rxq, arfs_rule->flow_id,
...@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) ...@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break; break;
} }
} }
spin_unlock_bh(&priv->fs.arfs.arfs_lock); spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule) if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule); mlx5_del_flow_rules(arfs_rule->rule);
...@@ -387,12 +426,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv) ...@@ -387,12 +426,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int j; int j;
HLIST_HEAD(del_list); HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock); spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) { mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist); hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list); hlist_add_head(&rule->hlist, &del_list);
} }
spin_unlock_bh(&priv->fs.arfs.arfs_lock); spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work); cancel_work_sync(&rule->arfs_work);
...@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, ...@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule) struct arfs_rule *arfs_rule)
{ {
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple; struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL; struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {}; struct mlx5_flow_destination dest = {};
...@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work) ...@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock); mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&priv->fs.arfs.arfs_lock); spin_lock_bh(&priv->fs.arfs->arfs_lock);
hlist_del(&arfs_rule->hlist); hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&priv->fs.arfs.arfs_lock); spin_unlock_bh(&priv->fs.arfs->arfs_lock);
mutex_unlock(&priv->state_lock); mutex_unlock(&priv->state_lock);
kfree(arfs_rule); kfree(arfs_rule);
...@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, ...@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst; tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id; rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist, hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port, arfs_hash_bucket(arfs_t, tuple->src_port,
...@@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ...@@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id) u16 rxq_index, u32 flow_id)
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_table *arfs_t; struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule; struct arfs_rule *arfs_rule;
struct flow_keys fk; struct flow_keys fk;
...@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ...@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM; return -ENOMEM;
} }
} }
queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work); queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock); spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id; return arfs_rule->filter_id;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment