Commit 3cfef195 authored by David S. Miller

Merge branch 'mlx5-aRFS'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 ethernet aRFS support

This series adds accelerated RFS support for the mlx5e driver.
I have also added one patch, unrelated to aRFS, that fixes the rtnl_lock
warning the mlx5 driver has been getting since b7aade15 ('vxlan: break dependency with netdev drivers')

aRFS support in details:

A direct TIR per RQ is now required in order to have the essential building blocks
for aRFS.  Until now the driver had one direct TIR that forwards traffic to RQ[0] (core 0),
and one indirect TIR for the RSS indirection table.  We therefore add one direct TIR
per RQ, e.g.: TIR[i] -> RQ[i] (core i).
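
A minimal sketch of the resulting objects, using hypothetical helpers (the actual
RQT/TIR creation lives in en_main.c, which is not part of the hunks quoted below):

	/* Sketch: one single-entry RQT plus one direct TIR per channel, so a
	 * flow rule can later deliver traffic to any core's RQ.
	 * create_rqt_for_rq()/create_tir_for_rqt() are hypothetical helpers. */
	for (i = 0; i < priv->params.num_channels; i++) {
		priv->direct_tir[i].rqtn = create_rqt_for_rq(priv, i);
		priv->direct_tir[i].tirn = create_tir_for_rqt(priv,
						priv->direct_tir[i].rqtn);
	}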

The "modify flow rule destination" operation is made public and exposed in the flow
steering API, giving the ethernet driver the ability to dynamically modify the
destination TIR (core) of aRFS rules.

The CPU reverse mapping (rmap) is initialized to notify the upper layers of the
internal receive queue to CPU mappings.
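
For reference, a minimal sketch of how such an rmap is typically built with the
<linux/cpu_rmap.h> API (the IRQ lookup helper here is an assumption; the actual
mlx5e wiring is not part of the hunks quoted below):

	#ifdef CONFIG_RFS_ACCEL
	#include <linux/cpu_rmap.h>

	/* Sketch: build a reverse map from CPUs to RX queues so aRFS can
	 * tell which queue is serviced near the CPU a flow runs on.
	 * get_channel_irq() is a hypothetical stand-in for looking up a
	 * channel's MSI-X IRQ number. */
	static int sketch_setup_rx_cpu_rmap(struct mlx5e_priv *priv)
	{
		int i, err;

		priv->netdev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(priv->params.num_channels);
		if (!priv->netdev->rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < priv->params.num_channels; i++) {
			err = irq_cpu_rmap_add(priv->netdev->rx_cpu_rmap,
					       get_channel_irq(priv, i));
			if (err)
				return err;
		}
		return 0;
	}
	#endif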

Some design refactoring was done on the mlx5e ethernet driver flow tables and the
flow steering API.  The caller of create_flow_table can now choose the level of the
flow table; this lets us create the mlx5e flow tables in reverse order and connect
them as we go: we create flow table[i+1] before flow table[i], so that flow
table[i+1] can be set as the destination of flow table[i] once flow table[i] is
created.  A rough sketch of this ordering follows.
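
A minimal sketch of that ordering, assuming a hypothetical connect_ft_to_next()
helper for pointing a table's rules at the next table (the real connections are
made rule by rule in the hunks below):

	/* Sketch: create the deepest table first, then each shallower one,
	 * so a table's destination already exists when its rules are added. */
	static const int levels[] = {
		MLX5E_ARFS_FT_LEVEL, MLX5E_TTC_FT_LEVEL,
		MLX5E_L2_FT_LEVEL, MLX5E_VLAN_FT_LEVEL,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(levels); i++) {
		ft[levels[i]] = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
						       size, levels[i]);
		if (i > 0)
			connect_ft_to_next(ft[levels[i]], ft[levels[i - 1]]);
	}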
We have also split the main flow table in the following manner:
    - Before: an RX packet had to visit two flow tables before being delivered to its receive queue:
        RX packet -> vlan filter flow table -> main flow table.
        > vlan filter checks that the packet's vlan field is allowed.
        > main flow table checks that the dest mac is allowed and examines the l3/l4 headers to
        retrieve the RSS hash for steering the packet into its final receive queue.

    - Now the main flow table is split into an l2 dst mac steering table and a ttc (traffic type classifier) table:
        RX packet -> vlan filter -> l2 table -> ttc table
        > vlan filter - same as before
        > l2 table - filters packets according to their destination mac address
        > ttc table - classifies packet headers for RSS steering (sketched below)
            - L3/L4 classification rules steer the packet according to its headers' hash
            - if none of the rules apply, the packet is steered to RQ[0]
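
As a rough sketch, the ttc classification amounts to the following mapping (a
simplification: the real table also carries IPsec AH/ESP traffic types; compare
arfs_get_table() further down in this series):

	static enum mlx5e_traffic_types ttc_classify(__be16 etype, u8 ip_proto)
	{
		if (etype == htons(ETH_P_IP))
			return ip_proto == IPPROTO_TCP ? MLX5E_TT_IPV4_TCP :
			       ip_proto == IPPROTO_UDP ? MLX5E_TT_IPV4_UDP :
							 MLX5E_TT_IPV4;
		if (etype == htons(ETH_P_IPV6))
			return ip_proto == IPPROTO_TCP ? MLX5E_TT_IPV6_TCP :
			       ip_proto == IPPROTO_UDP ? MLX5E_TT_IPV6_UDP :
							 MLX5E_TT_IPV6;
		return MLX5E_TT_ANY; /* no rule applies: steered to RQ[0] */
	}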

After the above refactoring, all that is left to do is to create the aRFS flow table,
which manages the aRFS steering rules that forward traffic to the desired RQ (core),
and to connect the ttc table rules' destinations to the aRFS flow table.

On a miss, the aRFS flow table delivers the traffic to the core that the original
ttc hash would have chosen.

The aRFS flow table is not initialized and enabled until the user explicitly asks for it,
i.e. by setting NETIF_F_NTUPLE to ON.  This way the ttc table does not need to forward
traffic to the aRFS table unless required.  When the feature is set back to OFF, the aRFS
flow table is disabled and disconnected.
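
Presumably the toggle is wired through set_features along these lines (a hedged
sketch; the en_main.c hook itself is not part of the hunks quoted below):

	/* Sketch: react to NETIF_F_NTUPLE flips using the helpers added
	 * by this series. */
	static int sketch_set_features(struct net_device *netdev,
				       netdev_features_t features)
	{
		struct mlx5e_priv *priv = netdev_priv(netdev);
		netdev_features_t changes = features ^ netdev->features;
		int err = 0;

		if (changes & NETIF_F_NTUPLE) {
			if (features & NETIF_F_NTUPLE)
				err = mlx5e_arfs_enable(priv);  /* ttc rules -> aRFS FTs */
			else
				err = mlx5e_arfs_disable(priv); /* ttc rules -> TIRs */
		}
		return err;
	}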
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4b2523c1 45bf454a
@@ -1438,7 +1438,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 	if (!ft) {
 		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
 							 num_entries,
-							 num_groups);
+							 num_groups,
+							 0);

 		if (!IS_ERR(ft)) {
 			prio->refcount = 0;
...
@@ -9,3 +9,4 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
 		en_txrx.o en_clock.o vxlan.o en_tc.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) +=  en_dcbnl.o
+mlx5_core-$(CONFIG_RFS_ACCEL) +=  en_arfs.o
@@ -48,6 +48,8 @@
 #include "mlx5_core.h"
 #include "en_stats.h"

+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
 #define MLX5E_MAX_NUM_TC	8

 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE	0x6
@@ -385,42 +387,42 @@ enum mlx5e_traffic_types {
 	MLX5E_TT_IPV6,
 	MLX5E_TT_ANY,
 	MLX5E_NUM_TT,
+	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };

-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+enum {
+	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+	MLX5E_STATE_OPENED,
+	MLX5E_STATE_DESTROYING,
+};

-enum mlx5e_rqt_ix {
-	MLX5E_INDIRECTION_RQT,
-	MLX5E_SINGLE_RQ_RQT,
-	MLX5E_NUM_RQT,
+struct mlx5e_vxlan_db {
+	spinlock_t			lock; /* protect vxlan table */
+	struct radix_tree_root		tree;
 };

-struct mlx5e_eth_addr_info {
+struct mlx5e_l2_rule {
 	u8  addr[ETH_ALEN + 2];
-	u32 tt_vec;
-	struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+	struct mlx5_flow_rule *rule;
 };

-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
-	struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
-	struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
-	struct mlx5e_eth_addr_info broadcast;
-	struct mlx5e_eth_addr_info allmulti;
-	struct mlx5e_eth_addr_info promisc;
-	bool                       broadcast_enabled;
-	bool                       allmulti_enabled;
-	bool                       promisc_enabled;
+struct mlx5e_flow_table {
+	int num_groups;
+	struct mlx5_flow_table *t;
+	struct mlx5_flow_group **g;
 };

-enum {
-	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
-	MLX5E_STATE_OPENED,
-	MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+	struct mlx5_flow_table		*t;
+	struct rhashtable_params	ht_params;
+	struct rhashtable		ht;
 };

-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+	struct mlx5e_flow_table		ft;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	struct mlx5_flow_rule	*active_vlans_rule[VLAN_N_VID];
 	struct mlx5_flow_rule	*untagged_rule;
@@ -428,29 +430,74 @@ struct mlx5e_vlan_db {
 	bool          filter_disabled;
 };

-struct mlx5e_vxlan_db {
-	spinlock_t			lock; /* protect vxlan table */
-	struct radix_tree_root		tree;
+struct mlx5e_l2_table {
+	struct mlx5e_flow_table    ft;
+	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct mlx5e_l2_rule	   broadcast;
+	struct mlx5e_l2_rule	   allmulti;
+	struct mlx5e_l2_rule	   promisc;
+	bool                       broadcast_enabled;
+	bool                       allmulti_enabled;
+	bool                       promisc_enabled;
 };

-struct mlx5e_flow_table {
-	int num_groups;
-	struct mlx5_flow_table *t;
-	struct mlx5_flow_group **g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_rule	 *rules[MLX5E_NUM_TT];
 };

-struct mlx5e_tc_flow_table {
-	struct mlx5_flow_table		*t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_rule    *default_rule;
+	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
+};

-	struct rhashtable_params        ht_params;
-	struct rhashtable               ht;
+enum arfs_type {
+	ARFS_IPV4_TCP,
+	ARFS_IPV6_TCP,
+	ARFS_IPV4_UDP,
+	ARFS_IPV6_UDP,
+	ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+	/* Protect aRFS rules list */
+	spinlock_t                     arfs_lock;
+	struct list_head               rules;
+	int                            last_filter_id;
+	struct workqueue_struct        *wq;
 };

-struct mlx5e_flow_tables {
+/* NIC prio FTS */
+enum {
+	MLX5E_VLAN_FT_LEVEL = 0,
+	MLX5E_L2_FT_LEVEL,
+	MLX5E_TTC_FT_LEVEL,
+	MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_flow_steering {
 	struct mlx5_flow_namespace      *ns;
-	struct mlx5e_tc_flow_table      tc;
-	struct mlx5e_flow_table         vlan;
-	struct mlx5e_flow_table         main;
+	struct mlx5e_tc_table           tc;
+	struct mlx5e_vlan_table         vlan;
+	struct mlx5e_l2_table           l2;
+	struct mlx5e_ttc_table          ttc;
+	struct mlx5e_arfs_tables        arfs;
+};
+
+struct mlx5e_direct_tir {
+	u32	tirn;
+	u32	rqtn;
+};
+
+enum {
+	MLX5E_TC_PRIO = 0,
+	MLX5E_NIC_PRIO
 };

 struct mlx5e_priv {
@@ -470,12 +517,11 @@ struct mlx5e_priv {
 	struct mlx5e_channel     **channel;
 	u32                        tisn[MLX5E_MAX_NUM_TC];
-	u32                        rqtn[MLX5E_NUM_RQT];
-	u32                        tirn[MLX5E_NUM_TT];
+	u32                        indir_rqtn;
+	u32                        indir_tirn[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_direct_tir    direct_tir[MLX5E_MAX_NUM_CHANNELS];

-	struct mlx5e_flow_tables   fts;
-	struct mlx5e_eth_addr_db   eth_addr;
-	struct mlx5e_vlan_db       vlan;
+	struct mlx5e_flow_steering fs;
 	struct mlx5e_vxlan_db      vxlan;

 	struct mlx5e_params        params;
@@ -557,9 +603,10 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

 void mlx5e_update_stats(struct mlx5e_priv *priv);

-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 void mlx5e_set_rx_mode_work(struct work_struct *work);

 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -578,7 +625,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);

-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);

 int mlx5e_open_locked(struct net_device *netdev);
@@ -636,6 +683,32 @@ extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 #endif

+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			u16 rxq_index, u32 flow_id);
+#endif
+
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);

 #endif /* __MLX5_EN_H__ */
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"
struct arfs_tuple {
__be16 etype;
u8 ip_proto;
union {
__be32 src_ipv4;
struct in6_addr src_ipv6;
};
union {
__be32 dst_ipv4;
struct in6_addr dst_ipv6;
};
__be16 src_port;
__be16 dst_port;
};
struct arfs_rule {
struct mlx5e_priv *priv;
struct work_struct arfs_work;
struct mlx5_flow_rule *rule;
struct hlist_node hlist;
int rxq;
/* Flow ID passed to ndo_rx_flow_steer */
int flow_id;
/* Filter ID returned by ndo_rx_flow_steer */
int filter_id;
struct arfs_tuple tuple;
};
#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
for (i = 0; i < ARFS_NUM_TYPES; i++) \
mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
for (j = 0; j < ARFS_HASH_SIZE; j++) \
hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
{
switch (type) {
case ARFS_IPV4_TCP:
return MLX5E_TT_IPV4_TCP;
case ARFS_IPV4_UDP:
return MLX5E_TT_IPV4_UDP;
case ARFS_IPV6_TCP:
return MLX5E_TT_IPV6_TCP;
case ARFS_IPV6_UDP:
return MLX5E_TT_IPV6_UDP;
default:
return -EINVAL;
}
}
static int arfs_disable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
u32 *tirn = priv->indir_tirn;
int err = 0;
int tt;
int i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.tir_num = tirn[i];
tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed\n",
__func__);
return err;
}
}
return 0;
}
static void arfs_del_rules(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
arfs_del_rules(priv);
return arfs_disable(priv);
}
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
int err = 0;
int tt;
int i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
tt = arfs_get_tt(i);
/* Modify ttc rules destination to point to the aRFS FTs */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed err=%d\n",
__func__, err);
arfs_disable(priv);
return err;
}
}
return 0;
}
static void arfs_destroy_table(struct arfs_table *arfs_t)
{
mlx5_del_flow_rule(arfs_t->default_rule);
mlx5e_destroy_flow_table(&arfs_t->ft);
}
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;
arfs_del_rules(priv);
destroy_workqueue(priv->fs.arfs.wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
}
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
u32 *tirn = priv->indir_tirn;
u32 *match_criteria;
u32 *match_value;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case ARFS_IPV4_TCP:
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
break;
case ARFS_IPV4_UDP:
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
break;
case ARFS_IPV6_TCP:
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
break;
case ARFS_IPV6_UDP:
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
break;
default:
err = -EINVAL;
goto out;
}
arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
match_criteria, match_value,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
out:
kvfree(match_criteria);
kvfree(match_value);
return err;
}
#define MLX5E_ARFS_NUM_GROUPS 2
#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
MLX5E_ARFS_GROUP2_SIZE)
static int arfs_create_groups(struct mlx5e_flow_table *ft,
enum arfs_type type)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *outer_headers_c;
int ix = 0;
u32 *in;
int err;
u8 *mc;
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
in = mlx5_vzalloc(inlen);
if (!in || !ft->g) {
kvfree(ft->g);
kvfree(in);
return -ENOMEM;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
switch (type) {
case ARFS_IPV4_TCP:
case ARFS_IPV6_TCP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
break;
case ARFS_IPV4_UDP:
case ARFS_IPV6_UDP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
break;
default:
err = -EINVAL;
goto out;
}
switch (type) {
case ARFS_IPV4_TCP:
case ARFS_IPV4_UDP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
break;
case ARFS_IPV6_TCP:
case ARFS_IPV6_UDP:
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff, 16);
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff, 16);
break;
default:
err = -EINVAL;
goto out;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_ARFS_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
ft->num_groups++;
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_ARFS_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
ft->num_groups++;
kvfree(in);
return 0;
err:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
out:
kvfree(in);
return err;
}
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
err = arfs_create_groups(ft, type);
if (err)
goto err;
err = arfs_add_default_rule(priv, type);
if (err)
goto err;
return 0;
err:
mlx5e_destroy_flow_table(ft);
return err;
}
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
int err = 0;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
spin_lock_init(&priv->fs.arfs.arfs_lock);
INIT_LIST_HEAD(&priv->fs.arfs.rules);
priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs.wq)
return -ENOMEM;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
if (err)
goto err;
}
return 0;
err:
mlx5e_arfs_destroy_tables(priv);
return err;
}
#define MLX5E_ARFS_EXPIRY_QUOTA 60
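/* The quota bounds how many rules a single expiry pass examines; rules that
 * rps_may_expire_flow() reports idle are unlinked under arfs_lock and freed
 * outside it. */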
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
int quota = 0;
int i;
int j;
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rule(arfs_rule->rule);
hlist_del(&arfs_rule->hlist);
kfree(arfs_rule);
}
}
static void arfs_del_rules(struct mlx5e_priv *priv)
{
struct hlist_node *htmp;
struct arfs_rule *rule;
int i;
int j;
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
if (rule->rule)
mlx5_del_flow_rule(rule->rule);
hlist_del(&rule->hlist);
kfree(rule);
}
}
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
__be16 dst_port)
{
unsigned long l;
int bucket_idx;
l = (__force unsigned long)src_port |
((__force unsigned long)dst_port << 2);
bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
return &arfs_t->rules_hash[bucket_idx];
}
static u8 arfs_get_ip_proto(const struct sk_buff *skb)
{
return (skb->protocol == htons(ETH_P_IP)) ?
ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
u8 ip_proto, __be16 etype)
{
if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
return &arfs->arfs_tables[ARFS_IPV4_TCP];
if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
return &arfs->arfs_tables[ARFS_IPV4_UDP];
if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
return &arfs->arfs_tables[ARFS_IPV6_TCP];
if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
return &arfs->arfs_tables[ARFS_IPV6_UDP];
return NULL;
}
static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_rule *rule = NULL;
struct mlx5_flow_destination dest;
struct arfs_table *arfs_table;
u8 match_criteria_enable = 0;
struct mlx5_flow_table *ft;
u32 *match_criteria;
u32 *match_value;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ntohs(tuple->etype));
arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
if (!arfs_table) {
err = -EINVAL;
goto out;
}
ft = arfs_table->ft.t;
if (tuple->ip_proto == IPPROTO_TCP) {
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.tcp_dport);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.tcp_sport);
MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
ntohs(tuple->dst_port));
MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
ntohs(tuple->src_port));
} else {
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.udp_dport);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.udp_sport);
MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
ntohs(tuple->dst_port));
MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
ntohs(tuple->src_port));
}
if (tuple->etype == htons(ETH_P_IP)) {
memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
&tuple->src_ipv4,
4);
memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&tuple->dst_ipv4,
4);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
} else {
memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
&tuple->src_ipv6,
16);
memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&tuple->dst_ipv6,
16);
memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff,
16);
memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq, err);
}
out:
kvfree(match_criteria);
kvfree(match_value);
return err ? ERR_PTR(err) : rule;
}
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule, u16 rxq)
{
struct mlx5_flow_destination dst;
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = priv->direct_tir[rxq].tirn;
err = mlx5_modify_rule_destination(rule, &dst);
if (err)
netdev_warn(priv->netdev,
"Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
}
static void arfs_handle_work(struct work_struct *work)
{
struct arfs_rule *arfs_rule = container_of(work,
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
struct mlx5_flow_rule *rule;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&priv->fs.arfs.arfs_lock);
hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
goto out;
}
mutex_unlock(&priv->state_lock);
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
if (IS_ERR(rule))
goto out;
arfs_rule->rule = rule;
} else {
arfs_modify_rule_rq(priv, arfs_rule->rule,
arfs_rule->rxq);
}
out:
arfs_may_expire_flow(priv);
}
/* return L4 destination port from ip4/6 packets */
static __be16 arfs_get_dst_port(const struct sk_buff *skb)
{
char *transport_header;
transport_header = skb_transport_header(skb);
if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
return ((struct tcphdr *)transport_header)->dest;
return ((struct udphdr *)transport_header)->dest;
}
/* return L4 source port from ip4/6 packets */
static __be16 arfs_get_src_port(const struct sk_buff *skb)
{
char *transport_header;
transport_header = skb_transport_header(skb);
if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
return ((struct tcphdr *)transport_header)->source;
return ((struct udphdr *)transport_header)->source;
}
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_table *arfs_t,
const struct sk_buff *skb,
u16 rxq, u32 flow_id)
{
struct arfs_rule *rule;
struct arfs_tuple *tuple;
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule)
return NULL;
rule->priv = priv;
rule->rxq = rxq;
INIT_WORK(&rule->arfs_work, arfs_handle_work);
tuple = &rule->tuple;
tuple->etype = skb->protocol;
if (tuple->etype == htons(ETH_P_IP)) {
tuple->src_ipv4 = ip_hdr(skb)->saddr;
tuple->dst_ipv4 = ip_hdr(skb)->daddr;
} else {
memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
sizeof(struct in6_addr));
memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
sizeof(struct in6_addr));
}
tuple->ip_proto = arfs_get_ip_proto(skb);
tuple->src_port = arfs_get_src_port(skb);
tuple->dst_port = arfs_get_dst_port(skb);
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
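	/* filter ids wrap below RPS_NO_FILTER so a valid id is never
	 * confused with 'no filter' by the RFS core */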
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
tuple->dst_port));
return rule;
}
static bool arfs_cmp_ips(struct arfs_tuple *tuple,
const struct sk_buff *skb)
{
if (tuple->etype == htons(ETH_P_IP) &&
tuple->src_ipv4 == ip_hdr(skb)->saddr &&
tuple->dst_ipv4 == ip_hdr(skb)->daddr)
return true;
if (tuple->etype == htons(ETH_P_IPV6) &&
(!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
sizeof(struct in6_addr))) &&
(!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
sizeof(struct in6_addr))))
return true;
return false;
}
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
const struct sk_buff *skb)
{
struct arfs_rule *arfs_rule;
struct hlist_head *head;
__be16 src_port = arfs_get_src_port(skb);
__be16 dst_port = arfs_get_dst_port(skb);
head = arfs_hash_bucket(arfs_t, src_port, dst_port);
hlist_for_each_entry(arfs_rule, head, hlist) {
if (arfs_rule->tuple.src_port == src_port &&
arfs_rule->tuple.dst_port == dst_port &&
arfs_cmp_ips(&arfs_rule->tuple, skb)) {
return arfs_rule;
}
}
return NULL;
}
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
if (skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
arfs_rule = arfs_find_rule(arfs_t, skb);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
arfs_rule->rxq = rxq_index;
} else {
arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
rxq_index, flow_id);
if (!arfs_rule) {
spin_unlock_bh(&arfs->arfs_lock);
return -ENOMEM;
}
}
queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
@@ -456,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int ncv = mlx5e_get_max_num_channels(priv->mdev);
 	unsigned int count = ch->combined_count;
+	bool arfs_enabled;
 	bool was_opened;
 	int err = 0;

@@ -484,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev,
 	if (was_opened)
 		mlx5e_close_locked(dev);

+	arfs_enabled = dev->features & NETIF_F_NTUPLE;
+	if (arfs_enabled)
+		mlx5e_arfs_disable(priv);
+
 	priv->params.num_channels = count;
 	mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, count);

 	if (was_opened)
 		err = mlx5e_open_locked(dev);
+	if (err)
+		goto out;

+	if (arfs_enabled) {
+		err = mlx5e_arfs_enable(priv);
+		if (err)
+			netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+				   __func__, err);
+	}
+
+out:
 	mutex_unlock(&priv->state_lock);

 	return err;
@@ -826,9 +841,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
 	mlx5e_build_tir_ctx_hash(tirc, priv);

-	for (i = 0; i < MLX5E_NUM_TT; i++)
-		if (IS_HASHING_TT(i))
-			mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
 }

 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -850,9 +864,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 	mutex_lock(&priv->state_lock);

 	if (indir) {
+		u32 rqtn = priv->indir_rqtn;
+
 		memcpy(priv->params.indirection_rqt, indir,
 		       sizeof(priv->params.indirection_rqt));
-		mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}

 	if (key)
...
@@ -37,7 +37,10 @@
 #include <linux/mlx5/fs.h>
 #include "en.h"

-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_l2_rule *ai);

 enum {
 	MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@ enum {
 	MLX5E_ACTION_DEL  = 2,
 };

-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
 	struct hlist_node          hlist;
 	u8                         action;
-	struct mlx5e_eth_addr_info ai;
+	struct mlx5e_l2_rule ai;
 };

-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
 {
 	return addr[5];
 }

-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
 {
-	struct mlx5e_eth_addr_hash_node *hn;
-	int ix = mlx5e_hash_eth_addr(addr);
+	struct mlx5e_l2_hash_node *hn;
+	int ix = mlx5e_hash_l2(addr);
 	int found = 0;

 	hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
 	hlist_add_head(&hn->hlist, &hash[ix]);
 }

-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
 {
 	hlist_del(&hn->hlist);
 	kfree(hn);
 }
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai)
{
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}
static int mlx5e_get_eth_addr_type(u8 *addr)
{
if (is_unicast_ether_addr(addr))
return MLX5E_UC;
if ((addr[0] == 0x01) &&
(addr[1] == 0x00) &&
(addr[2] == 0x5e) &&
!(addr[3] & 0x80))
return MLX5E_MC_IPV4;
if ((addr[0] == 0x33) &&
(addr[1] == 0x33))
return MLX5E_MC_IPV6;
return MLX5E_MC_OTHER;
}
static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
int eth_addr_type;
u32 ret;
switch (type) {
case MLX5E_FULLMATCH:
eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
switch (eth_addr_type) {
case MLX5E_UC:
ret =
BIT(MLX5E_TT_IPV4_TCP) |
BIT(MLX5E_TT_IPV6_TCP) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4_IPSEC_AH) |
BIT(MLX5E_TT_IPV6_IPSEC_AH) |
BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
BIT(MLX5E_TT_ANY) |
0;
break;
}
break;
case MLX5E_ALLMULTI:
ret =
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
BIT(MLX5E_TT_IPV4_TCP) |
BIT(MLX5E_TT_IPV6_TCP) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4_IPSEC_AH) |
BIT(MLX5E_TT_IPV6_IPSEC_AH) |
BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
}
return ret;
}
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai,
int type, u32 *mc, u32 *mv)
{
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_table *ft = priv->fts.main.t;
u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
outer_headers.dmac_47_16);
u32 *tirn = priv->tirn;
u32 tt_vec;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case MLX5E_FULLMATCH:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
eth_broadcast_addr(mc_dmac);
ether_addr_copy(mv_dmac, ai->addr);
break;
case MLX5E_ALLMULTI:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
case MLX5E_PROMISC:
break;
}
tt_vec = mlx5e_get_tt_vec(ai, type);
if (tt_vec & BIT(MLX5E_TT_ANY)) {
rule_p = &ai->ft_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
if (tt_vec & BIT(MLX5E_TT_IPV4)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
if (tt_vec & BIT(MLX5E_TT_IPV6)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
}
MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
}
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
}
MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
}
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
err_del_ai:
err = PTR_ERR(*rule_p);
*rule_p = NULL;
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type)
{
u32 *match_criteria;
u32 *match_value;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto add_eth_addr_rule_out;
}
err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
match_value);
add_eth_addr_rule_out:
kvfree(match_criteria);
kvfree(match_value);
return err;
}
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{ {
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
...@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) ...@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i; int i;
list_size = 0; list_size = 0;
for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
list_size++; list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
...@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) ...@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM; return -ENOMEM;
i = 0; i = 0;
for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
if (i >= list_size) if (i >= list_size)
break; break;
vlans[i++] = vlan; vlans[i++] = vlan;
...@@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, ...@@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, enum mlx5e_vlan_rule_type rule_type,
u16 vid, u32 *mc, u32 *mv) u16 vid, u32 *mc, u32 *mv)
{ {
struct mlx5_flow_table *ft = priv->fts.vlan.t; struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0; u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p; struct mlx5_flow_rule **rule_p;
int err = 0; int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.main.t; dest.ft = priv->fs.l2.ft.t;
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
switch (rule_type) { switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED: case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->vlan.untagged_rule; rule_p = &priv->fs.vlan.untagged_rule;
break; break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID: case MLX5E_VLAN_RULE_TYPE_ANY_VID:
rule_p = &priv->vlan.any_vlan_rule; rule_p = &priv->fs.vlan.any_vlan_rule;
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
break; break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->vlan.active_vlans_rule[vid]; rule_p = &priv->fs.vlan.active_vlans_rule[vid];
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
...@@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, ...@@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{ {
switch (rule_type) { switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED: case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
if (priv->vlan.untagged_rule) { if (priv->fs.vlan.untagged_rule) {
mlx5_del_flow_rule(priv->vlan.untagged_rule); mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
priv->vlan.untagged_rule = NULL; priv->fs.vlan.untagged_rule = NULL;
} }
break; break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID: case MLX5E_VLAN_RULE_TYPE_ANY_VID:
if (priv->vlan.any_vlan_rule) { if (priv->fs.vlan.any_vlan_rule) {
mlx5_del_flow_rule(priv->vlan.any_vlan_rule); mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
priv->vlan.any_vlan_rule = NULL; priv->fs.vlan.any_vlan_rule = NULL;
} }
break; break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID: case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv); mlx5e_vport_context_update_vlans(priv);
if (priv->vlan.active_vlans_rule[vid]) { if (priv->fs.vlan.active_vlans_rule[vid]) {
mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]); mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
priv->vlan.active_vlans_rule[vid] = NULL; priv->fs.vlan.active_vlans_rule[vid] = NULL;
} }
mlx5e_vport_context_update_vlans(priv); mlx5e_vport_context_update_vlans(priv);
break; break;
...@@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, ...@@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{ {
if (!priv->vlan.filter_disabled) if (!priv->fs.vlan.filter_disabled)
return; return;
priv->vlan.filter_disabled = false; priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC) if (priv->netdev->flags & IFF_PROMISC)
return; return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
...@@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) ...@@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{ {
if (priv->vlan.filter_disabled) if (priv->fs.vlan.filter_disabled)
return; return;
priv->vlan.filter_disabled = true; priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC) if (priv->netdev->flags & IFF_PROMISC)
return; return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
...@@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, ...@@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
set_bit(vid, priv->vlan.active_vlans); set_bit(vid, priv->fs.vlan.active_vlans);
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
} }
...@@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, ...@@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
clear_bit(vid, priv->vlan.active_vlans); clear_bit(vid, priv->fs.vlan.active_vlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
...@@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, ...@@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
} }
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
static void mlx5e_execute_action(struct mlx5e_priv *priv, static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_hash_node *hn) struct mlx5e_l2_hash_node *hn)
{ {
switch (hn->action) { switch (hn->action) {
case MLX5E_ACTION_ADD: case MLX5E_ACTION_ADD:
mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
hn->action = MLX5E_ACTION_NONE; hn->action = MLX5E_ACTION_NONE;
break; break;
case MLX5E_ACTION_DEL: case MLX5E_ACTION_DEL:
mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); mlx5e_del_l2_flow_rule(priv, &hn->ai);
mlx5e_del_eth_addr_from_hash(hn); mlx5e_del_l2_from_hash(hn);
break; break;
} }
} }
...@@ -682,14 +326,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) ...@@ -682,14 +326,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
netif_addr_lock_bh(netdev); netif_addr_lock_bh(netdev);
mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
priv->netdev->dev_addr); priv->netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev) netdev_for_each_uc_addr(ha, netdev)
mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev) netdev_for_each_mc_addr(ha, netdev)
mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev); netif_addr_unlock_bh(netdev);
} }
...@@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, ...@@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
{ {
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
struct mlx5e_eth_addr_hash_node *hn; struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list; struct hlist_head *addr_list;
struct hlist_node *tmp; struct hlist_node *tmp;
int i = 0; int i = 0;
int hi; int hi;
addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */ if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr); ether_addr_copy(addr_array[i++], ndev->dev_addr);
else if (priv->eth_addr.broadcast_enabled) else if (priv->fs.l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast); ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
...@@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, ...@@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int list_type) int list_type)
{ {
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
struct mlx5e_eth_addr_hash_node *hn; struct mlx5e_l2_hash_node *hn;
u8 (*addr_array)[ETH_ALEN] = NULL; u8 (*addr_array)[ETH_ALEN] = NULL;
struct hlist_head *addr_list; struct hlist_head *addr_list;
struct hlist_node *tmp; struct hlist_node *tmp;
...@@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, ...@@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err; int err;
int hi; int hi;
size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ? max_size = is_uc ?
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++; size++;
...@@ -770,7 +414,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, ...@@ -770,7 +414,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
static void mlx5e_vport_context_update(struct mlx5e_priv *priv) static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{ {
struct mlx5e_eth_addr_db *ea = &priv->eth_addr; struct mlx5e_l2_table *ea = &priv->fs.l2;
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
...@@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv) ...@@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{ {
struct mlx5e_eth_addr_hash_node *hn; struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp; struct hlist_node *tmp;
int i; int i;
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
mlx5e_execute_action(priv, hn); mlx5e_execute_l2_action(priv, hn);
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
mlx5e_execute_action(priv, hn); mlx5e_execute_l2_action(priv, hn);
} }
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{ {
struct mlx5e_eth_addr_hash_node *hn; struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp; struct hlist_node *tmp;
int i; int i;
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL; hn->action = MLX5E_ACTION_DEL;
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL; hn->action = MLX5E_ACTION_DEL;
if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
...@@ -814,7 +458,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work); set_rx_mode_work);
struct mlx5e_eth_addr_db *ea = &priv->eth_addr; struct mlx5e_l2_table *ea = &priv->fs.l2;
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
...@@ -830,27 +474,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) { if (enable_promisc) {
mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
if (!priv->vlan.filter_disabled) if (!priv->fs.vlan.filter_disabled)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0); 0);
} }
if (enable_allmulti) if (enable_allmulti)
mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast) if (enable_broadcast)
mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
mlx5e_handle_netdev_addr(priv); mlx5e_handle_netdev_addr(priv);
if (disable_broadcast) if (disable_broadcast)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti) if (disable_allmulti)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) { if (disable_promisc) {
if (!priv->vlan.filter_disabled) if (!priv->fs.vlan.filter_disabled)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0); 0);
mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); mlx5e_del_l2_flow_rule(priv, &ea->promisc);
} }
ea->promisc_enabled = promisc_enabled; ea->promisc_enabled = promisc_enabled;
...@@ -872,223 +516,453 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0; ft->num_groups = 0;
} }
void mlx5e_init_eth_addr(struct mlx5e_priv *priv) void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{ {
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
} }
#define MLX5E_MAIN_GROUP0_SIZE BIT(3) void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
MLX5E_MAIN_GROUP1_SIZE +\
MLX5E_MAIN_GROUP2_SIZE +\
MLX5E_MAIN_GROUP3_SIZE +\
MLX5E_MAIN_GROUP4_SIZE +\
MLX5E_MAIN_GROUP5_SIZE +\
MLX5E_MAIN_GROUP6_SIZE +\
MLX5E_MAIN_GROUP7_SIZE +\
MLX5E_MAIN_GROUP8_SIZE)
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
{ {
u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); mlx5e_destroy_groups(ft);
u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, kfree(ft->g);
match_criteria.outer_headers.dmac_47_16); mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
}
static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
int i;
for (i = 0; i < MLX5E_NUM_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->rules[i])) {
mlx5_del_flow_rule(ttc->rules[i]);
ttc->rules[i] = NULL;
}
}
}
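
A note on the cleanup above: the rules[] slots may hold a live rule, NULL, or an errno encoded as a pointer, so IS_ERR_OR_NULL() guards the delete. A minimal user-space sketch of the kernel's ERR_PTR convention (re-implementing helpers that normally come from <linux/err.h>; MAX_ERRNO value as in the kernel):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
            return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            int rule = 42;                  /* stands in for a real flow rule */
            void *slots[3] = { &rule, NULL, ERR_PTR(-12 /* ENOMEM */) };

            for (int i = 0; i < 3; i++) {
                    if (IS_ERR_OR_NULL(slots[i]))
                            continue;       /* nothing to delete */
                    printf("slot %d holds a live rule\n", i);
                    slots[i] = NULL;        /* mimic mlx5_del_flow_rule + reset */
            }
            return 0;
    }
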
static struct {
u16 etype;
u8 proto;
} ttc_rules[] = {
[MLX5E_TT_IPV4_TCP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_TCP,
},
[MLX5E_TT_IPV6_TCP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_TCP,
},
[MLX5E_TT_IPV4_UDP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_UDP,
},
[MLX5E_TT_IPV6_UDP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_UDP,
},
[MLX5E_TT_IPV4_IPSEC_AH] = {
.etype = ETH_P_IP,
.proto = IPPROTO_AH,
},
[MLX5E_TT_IPV6_IPSEC_AH] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_AH,
},
[MLX5E_TT_IPV4_IPSEC_ESP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_ESP,
},
[MLX5E_TT_IPV6_IPSEC_ESP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_ESP,
},
[MLX5E_TT_IPV4] = {
.etype = ETH_P_IP,
.proto = 0,
},
[MLX5E_TT_IPV6] = {
.etype = ETH_P_IPV6,
.proto = 0,
},
[MLX5E_TT_ANY] = {
.etype = 0,
.proto = 0,
},
};
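
The table above pairs an ethertype with an optional IP protocol: a zero proto means match on the L3 header only, and MLX5E_TT_ANY matches everything. A user-space sketch of the classification this encodes, with a reduced traffic-type set (the real matching is done by the HW steering engine; names here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_IP   0x0800
    #define ETH_P_IPV6 0x86DD

    enum tt { TT_IPV4_TCP, TT_IPV4, TT_IPV6, TT_ANY, TT_MAX };

    static const struct { uint16_t etype; uint8_t proto; } rules[TT_MAX] = {
            [TT_IPV4_TCP] = { ETH_P_IP, 6 /* IPPROTO_TCP */ },
            [TT_IPV4]     = { ETH_P_IP, 0 },
            [TT_IPV6]     = { ETH_P_IPV6, 0 },
            [TT_ANY]      = { 0, 0 },
    };

    /* Most specific rule wins: L4 match, then L3-only, then catch-all. */
    static enum tt classify(uint16_t etype, uint8_t proto)
    {
            for (int i = 0; i < TT_MAX; i++)
                    if (rules[i].proto && rules[i].etype == etype &&
                        rules[i].proto == proto)
                            return i;
            for (int i = 0; i < TT_MAX; i++)
                    if (!rules[i].proto && rules[i].etype == etype)
                            return i;
            return TT_ANY;
    }

    int main(void)
    {
            printf("%d\n", classify(ETH_P_IP, 6));   /* TT_IPV4_TCP */
            printf("%d\n", classify(ETH_P_IP, 47));  /* TT_IPV4 (GRE falls to L3) */
            printf("%d\n", classify(0x0806, 0));     /* TT_ANY (ARP) */
            return 0;
    }
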
static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
u16 etype,
u8 proto)
{
struct mlx5_flow_rule *rule;
u8 match_criteria_enable = 0;
u32 *match_criteria;
u32 *match_value;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
if (proto) {
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
}
if (etype) {
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
}
rule = mlx5_add_flow_rule(ft, match_criteria_enable,
match_criteria, match_value,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
}
out:
kvfree(match_criteria);
kvfree(match_value);
return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_rule **rules;
struct mlx5_flow_table *ft;
int tt;
int err; int err;
ttc = &priv->fs.ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
dest.tir_num = priv->direct_tir[0].tirn;
else
dest.tir_num = priv->indir_tirn[tt];
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
if (IS_ERR(rules[tt]))
goto del_rules;
}
return 0;
del_rules:
err = PTR_ERR(rules[tt]);
rules[tt] = NULL;
mlx5e_cleanup_ttc_rules(ttc);
return err;
}
#define MLX5E_TTC_NUM_GROUPS 3
#define MLX5E_TTC_GROUP1_SIZE BIT(3)
#define MLX5E_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
MLX5E_TTC_GROUP2_SIZE +\
MLX5E_TTC_GROUP3_SIZE)
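
The group sizes carve the table into consecutive index ranges: BIT(3) = 8 L4 entries (ethertype + ip_protocol), BIT(1) = 2 L3 entries (ethertype only), and BIT(0) = 1 catch-all, so MLX5E_TTC_TABLE_SIZE = 11. A quick check of the same start/end bookkeeping used in mlx5e_create_ttc_table_groups below:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            unsigned int sizes[] = { BIT(3), BIT(1), BIT(0) };
            unsigned int ix = 0;

            for (int g = 0; g < 3; g++) {
                    unsigned int start = ix;
                    ix += sizes[g];
                    printf("group %d: flow index %u..%u\n", g, start, ix - 1);
            }
            printf("table size: %u\n", ix);  /* 11 */
            return 0;
    }
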
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5e_flow_table *ft = &ttc->ft;
int ix = 0; int ix = 0;
u32 *in;
int err;
u8 *mc;
memset(in, 0, inlen); ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); sizeof(*ft->g), GFP_KERNEL);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); if (!ft->g)
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); return -ENOMEM;
MLX5_SET_CFG(in, start_flow_index, ix); in = mlx5_vzalloc(inlen);
ix += MLX5E_MAIN_GROUP0_SIZE; if (!in) {
MLX5_SET_CFG(in, end_flow_index, ix - 1); kfree(ft->g);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); return -ENOMEM;
if (IS_ERR(ft->g[ft->num_groups])) }
goto err_destroy_groups;
ft->num_groups++;
memset(in, 0, inlen); /* L4 Group */
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP1_SIZE; ix += MLX5E_TTC_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err;
ft->num_groups++; ft->num_groups++;
memset(in, 0, inlen); /* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP2_SIZE; ix += MLX5E_TTC_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err;
ft->num_groups++; ft->num_groups++;
/* Any Group */
memset(in, 0, inlen); memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
eth_broadcast_addr(dmac);
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP3_SIZE; ix += MLX5E_TTC_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err;
ft->num_groups++; ft->num_groups++;
memset(in, 0, inlen); kvfree(in);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); return 0;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
eth_broadcast_addr(dmac);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP4_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
memset(in, 0, inlen); err:
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); err = PTR_ERR(ft->g[ft->num_groups]);
eth_broadcast_addr(dmac); ft->g[ft->num_groups] = NULL;
MLX5_SET_CFG(in, start_flow_index, ix); kvfree(in);
ix += MLX5E_MAIN_GROUP5_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
memset(in, 0, inlen); return err;
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); }
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
dmac[0] = 0x01; {
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
err = mlx5e_create_ttc_table_groups(ttc);
if (err)
goto err;
err = mlx5e_generate_ttc_table_rules(priv);
if (err)
goto err;
return 0;
err:
mlx5e_destroy_flow_table(ft);
return err;
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
mlx5_del_flow_rule(ai->rule);
ai->rule = NULL;
}
}
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
u32 *match_criteria;
u32 *match_value;
int err = 0;
u8 *mc_dmac;
u8 *mv_dmac;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto add_l2_rule_out;
}
mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers.dmac_47_16);
mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.ttc.ft.t;
switch (type) {
case MLX5E_FULLMATCH:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
eth_broadcast_addr(mc_dmac);
ether_addr_copy(mv_dmac, ai->addr);
break;
case MLX5E_ALLMULTI:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
case MLX5E_PROMISC:
break;
}
ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
match_value,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
add_l2_rule_out:
kvfree(match_criteria);
kvfree(match_value);
return err;
}
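
The three rule types above differ only in the mask/value pair: MLX5E_FULLMATCH uses an all-ones dmac mask against the exact address, MLX5E_ALLMULTI masks just the multicast bit (bit 0 of the first octet), and MLX5E_PROMISC sets no criteria so everything matches. A user-space sketch of that masked-match semantics (the helper name is made up; hardware does the actual comparison):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define ETH_ALEN 6

    static int dmac_matches(const uint8_t *mask, const uint8_t *value,
                            const uint8_t *pkt_dmac)
    {
            for (int i = 0; i < ETH_ALEN; i++)
                    if ((pkt_dmac[i] & mask[i]) != (value[i] & mask[i]))
                            return 0;
            return 1;
    }

    int main(void)
    {
            uint8_t full_mask[ETH_ALEN], mc_mask[ETH_ALEN] = { 0x01 };
            uint8_t promisc_mask[ETH_ALEN] = { 0 };   /* matches anything */
            uint8_t sta[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint8_t mc[ETH_ALEN]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            memset(full_mask, 0xff, ETH_ALEN);        /* MLX5E_FULLMATCH */

            printf("%d\n", dmac_matches(full_mask, sta, sta));    /* 1 */
            printf("%d\n", dmac_matches(full_mask, sta, mc));     /* 0 */
            printf("%d\n", dmac_matches(mc_mask, mc, mc));        /* 1: allmulti */
            printf("%d\n", dmac_matches(mc_mask, mc, sta));       /* 0: unicast */
            printf("%d\n", dmac_matches(promisc_mask, sta, mc));  /* 1: promisc */
            return 0;
    }
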
#define MLX5E_NUM_L2_GROUPS 3
#define MLX5E_L2_GROUP1_SIZE BIT(0)
#define MLX5E_L2_GROUP2_SIZE BIT(15)
#define MLX5E_L2_GROUP3_SIZE BIT(0)
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
MLX5E_L2_GROUP2_SIZE +\
MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5e_flow_table *ft = &l2_table->ft;
int ix = 0;
u8 *mc_dmac;
u32 *in;
int err;
u8 *mc;
ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
in = mlx5_vzalloc(inlen);
if (!in) {
kfree(ft->g);
return -ENOMEM;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
/* Flow Group for promiscuous */
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP6_SIZE; ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err_destroy_groups;
ft->num_groups++; ft->num_groups++;
memset(in, 0, inlen); /* Flow Group for full match */
eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP7_SIZE; ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err_destroy_groups;
ft->num_groups++; ft->num_groups++;
memset(in, 0, inlen); /* Flow Group for allmulti */
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); eth_zero_addr(mc_dmac);
dmac[0] = 0x01; mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix); MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_MAIN_GROUP8_SIZE; ix += MLX5E_L2_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups; goto err_destroy_groups;
ft->num_groups++; ft->num_groups++;
kvfree(in);
return 0; return 0;
err_destroy_groups: err_destroy_groups:
err = PTR_ERR(ft->g[ft->num_groups]); err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL; ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft); mlx5e_destroy_groups(ft);
kvfree(in);
return err; return err;
} }
static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{ {
u32 *in; mlx5e_destroy_flow_table(&priv->fs.l2.ft);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
err = __mlx5e_create_main_groups(ft, in, inlen);
kvfree(in);
return err;
} }
static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{ {
struct mlx5e_flow_table *ft = &priv->fts.main; struct mlx5e_l2_table *l2_table = &priv->fs.l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
int err; int err;
ft->num_groups = 0; ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
if (IS_ERR(ft->t)) { if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t); err = PTR_ERR(ft->t);
ft->t = NULL; ft->t = NULL;
return err; return err;
} }
ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
goto err_destroy_main_flow_table;
}
err = mlx5e_create_main_groups(ft); err = mlx5e_create_l2_table_groups(l2_table);
if (err) if (err)
goto err_free_g; goto err_destroy_flow_table;
return 0;
err_free_g: return 0;
kfree(ft->g);
err_destroy_main_flow_table: err_destroy_flow_table:
mlx5_destroy_flow_table(ft->t); mlx5_destroy_flow_table(ft->t);
ft->t = NULL; ft->t = NULL;
return err; return err;
} }
static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
mlx5e_destroy_groups(ft);
kfree(ft->g);
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
}
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
mlx5e_destroy_flow_table(&priv->fts.main);
}
#define MLX5E_NUM_VLAN_GROUPS 2 #define MLX5E_NUM_VLAN_GROUPS 2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1) #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE) MLX5E_VLAN_GROUP1_SIZE)
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in, static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen) int inlen)
{ {
int err; int err;
...@@ -1128,7 +1002,7 @@ static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
return err; return err;
} }
static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{ {
u32 *in; u32 *in;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
...@@ -1138,19 +1012,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
if (!in) if (!in)
return -ENOMEM; return -ENOMEM;
err = __mlx5e_create_vlan_groups(ft, in, inlen); err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
kvfree(in); kvfree(in);
return err; return err;
} }
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{ {
struct mlx5e_flow_table *ft = &priv->fts.vlan; struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
int err; int err;
ft->num_groups = 0; ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
if (IS_ERR(ft->t)) { if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t); err = PTR_ERR(ft->t);
...@@ -1160,65 +1035,90 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) { if (!ft->g) {
err = -ENOMEM; err = -ENOMEM;
goto err_destroy_vlan_flow_table; goto err_destroy_vlan_table;
} }
err = mlx5e_create_vlan_groups(ft); err = mlx5e_create_vlan_table_groups(ft);
if (err) if (err)
goto err_free_g; goto err_free_g;
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
if (err)
goto err_destroy_vlan_flow_groups;
return 0; return 0;
err_destroy_vlan_flow_groups:
mlx5e_destroy_groups(ft);
err_free_g: err_free_g:
kfree(ft->g); kfree(ft->g);
err_destroy_vlan_table:
err_destroy_vlan_flow_table:
mlx5_destroy_flow_table(ft->t); mlx5_destroy_flow_table(ft->t);
ft->t = NULL; ft->t = NULL;
return err; return err;
} }
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{ {
mlx5e_destroy_flow_table(&priv->fts.vlan); mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
} }
int mlx5e_create_flow_tables(struct mlx5e_priv *priv) int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{ {
int err; int err;
priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL); MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fts.ns) if (!priv->fs.ns)
return -EINVAL; return -EINVAL;
err = mlx5e_create_vlan_flow_table(priv); err = mlx5e_arfs_create_tables(priv);
if (err) if (err) {
return err; netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_main_flow_table(priv); err = mlx5e_create_ttc_table(priv);
if (err) if (err) {
goto err_destroy_vlan_flow_table; netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); err = mlx5e_create_l2_table(priv);
if (err) if (err) {
goto err_destroy_main_flow_table; netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
err);
goto err_destroy_ttc_table;
}
err = mlx5e_create_vlan_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
err);
goto err_destroy_l2_table;
}
return 0; return 0;
err_destroy_main_flow_table: err_destroy_l2_table:
mlx5e_destroy_main_flow_table(priv); mlx5e_destroy_l2_table(priv);
err_destroy_vlan_flow_table: err_destroy_ttc_table:
mlx5e_destroy_vlan_flow_table(priv); mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
return err; return err;
} }
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{ {
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_main_flow_table(priv); mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_vlan_flow_table(priv); mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
} }
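
Creation order here is deliberately bottom-up, aRFS first and vlan last, so each table can name the next one as a rule destination the moment it is built; teardown runs the opposite way. A condensed, hypothetical sketch of the goto-unwind ladder this code follows (stage names illustrative):

    #include <stdio.h>

    static int create_stage(const char *name, int fail)
    {
            if (fail)
                    return -1;
            printf("created %s\n", name);
            return 0;
    }

    static void destroy_stage(const char *name)
    {
            printf("destroyed %s\n", name);
    }

    static int create_all(int fail_at)
    {
            int err;

            if ((err = create_stage("arfs", fail_at == 0))) return err;
            if ((err = create_stage("ttc",  fail_at == 1))) goto err_arfs;
            if ((err = create_stage("l2",   fail_at == 2))) goto err_ttc;
            if ((err = create_stage("vlan", fail_at == 3))) goto err_l2;
            return 0;

    err_l2:   destroy_stage("l2");
    err_ttc:  destroy_stage("ttc");
    err_arfs: destroy_stage("arfs");
            return err;
    }

    int main(void)
    {
            return create_all(2);  /* l2 fails: ttc and arfs are unwound */
    }
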
...@@ -1340,48 +1340,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
int ix = i; int ix = i;
u32 rqn;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix]; ix = priv->params.indirection_rqt[ix];
MLX5_SET(rqtc, rqtc, rq_num[i], rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn : priv->channel[ix]->rq.rqn :
priv->drop_rq.rqn); priv->drop_rq.rqn;
MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
} }
} }
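
When the RSS hash function is XOR, the indirection entries are filled in bit-reversed index order so adjacent hash buckets spread across channels. A plausible stand-alone version of mlx5e_bits_invert (the real helper lives in en.h; treat this as a sketch):

    #include <stdio.h>

    /* Reverse the low 'size' bits of 'a'. */
    static unsigned int bits_invert(unsigned long a, int size)
    {
            unsigned int inv = 0;

            for (int i = 0; i < size; i++)
                    inv |= ((a >> i) & 1) << (size - i - 1);
            return inv;
    }

    int main(void)
    {
            /* log size 7 -> 128-entry indirection table */
            for (unsigned long i = 0; i < 8; i++)
                    printf("%lu -> %u\n", i, bits_invert(i, 7));
            /* 0->0, 1->64, 2->32, 3->96, ... */
            return 0;
    }
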
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
enum mlx5e_rqt_ix rqt_ix) int ix)
{ {
u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn :
priv->drop_rq.rqn;
switch (rqt_ix) { MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
case MLX5E_INDIRECTION_RQT:
mlx5e_fill_indir_rqt_rqns(priv, rqtc);
break;
default: /* MLX5E_SINGLE_RQ_RQT */
MLX5_SET(rqtc, rqtc, rq_num[0],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[0]->rq.rqn :
priv->drop_rq.rqn);
break;
}
} }
static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
{ {
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *rqtc; void *rqtc;
int inlen; int inlen;
int sz;
int err; int err;
u32 *in;
sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
...@@ -1393,26 +1381,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); if (sz > 1) /* RSS */
mlx5e_fill_indir_rqt_rqns(priv, rqtc);
else
mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
kvfree(in); kvfree(in);
return err;
}
static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
{
mlx5_core_destroy_rqt(priv->mdev, rqtn);
}
static int mlx5e_create_rqts(struct mlx5e_priv *priv)
{
int nch = mlx5e_get_max_num_channels(priv->mdev);
u32 *rqtn;
int err;
int ix;
/* Indirect RQT */
rqtn = &priv->indir_rqtn;
err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
if (err)
return err; return err;
/* Direct RQTs */
for (ix = 0; ix < nch; ix++) {
rqtn = &priv->direct_tir[ix].rqtn;
err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
if (err)
goto err_destroy_rqts;
}
return 0;
err_destroy_rqts:
for (ix--; ix >= 0; ix--)
mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
mlx5e_destroy_rqt(priv, priv->indir_rqtn);
return err;
}
static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
{
int nch = mlx5e_get_max_num_channels(priv->mdev);
int i;
for (i = 0; i < nch; i++)
mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
mlx5e_destroy_rqt(priv, priv->indir_rqtn);
} }
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{ {
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *rqtc; void *rqtc;
int inlen; int inlen;
int sz; u32 *in;
int err; int err;
sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
if (!in) if (!in)
...@@ -1421,27 +1456,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
if (sz > 1) /* RSS */
mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); mlx5e_fill_indir_rqt_rqns(priv, rqtc);
else
mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in); kvfree(in);
return err; return err;
} }
static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{ {
mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); u32 rqtn;
mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); int ix;
rqtn = priv->indir_rqtn;
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
for (ix = 0; ix < priv->params.num_channels; ix++) {
rqtn = priv->direct_tir[ix].rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, ix);
}
} }
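
Redirecting modifies RQT contents in place, so TIRs, and any aRFS rules pointing at them, keep referencing the same rqtn while the underlying RQs change; with the channels closed every slot falls back to the drop RQ. The fallback selection in miniature (user-space sketch, made-up RQ numbers):

    #include <stdio.h>
    #include <stdbool.h>

    #define DROP_RQN 0xdeadu

    static unsigned int pick_rqn(bool opened, const unsigned int *channel_rqns,
                                 int ix)
    {
            return opened ? channel_rqns[ix] : DROP_RQN;
    }

    int main(void)
    {
            unsigned int rqns[4] = { 100, 101, 102, 103 };

            for (int ix = 0; ix < 4; ix++)
                    printf("slot %d -> %#x\n", ix, pick_rqn(false, rqns, ix));
            /* all slots point at the drop RQ while the device is down */
            return 0;
    }
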
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
...@@ -1486,6 +1525,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int inlen; int inlen;
int err; int err;
int tt; int tt;
int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in); inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
...@@ -1497,23 +1537,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv); mlx5e_build_tir_ctx_lro(tirc, priv);
for (tt = 0; tt < MLX5E_NUM_TT; tt++) { for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
inlen);
if (err) if (err)
break; goto free_in;
} }
for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
goto free_in;
}
free_in:
kvfree(in); kvfree(in);
return err; return err;
} }
static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
u32 tirn)
{ {
void *in; void *in;
int inlen; int inlen;
int err; int err;
int i;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in); inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
...@@ -1522,25 +1571,23 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
err = mlx5_core_modify_tir(mdev, tirn, in, inlen); for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
kvfree(in); inlen);
if (err)
return err; return err;
} }
static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
int err;
int i;
for (i = 0; i < MLX5E_NUM_TT; i++) { for (i = 0; i < priv->params.num_channels; i++) {
err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev, err = mlx5_core_modify_tir(priv->mdev,
priv->tirn[i]); priv->direct_tir[i].tirn, in,
inlen);
if (err) if (err)
return err; return err;
} }
kvfree(in);
return 0; return 0;
} }
...@@ -1644,6 +1691,9 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_redirect_rqts(priv); mlx5e_redirect_rqts(priv);
mlx5e_update_carrier(priv); mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv); mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
schedule_delayed_work(&priv->update_stats_work, 0); schedule_delayed_work(&priv->update_stats_work, 0);
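
Pointing netdev->rx_cpu_rmap at the core driver's IRQ reverse map is what lets the RFS core steer a flow to the RX queue whose interrupt runs closest to the consuming CPU. A hedged sketch of how such a map is typically built with the kernel's cpu_rmap API; the helper and the one-IRQ-per-channel assumption are illustrative, and the actual mlx5 call sites differ:

    #ifdef CONFIG_RFS_ACCEL
    #include <linux/netdevice.h>
    #include <linux/cpu_rmap.h>

    /* Hypothetical helper: one reverse-map entry per channel IRQ. The mlx5
     * core driver builds its map at EQ setup time; names here are made up. */
    static int build_rx_cpu_rmap(struct net_device *netdev,
                                 const int *channel_irqs, int nch)
    {
            struct cpu_rmap *rmap = alloc_irq_cpu_rmap(nch);
            int i, err;

            if (!rmap)
                    return -ENOMEM;

            for (i = 0; i < nch; i++) {
                    err = irq_cpu_rmap_add(rmap, channel_irqs[i]);
                    if (err) {
                            free_irq_cpu_rmap(rmap);
                            return err;
                    }
            }
            netdev->rx_cpu_rmap = rmap;  /* consumed by the RFS core */
            return 0;
    }
    #endif
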
...@@ -1851,7 +1901,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv, tc); mlx5e_destroy_tis(priv, tc);
} }
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{ {
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
...@@ -1872,19 +1923,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv); mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
switch (tt) {
case MLX5E_TT_ANY:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
break;
default:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_INDIRECTION_RQT]);
mlx5e_build_tir_ctx_hash(tirc, priv); mlx5e_build_tir_ctx_hash(tirc, priv);
break;
}
switch (tt) { switch (tt) {
case MLX5E_TT_IPV4_TCP: case MLX5E_TT_IPV4_TCP:
...@@ -1964,64 +2004,90 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP); MLX5_HASH_IP);
break; break;
default:
WARN_ONCE(true,
"mlx5e_build_indir_tir_ctx: bad traffic type!\n");
} }
} }
static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt) static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
u32 rqtn)
{ {
struct mlx5_core_dev *mdev = priv->mdev; MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
u32 *in;
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
int nch = mlx5e_get_max_num_channels(priv->mdev);
void *tirc; void *tirc;
int inlen; int inlen;
u32 *tirn;
int err; int err;
u32 *in;
int ix;
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in); inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
if (!in) if (!in)
return -ENOMEM; return -ENOMEM;
/* indirect tirs */
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(in, 0, inlen);
tirn = &priv->indir_tirn[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tirc, tt);
mlx5e_build_tir_ctx(priv, tirc, tt); err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
kvfree(in);
return err;
}
static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
int err;
int i;
for (i = 0; i < MLX5E_NUM_TT; i++) {
err = mlx5e_create_tir(priv, i);
if (err) if (err)
goto err_destroy_tirs; goto err_destroy_tirs;
} }
/* direct tirs */
for (ix = 0; ix < nch; ix++) {
memset(in, 0, inlen);
tirn = &priv->direct_tir[ix].tirn;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tirc,
priv->direct_tir[ix].rqtn);
err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
if (err)
goto err_destroy_ch_tirs;
}
kvfree(in);
return 0; return 0;
err_destroy_ch_tirs:
for (ix--; ix >= 0; ix--)
mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
err_destroy_tirs: err_destroy_tirs:
for (i--; i >= 0; i--) for (tt--; tt >= 0; tt--)
mlx5e_destroy_tir(priv, i); mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
kvfree(in);
return err; return err;
} }
static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{ {
int nch = mlx5e_get_max_num_channels(priv->mdev);
int i; int i;
for (i = 0; i < MLX5E_NUM_TT; i++) for (i = 0; i < nch; i++)
mlx5e_destroy_tir(priv, i); mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
} }
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
...@@ -2242,6 +2308,21 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
return err; return err;
} }
#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
if (enable)
err = mlx5e_arfs_enable(priv);
else
err = mlx5e_arfs_disable(priv);
return err;
}
#endif
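
set_feature_arfs is the NETIF_F_NTUPLE hook for mlx5e_handle_feature below, which only invokes a handler when the wanted state of a feature bit differs from the current one. The dispatch pattern in miniature (user-space sketch, illustrative bit value):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef int (*feature_handler)(bool enable);

    static int handle_feature(uint64_t wanted, uint64_t current_f,
                              uint64_t feature, feature_handler handler)
    {
            bool want = wanted & feature;
            bool have = current_f & feature;

            if (want == have)
                    return 0;               /* nothing to do */
            return handler(want);
    }

    static int set_arfs(bool enable)
    {
            printf("aRFS %s\n", enable ? "enabled" : "disabled");
            return 0;
    }

    int main(void)
    {
            uint64_t NTUPLE = 1u << 5;      /* illustrative bit */

            handle_feature(NTUPLE, 0, NTUPLE, set_arfs);      /* toggles on */
            handle_feature(NTUPLE, NTUPLE, NTUPLE, set_arfs); /* no-op */
            return 0;
    }
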
static int mlx5e_handle_feature(struct net_device *netdev, static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t wanted_features, netdev_features_t wanted_features,
netdev_features_t feature, netdev_features_t feature,
...@@ -2281,6 +2362,10 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_rx_all); set_feature_rx_all);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
set_feature_rx_vlan); set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
set_feature_arfs);
#endif
return err ? -EINVAL : 0; return err ? -EINVAL : 0;
} }
...@@ -2496,6 +2581,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features, .ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu, .ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl, .ndo_do_ioctl = mlx5e_ioctl,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
}; };
static const struct net_device_ops mlx5e_netdev_ops_sriov = { static const struct net_device_ops mlx5e_netdev_ops_sriov = {
...@@ -2515,6 +2603,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_add_vxlan_port = mlx5e_add_vxlan_port, .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
.ndo_del_vxlan_port = mlx5e_del_vxlan_port, .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check, .ndo_features_check = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
.ndo_get_vf_config = mlx5e_get_vf_config, .ndo_get_vf_config = mlx5e_get_vf_config,
...@@ -2737,8 +2828,12 @@ static void mlx5e_build_netdev(struct net_device *netdev)
if (FT_CAP(flow_modify_en) && if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) && FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) && FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) FT_CAP(flow_table_modify)) {
priv->netdev->hw_features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;
...@@ -2894,33 +2989,27 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_destroy_tises; goto err_destroy_tises;
} }
err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT); err = mlx5e_create_rqts(priv);
if (err) { if (err) {
mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err); mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
goto err_close_drop_rq; goto err_close_drop_rq;
} }
err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
if (err) {
mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
goto err_destroy_rqt_indir;
}
err = mlx5e_create_tirs(priv); err = mlx5e_create_tirs(priv);
if (err) { if (err) {
mlx5_core_warn(mdev, "create tirs failed, %d\n", err); mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
goto err_destroy_rqt_single; goto err_destroy_rqts;
} }
err = mlx5e_create_flow_tables(priv); err = mlx5e_create_flow_steering(priv);
if (err) { if (err) {
mlx5_core_warn(mdev, "create flow tables failed, %d\n", err); mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_tirs; goto err_destroy_tirs;
} }
mlx5e_create_q_counter(priv); mlx5e_create_q_counter(priv);
mlx5e_init_eth_addr(priv); mlx5e_init_l2_addr(priv);
mlx5e_vxlan_init(priv); mlx5e_vxlan_init(priv);
...@@ -2938,8 +3027,11 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_tc_cleanup; goto err_tc_cleanup;
} }
if (mlx5e_vxlan_allowed(mdev)) if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
vxlan_get_rx_port(netdev); vxlan_get_rx_port(netdev);
rtnl_unlock();
}
mlx5e_enable_async_events(priv); mlx5e_enable_async_events(priv);
schedule_work(&priv->set_rx_mode_work); schedule_work(&priv->set_rx_mode_work);
...@@ -2951,16 +3043,13 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err_dealloc_q_counters: err_dealloc_q_counters:
mlx5e_destroy_q_counter(priv); mlx5e_destroy_q_counter(priv);
mlx5e_destroy_flow_tables(priv); mlx5e_destroy_flow_steering(priv);
err_destroy_tirs: err_destroy_tirs:
mlx5e_destroy_tirs(priv); mlx5e_destroy_tirs(priv);
err_destroy_rqt_single: err_destroy_rqts:
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); mlx5e_destroy_rqts(priv);
err_destroy_rqt_indir:
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
err_close_drop_rq: err_close_drop_rq:
mlx5e_close_drop_rq(priv); mlx5e_close_drop_rq(priv);
...@@ -3012,10 +3101,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5e_tc_cleanup(priv); mlx5e_tc_cleanup(priv);
mlx5e_vxlan_cleanup(priv); mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_q_counter(priv); mlx5e_destroy_q_counter(priv);
mlx5e_destroy_flow_tables(priv); mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_tirs(priv); mlx5e_destroy_tirs(priv);
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); mlx5e_destroy_rqts(priv);
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_close_drop_rq(priv); mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv); mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
......
...@@ -46,8 +46,8 @@ struct mlx5e_tc_flow {
struct mlx5_flow_rule *rule; struct mlx5_flow_rule *rule;
}; };
#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_NUM_GROUPS 4
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
u32 *match_c, u32 *match_v, u32 *match_c, u32 *match_v,
...@@ -55,33 +55,35 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
{ {
struct mlx5_flow_destination dest = { struct mlx5_flow_destination dest = {
.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
{.ft = priv->fts.vlan.t}, {.ft = priv->fs.vlan.ft.t},
}; };
struct mlx5_flow_rule *rule; struct mlx5_flow_rule *rule;
bool table_created = false; bool table_created = false;
if (IS_ERR_OR_NULL(priv->fts.tc.t)) { if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
priv->fts.tc.t = priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, MLX5E_TC_PRIO,
MLX5E_TC_FLOW_TABLE_NUM_GROUPS); MLX5E_TC_TABLE_NUM_ENTRIES,
if (IS_ERR(priv->fts.tc.t)) { MLX5E_TC_TABLE_NUM_GROUPS,
0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev, netdev_err(priv->netdev,
"Failed to create tc offload table\n"); "Failed to create tc offload table\n");
return ERR_CAST(priv->fts.tc.t); return ERR_CAST(priv->fs.tc.t);
} }
table_created = true; table_created = true;
} }
rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS, rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
match_c, match_v, match_c, match_v,
action, flow_tag, action, flow_tag,
action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL); action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
if (IS_ERR(rule) && table_created) { if (IS_ERR(rule) && table_created) {
mlx5_destroy_flow_table(priv->fts.tc.t); mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fts.tc.t = NULL; priv->fs.tc.t = NULL;
} }
return rule; return rule;
...@@ -93,8 +95,8 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_del_flow_rule(rule); mlx5_del_flow_rule(rule);
if (!mlx5e_tc_num_filters(priv)) { if (!mlx5e_tc_num_filters(priv)) {
mlx5_destroy_flow_table(priv->fts.tc.t); mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fts.tc.t = NULL; priv->fs.tc.t = NULL;
} }
} }
...@@ -310,7 +312,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f) struct tc_cls_flower_offload *f)
{ {
struct mlx5e_tc_flow_table *tc = &priv->fts.tc; struct mlx5e_tc_table *tc = &priv->fs.tc;
u32 *match_c; u32 *match_c;
u32 *match_v; u32 *match_v;
int err = 0; int err = 0;
...@@ -376,7 +378,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f) struct tc_cls_flower_offload *f)
{ {
struct mlx5e_tc_flow *flow; struct mlx5e_tc_flow *flow;
struct mlx5e_tc_flow_table *tc = &priv->fts.tc; struct mlx5e_tc_table *tc = &priv->fs.tc;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params); tc->ht_params);
...@@ -401,7 +403,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
int mlx5e_tc_init(struct mlx5e_priv *priv) int mlx5e_tc_init(struct mlx5e_priv *priv)
{ {
struct mlx5e_tc_flow_table *tc = &priv->fts.tc; struct mlx5e_tc_table *tc = &priv->fs.tc;
tc->ht_params = mlx5e_tc_flow_ht_params; tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params); return rhashtable_init(&tc->ht, &tc->ht_params);
...@@ -418,12 +420,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_cleanup(struct mlx5e_priv *priv) void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{ {
struct mlx5e_tc_flow_table *tc = &priv->fts.tc; struct mlx5e_tc_table *tc = &priv->fs.tc;
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
if (!IS_ERR_OR_NULL(priv->fts.tc.t)) { if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(priv->fts.tc.t); mlx5_destroy_flow_table(tc->t);
priv->fts.tc.t = NULL; tc->t = NULL;
} }
} }
...@@ -45,7 +45,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{ {
return atomic_read(&priv->fts.tc.ht.nelems); return atomic_read(&priv->fs.tc.ht.nelems);
} }
#endif /* __MLX5_EN_TC_H__ */ #endif /* __MLX5_EN_TC_H__ */
...@@ -401,7 +401,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen); memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size); fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
if (IS_ERR_OR_NULL(fdb)) { if (IS_ERR_OR_NULL(fdb)) {
err = PTR_ERR(fdb); err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err); esw_warn(dev, "Failed to create FDB Table err %d\n", err);
......
...@@ -40,18 +40,18 @@
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node)) sizeof(struct init_tree_node))
#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
...) {.type = FS_TYPE_PRIO,\ ...) {.type = FS_TYPE_PRIO,\
.min_ft_level = min_level_val,\ .min_ft_level = min_level_val,\
.max_ft = max_ft_val,\ .num_levels = num_levels_val,\
.num_leaf_prios = num_prios_val,\ .num_leaf_prios = num_prios_val,\
.caps = caps_val,\ .caps = caps_val,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\ .children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
} }
#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\ __VA_ARGS__)\
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
...@@ -67,17 +67,20 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} } .caps = (long[]) {__VA_ARGS__} }
#define LEFTOVERS_MAX_FT 1 #define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1 #define LEFTOVERS_NUM_PRIOS 1
#define BY_PASS_PRIO_MAX_FT 1
#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_MAX_FT)
#define KERNEL_MAX_FT 3 #define BY_PASS_PRIO_NUM_LEVELS 1
#define KERNEL_NUM_PRIOS 2 #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
#define KENREL_MIN_LEVEL 2 LEFTOVERS_NUM_PRIOS)
#define ANCHOR_MAX_FT 1 /* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1 #define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
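
Worked numbers for the new level budget: four chained kernel-NIC tables (vlan, l2, ttc, aRFS) plus one tc level give KERNEL_MIN_LEVEL = 5, and the bypass plus leftovers priorities stack above that. The check below assumes MLX5_BY_PASS_NUM_PRIOS is 8, which may not hold for every kernel version:

    #include <stdio.h>

    #define KERNEL_NIC_PRIO_NUM_LEVELS 4       /* vlan, l2, ttc, aRFS */
    #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)  /* + tc */
    #define MLX5_BY_PASS_NUM_PRIOS 8           /* assumption, see above */
    #define LEFTOVERS_NUM_PRIOS 1
    #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS + \
                               LEFTOVERS_NUM_PRIOS)
    #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

    int main(void)
    {
            printf("KERNEL_MIN_LEVEL  = %d\n", KERNEL_MIN_LEVEL);   /* 5 */
            printf("BY_PASS_MIN_LEVEL = %d\n", BY_PASS_MIN_LEVEL);  /* 14 */
            printf("ANCHOR_MIN_LEVEL  = %d\n", ANCHOR_MIN_LEVEL);   /* 15 */
            return 0;
    }
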
struct node_caps { struct node_caps {
...@@ -92,7 +95,7 @@ static struct init_tree_node {
int min_ft_level; int min_ft_level;
int num_leaf_prios; int num_leaf_prios;
int prio; int prio;
int max_ft; int num_levels;
} root_fs = { } root_fs = {
.type = FS_TYPE_NAMESPACE, .type = FS_TYPE_NAMESPACE,
.ar_size = 4, .ar_size = 4,
...@@ -102,17 +105,20 @@ static struct init_tree_node {
FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, BY_PASS_PRIO_NUM_LEVELS))),
ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))), ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
} }
}; };
...@@ -222,19 +228,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL; return NULL;
} }
static unsigned int find_next_free_level(struct fs_prio *prio)
{
if (!list_empty(&prio->node.children)) {
struct mlx5_flow_table *ft;
ft = list_last_entry(&prio->node.children,
struct mlx5_flow_table,
node.list);
return ft->level + 1;
}
return prio->start_level;
}
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{ {
unsigned int i; unsigned int i;
...@@ -615,7 +608,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err; return err;
} }
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-					struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+				 struct mlx5_flow_destination *dest)
 {
 	struct mlx5_flow_table *ft;
@@ -693,9 +686,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
 	return err;
 }
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+				struct fs_prio *prio)
+{
+	struct list_head *prev = &prio->node.children;
+	struct mlx5_flow_table *iter;
+
+	fs_for_each_ft(iter, prio) {
+		if (iter->level > ft->level)
+			break;
+		prev = &iter->node.list;
+	}
+	list_add(&ft->node.list, prev);
+}
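Because callers may now create tables at arbitrary levels within a priority, and in reverse order, list_add_flow_table() keeps the priority's child list sorted by level rather than appending blindly, so list position continues to reflect level order when a new table is connected to its neighbors.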
 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-					       int prio,
-					       int max_fte)
+					       int prio, int max_fte,
+					       u32 level)
 {
 	struct mlx5_flow_table *next_ft = NULL;
 	struct mlx5_flow_table *ft;
@@ -716,12 +723,15 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 		err = -EINVAL;
 		goto unlock_root;
 	}
-	if (fs_prio->num_ft == fs_prio->max_ft) {
+	if (level >= fs_prio->num_levels) {
 		err = -ENOSPC;
 		goto unlock_root;
 	}
-
-	ft = alloc_flow_table(find_next_free_level(fs_prio),
+	/* The level is related to the
+	 * priority level range.
+	 */
+	level += fs_prio->start_level;
+	ft = alloc_flow_table(level,
 			      roundup_pow_of_two(max_fte),
 			      root->table_type);
 	if (!ft) {
@@ -742,7 +752,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 		goto destroy_ft;
 	lock_ref_node(&fs_prio->node);
 	tree_add_node(&ft->node, &fs_prio->node);
-	list_add_tail(&ft->node.list, &fs_prio->node.children);
+	list_add_flow_table(ft, fs_prio);
 	fs_prio->num_ft++;
 	unlock_ref_node(&fs_prio->node);
 	mutex_unlock(&root->chain_lock);
@@ -759,14 +769,15 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 							    int prio,
 							    int num_flow_table_entries,
-							    int max_num_groups)
+							    int max_num_groups,
+							    u32 level)
 {
 	struct mlx5_flow_table *ft;
 
 	if (max_num_groups > num_flow_table_entries)
 		return ERR_PTR(-EINVAL);
 
-	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
 	if (IS_ERR(ft))
 		return ft;
@@ -1065,6 +1076,20 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
 	return rule;
 }
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+			  u32 action,
+			  struct mlx5_flow_table *ft)
+{
+	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return true;
+
+	if (!dest || ((dest->type ==
+	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+	     (dest->ft->level <= ft->level)))
+		return false;
+
+	return true;
+}
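dest_is_valid() tightens the old NULL-destination check: a forwarding rule may only target a flow table at a strictly higher level than its own, which keeps table-to-table steering loop-free. A sketch of the effect, using hypothetical table and match-buffer variables that are not part of this patch:

	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = arfs_ft,	/* assume arfs_ft->level > ttc_ft->level */
	};
	struct mlx5_flow_rule *rule;

	/* Accepted: forwards to a strictly higher level. */
	rule = mlx5_add_flow_rule(ttc_ft, match_criteria_enable,
				  match_criteria, match_value,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);

	/* Rejected with ERR_PTR(-EINVAL): same or lower level. */
	dest.ft = ttc_ft;
	rule = mlx5_add_flow_rule(arfs_ft, match_criteria_enable,
				  match_criteria, match_value,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);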
 static struct mlx5_flow_rule *
 _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		    u8 match_criteria_enable,
@@ -1077,7 +1102,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 	struct mlx5_flow_group *g;
 	struct mlx5_flow_rule *rule;
 
-	if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+	if (!dest_is_valid(dest, action, ft))
 		return ERR_PTR(-EINVAL);
 
 	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
@@ -1311,7 +1336,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
-				      unsigned prio, int max_ft)
+				      unsigned int prio, int num_levels)
 {
 	struct fs_prio *fs_prio;
@@ -1322,7 +1347,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 	fs_prio->node.type = FS_TYPE_PRIO;
 	tree_init_node(&fs_prio->node, 1, NULL);
 	tree_add_node(&fs_prio->node, &ns->node);
-	fs_prio->max_ft = max_ft;
+	fs_prio->num_levels = num_levels;
 	fs_prio->prio = prio;
 	list_add_tail(&fs_prio->node.list, &ns->node.children);
@@ -1353,14 +1378,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
 	return ns;
 }
 
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
-			     *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+			     struct init_tree_node *prio_metadata)
 {
 	struct fs_prio *fs_prio;
 	int i;
 
 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
-		fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
 		if (IS_ERR(fs_prio))
 			return PTR_ERR(fs_prio);
 	}
@@ -1387,7 +1412,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
 				    struct init_tree_node *init_node,
 				    struct fs_node *fs_parent_node,
 				    struct init_tree_node *init_parent_node,
-				    int index)
+				    int prio)
 {
 	int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
 					      flow_table_properties_nic_receive.
@@ -1405,8 +1430,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
 		fs_get_obj(fs_ns, fs_parent_node);
 		if (init_node->num_leaf_prios)
-			return create_leaf_prios(fs_ns, init_node);
-		fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+			return create_leaf_prios(fs_ns, prio, init_node);
+		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
 		if (IS_ERR(fs_prio))
 			return PTR_ERR(fs_prio);
 		base = &fs_prio->node;
@@ -1419,11 +1444,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
 	} else {
 		return -EINVAL;
 	}
+	prio = 0;
 	for (i = 0; i < init_node->ar_size; i++) {
 		err = init_root_tree_recursive(dev, &init_node->children[i],
-					       base, init_node, i);
+					       base, init_node, prio);
 		if (err)
 			return err;
+		if (init_node->children[i].type == FS_TYPE_PRIO &&
+		    init_node->children[i].num_leaf_prios) {
+			prio += init_node->children[i].num_leaf_prios;
+		}
 	}
 
 	return 0;
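Note the bookkeeping change: the recursion now passes a running prio index instead of the child's array index, and a child that expands into multiple leaf priorities advances the index by num_leaf_prios, so each leaf priority receives a distinct, consecutive prio value.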
@@ -1479,9 +1509,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
 	struct fs_prio *prio;
 
 	fs_for_each_prio(prio, ns) {
-		/* This updates prio start_level and max_ft */
+		/* This updates prio start_level and num_levels */
 		set_prio_attrs_in_prio(prio, acc_level);
-		acc_level += prio->max_ft;
+		acc_level += prio->num_levels;
 	}
 	return acc_level;
 }
@@ -1493,11 +1523,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
 	prio->start_level = acc_level;
 	fs_for_each_ns(ns, prio)
-		/* This updates start_level and max_ft of ns's priority descendants */
+		/* This updates start_level and num_levels of ns's priority descendants */
 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
-	if (!prio->max_ft)
-		prio->max_ft = acc_level_ns - prio->start_level;
-	WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+	if (!prio->num_levels)
+		prio->num_levels = acc_level_ns - prio->start_level;
+	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
 }
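A worked example of the accounting, with assumed numbers: if the first priority is given num_levels = 8 and the next one num_levels = 5, set_prio_attrs() assigns them start_level 0 and 8 respectively. A table created at level 2 in the second priority then occupies absolute level 8 + 2 = 10, while a request for level 5 or above in that priority fails with -ENOSPC in mlx5_create_flow_table().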
 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1508,12 +1538,13 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
 	fs_for_each_prio(prio, ns) {
 		set_prio_attrs_in_prio(prio, start_level);
-		start_level += prio->max_ft;
+		start_level += prio->num_levels;
 	}
 }
 #define ANCHOR_PRIO 0
 #define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
 static int create_anchor_flow_table(struct mlx5_core_dev *dev)
 {
@@ -1523,7 +1554,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
 	if (!ns)
 		return -EINVAL;
-	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
 	if (IS_ERR(ft)) {
 		mlx5_core_err(dev, "Failed to create last anchor flow table");
 		return PTR_ERR(ft);
...
@@ -107,7 +107,7 @@ struct fs_fte {
 /* Type of children is mlx5_flow_table/namespace */
 struct fs_prio {
 	struct fs_node		node;
-	unsigned int		max_ft;
+	unsigned int		num_levels;
 	unsigned int		start_level;
 	unsigned int		prio;
 	unsigned int		num_ft;
...
@@ -48,6 +48,9 @@
 #include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include "mlx5_core.h"
 #include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
@@ -665,6 +668,12 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 
+#ifdef CONFIG_RFS_ACCEL
+	if (dev->rmap) {
+		free_irq_cpu_rmap(dev->rmap);
+		dev->rmap = NULL;
+	}
+#endif
 	spin_lock(&table->lock);
 	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
 		list_del(&eq->list);
@@ -691,6 +700,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 	INIT_LIST_HEAD(&table->comp_eqs_list);
 	ncomp_vec = table->num_comp_vectors;
 	nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+	if (!dev->rmap)
+		return -ENOMEM;
+#endif
 	for (i = 0; i < ncomp_vec; i++) {
 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -698,6 +712,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 			goto clean;
 		}
 
+#ifdef CONFIG_RFS_ACCEL
+		irq_cpu_rmap_add(dev->rmap,
+				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(dev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
...
@@ -560,6 +560,9 @@ struct mlx5_core_dev {
 	struct mlx5_profile	*profile;
 	atomic_t		num_qps;
 	u32			issi;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap		*rmap;
+#endif
 };
 
 struct mlx5_db {
...
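alloc_irq_cpu_rmap(), irq_cpu_rmap_add() and free_irq_cpu_rmap() above are the standard <linux/cpu_rmap.h> helpers; the new dev->rmap field keeps the resulting IRQ-to-CPU reverse map alive for the device's lifetime. For the map to be useful to aRFS, the ethernet driver is expected to publish it on its net device, roughly as in this sketch (the placement and the netdev/priv names are assumptions, not part of these hunks):

#ifdef CONFIG_RFS_ACCEL
	/* Let the RFS core translate a flow's current CPU into one of
	 * this device's completion IRQs, and hence into an RX queue. */
	netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif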
@@ -82,12 +82,14 @@ struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 				    int prio,
 				    int num_flow_table_entries,
-				    int max_num_groups);
+				    int max_num_groups,
+				    u32 level);
 
 struct mlx5_flow_table *
 mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 		       int prio,
-		       int num_flow_table_entries);
+		       int num_flow_table_entries,
+		       u32 level);
 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
 
 /* inbox should be set with the following values:
@@ -113,4 +115,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		   struct mlx5_flow_destination *dest);
 void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
 
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+				 struct mlx5_flow_destination *dest);
+
 #endif
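Taken together, the new level parameter and the newly exported mlx5_modify_rule_destination() give a caller both pieces needed here: since dest_is_valid() only allows forwarding to strictly higher levels, a destination table must exist before any lower-level table points at it, and an already-installed rule can be retargeted to another TIR without being torn down. A rough sketch under assumed names (the prios, sizes, levels and the direct_tir array are illustrative only):

	struct mlx5_flow_destination new_dest;
	struct mlx5_flow_table *arfs_ft, *ttc_ft;
	int err;

	/* Create the higher-level table first... */
	arfs_ft = mlx5_create_auto_grouped_flow_table(ns, 0, ARFS_TABLE_SIZE,
						      ARFS_NUM_GROUPS,
						      ARFS_FT_LEVEL);
	/* ...then the lower-level table whose rules forward into it. */
	ttc_ft = mlx5_create_auto_grouped_flow_table(ns, 0, TTC_TABLE_SIZE,
						     TTC_NUM_GROUPS,
						     TTC_FT_LEVEL);

	/* When a flow migrates from core i to core j, point its rule at
	 * core j's TIR instead of deleting and re-adding it. */
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	new_dest.tir_num = priv->direct_tir[j].tirn;
	err = mlx5_modify_rule_destination(rule, &new_dest);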