Commit 46c54f95 authored by David S. Miller

Merge tag 'mlx5-updates-2020-05-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-05-22

This series includes two updates and one cleanup patch

1) Tang Bin, a cleanup using IS_ERR()

2) Vlad introduces a new mlx5 kconfig flag for TC support

   This is required due to the high volume of current and upcoming
   development in the eswitch and representor areas, where some of the
   features are TC-based, such as the downstream MPLSoUDP patches and the
   upcoming representor bonding support for VF live migration and uplink
   representor dynamic loading.
   For this, Vlad kept TC-specific code in tc.c and rep/tc.c and organized
   the non-TC code in representor-specific files.

3) Eli Cohen adds support for MPLS over UDP encap and decap TC offloads.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 07a7f308 582234b4
@@ -78,9 +78,24 @@ config MLX5_ESWITCH
           Legacy SRIOV mode (L2 mac vlan steering based).
           Switchdev mode (eswitch offloads).
 
+config MLX5_CLS_ACT
+        bool "MLX5 TC classifier action support"
+        depends on MLX5_ESWITCH && NET_CLS_ACT
+        default y
+        help
+          mlx5 ConnectX offloads support for TC classifier action (NET_CLS_ACT),
+          works in both native NIC mode and Switchdev SRIOV mode.
+          Actions get attached to hardware-offloaded classifiers and are
+          invoked after a successful classification. Actions are used to
+          overwrite the classification result, instantly drop or redirect and/or
+          reformat packets at wire speed without involving the host CPU.
+
+          If set to N, TC offloads in both NIC and switchdev modes will be disabled.
+          If unsure, set to Y.
+
 config MLX5_TC_CT
         bool "MLX5 TC connection tracking offload support"
-        depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+        depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
         default y
         help
           Say Y here if you want to support offloading connection tracking rules
...
@@ -33,17 +33,19 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
 mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
-                                    lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
-                                    en/tc_tun_geneve.o diag/en_tc_tracepoint.o
 mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o lib/geneve.o lib/port_tun.o lag_mp.o
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
+                                    en/mapping.o esw/chains.o en/tc_tun.o \
+                                    en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
+                                    en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
 mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
 
 #
 # Core extra
 #
 mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-                                    ecpf.o rdma.o esw/chains.o
+                                    ecpf.o rdma.o
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
 mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
 mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
...
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __MLX5_EN_REP_NEIGH__
#define __MLX5_EN_REP_NEIGH__
#include "en.h"
#include "en_rep.h"
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv);
struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct mlx5e_neigh_hash_entry **nhe);
void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int
mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) {}
#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __MLX5_EN_REP_NEIGH__ */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __MLX5_EN_REP_TC_H__
#define __MLX5_EN_REP_TC_H__
#include <linux/skbuff.h>
#include "en_tc.h"
#include "en_rep.h"
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv);
int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_enable(struct mlx5e_priv *priv);
void mlx5e_rep_tc_disable(struct mlx5e_priv *priv);
int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv);
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
bool neigh_connected,
unsigned char ha[ETH_ALEN]);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv);
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv);
void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
#else /* CONFIG_MLX5_CLS_ACT */
struct mlx5e_rep_priv;
static inline int
mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {}
static inline int
mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {}
static inline void
mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {}
static inline void
mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {}
static inline int
mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; }
static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data) { return -EOPNOTSUPP; }
static inline void
mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) {}
struct mlx5e_tc_update_priv;
static inline bool
mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv) { return true; }
static inline void
mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}
#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __MLX5_EN_REP_TC_H__ */
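The IS_ENABLED()/stub split above means callers never need #ifdef guards of their own. As a rough caller-side sketch (a hypothetical, simplified function, not code from this series), the representor init path can call the TC and neighbor hooks unconditionally and rely on the stubs when CONFIG_MLX5_CLS_ACT is off:

/* Hypothetical caller-side sketch: the static inline stubs above turn these
 * calls into no-ops when CONFIG_MLX5_CLS_ACT is disabled, so the call site
 * needs no preprocessor conditionals.
 */
static int rep_init_tc_sketch(struct mlx5e_rep_priv *rpriv)
{
        int err;

        err = mlx5e_rep_neigh_init(rpriv);      /* stub returns 0 when TC is off */
        if (err)
                return err;

        err = mlx5e_rep_tc_init(rpriv);         /* likewise a no-op stub */
        if (err)
                mlx5e_rep_neigh_cleanup(rpriv);

        return err;
}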
@@ -4,8 +4,11 @@
 #include <net/vxlan.h>
 #include <net/gre.h>
 #include <net/geneve.h>
+#include <net/bareudp.h>
 #include "en/tc_tun.h"
 #include "en_tc.h"
+#include "rep/tc.h"
+#include "rep/neigh.h"
 
 struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
 {
@@ -16,6 +19,8 @@ struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
         else if (netif_is_gretap(tunnel_dev) ||
                  netif_is_ip6gretap(tunnel_dev))
                 return &gre_tunnel;
+        else if (netif_is_bareudp(tunnel_dev))
+                return &mplsoudp_tunnel;
         else
                 return NULL;
 }
@@ -96,9 +101,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
         }
 
         rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-        ret = PTR_ERR_OR_ZERO(rt);
-        if (ret)
-                return ret;
+        if (IS_ERR(rt))
+                return PTR_ERR(rt);
 
         if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                 ip_rt_put(rt);
...
@@ -16,6 +16,7 @@ enum {
         MLX5E_TC_TUNNEL_TYPE_VXLAN,
         MLX5E_TC_TUNNEL_TYPE_GENEVE,
         MLX5E_TC_TUNNEL_TYPE_GRETAP,
+        MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
 struct mlx5e_tc_tunnel {
@@ -46,6 +47,7 @@ struct mlx5e_tc_tunnel {
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
 extern struct mlx5e_tc_tunnel geneve_tunnel;
 extern struct mlx5e_tc_tunnel gre_tunnel;
+extern struct mlx5e_tc_tunnel mplsoudp_tunnel;
 
 struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
...
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
#include <net/bareudp.h>
#include <net/mpls.h>
#include "en/tc_tun.h"
static bool can_offload(struct mlx5e_priv *priv)
{
return MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l3_tunnel_to_l2);
}
static int calc_hlen(struct mlx5e_encap_entry *e)
{
return sizeof(struct udphdr) + MPLS_HLEN;
}
static int init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
e->tunnel = &mplsoudp_tunnel;
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
return 0;
}
static inline __be32 mpls_label_id_field(__be32 label, u8 tos, u8 ttl)
{
u32 res;
/* The MPLS label stack entry is 32 bits long and constructed as follows:
 * 20 bits of label
 * 3 bits of tos (mapped to the MPLS TC field)
 * 1 bit bottom of stack; since we support only one label, this bit is
 * always set.
 * 8 bits of TTL
 */
res = be32_to_cpu(label) << 12 | 1 << 8 | (tos & 7) << 9 | ttl;
return cpu_to_be32(res);
}
static int generate_ip_tun_hdr(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *r)
{
const struct ip_tunnel_key *tun_key = &r->tun_info->key;
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
struct udphdr *udp = (struct udphdr *)(buf);
struct mpls_shim_hdr *mpls;
mpls = (struct mpls_shim_hdr *)(udp + 1);
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
mpls->label_stack_entry = mpls_label_id_field(tun_id, tun_key->tos, tun_key->ttl);
return 0;
}
static int parse_udp_ports(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
void *headers_c,
void *headers_v)
{
return mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
}
static int parse_tunnel(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
void *headers_c,
void *headers_v)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_match_enc_keyid enc_keyid;
struct flow_match_mpls match;
void *misc2_c;
void *misc2_v;
misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_2);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
return 0;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
flow_rule_match_enc_keyid(rule, &enc_keyid);
if (!enc_keyid.mask->keyid)
return 0;
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
flow_rule_match_mpls(rule, &match);
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_label, match.mask->mpls_label);
MLX5_SET(fte_match_set_misc2, misc2_v,
outer_first_mpls_over_udp.mpls_label, match.key->mpls_label);
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_exp, match.mask->mpls_tc);
MLX5_SET(fte_match_set_misc2, misc2_v,
outer_first_mpls_over_udp.mpls_exp, match.key->mpls_tc);
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_s_bos, match.mask->mpls_bos);
MLX5_SET(fte_match_set_misc2, misc2_v,
outer_first_mpls_over_udp.mpls_s_bos, match.key->mpls_bos);
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_ttl, match.mask->mpls_ttl);
MLX5_SET(fte_match_set_misc2, misc2_v,
outer_first_mpls_over_udp.mpls_ttl, match.key->mpls_ttl);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
return 0;
}
struct mlx5e_tc_tunnel mplsoudp_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
.match_level = MLX5_MATCH_L4,
.can_offload = can_offload,
.calc_hlen = calc_hlen,
.init_encap_attr = init_encap_attr,
.generate_ip_tun_hdr = generate_ip_tun_hdr,
.parse_udp_ports = parse_udp_ports,
.parse_tunnel = parse_tunnel,
};
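As a sanity check on the bit layout above, here is a small stand-alone user-space sketch (hypothetical helper and values, not part of the commit) that packs the same MPLS shim word as mpls_label_id_field():

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as above: 20-bit label, 3-bit TC, bottom-of-stack bit
 * (always set for a single label), 8-bit TTL.
 */
static uint32_t mpls_shim_word(uint32_t label, uint8_t tc, uint8_t ttl)
{
        return (label << 12) | ((uint32_t)(tc & 7) << 9) | (1u << 8) | ttl;
}

int main(void)
{
        /* label 100, TC 0, TTL 64 -> 0x00064140 */
        printf("0x%08x\n", mpls_shim_word(100, 0, 64));
        return 0;
}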
@@ -3520,41 +3520,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
         return err;
 }
 
-#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
-                                     struct flow_cls_offload *cls_flower,
-                                     unsigned long flags)
-{
-        switch (cls_flower->command) {
-        case FLOW_CLS_REPLACE:
-                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
-                                              flags);
-        case FLOW_CLS_DESTROY:
-                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
-                                           flags);
-        case FLOW_CLS_STATS:
-                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
-                                          flags);
-        default:
-                return -EOPNOTSUPP;
-        }
-}
-
-static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                                   void *cb_priv)
-{
-        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
-        struct mlx5e_priv *priv = cb_priv;
-
-        switch (type) {
-        case TC_SETUP_CLSFLOWER:
-                return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
-        default:
-                return -EOPNOTSUPP;
-        }
-}
-#endif
-
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3563,7 +3528,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
         struct mlx5e_priv *priv = netdev_priv(dev);
 
         switch (type) {
-#ifdef CONFIG_MLX5_ESWITCH
         case TC_SETUP_BLOCK: {
                 struct flow_block_offload *f = type_data;
 
@@ -3573,7 +3537,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
                                                   mlx5e_setup_tc_block_cb,
                                                   priv, priv, true);
         }
-#endif
         case TC_SETUP_QDISC_MQPRIO:
                 return mlx5e_setup_tc_mqprio(priv, type_data);
         default:
@@ -3746,7 +3709,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
         return 0;
 }
 
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3857,7 +3820,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
         err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                     set_feature_cvlan_filter);
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
 #endif
         err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
...
@@ -158,6 +158,22 @@ struct mlx5e_neigh_hash_entry {
 enum {
         /* set when the encap entry is successfully offloaded into HW */
         MLX5_ENCAP_ENTRY_VALID = BIT(0),
+        MLX5_REFORMAT_DECAP = BIT(1),
+};
+
+struct mlx5e_decap_key {
+        struct ethhdr key;
+};
+
+struct mlx5e_decap_entry {
+        struct mlx5e_decap_key key;
+        struct list_head flows;
+        struct hlist_node hlist;
+        refcount_t refcnt;
+        struct completion res_ready;
+        int compl_result;
+        struct mlx5_pkt_reformat *pkt_reformat;
+        struct rcu_head rcu;
 };
 
 struct mlx5e_encap_entry {
@@ -203,11 +219,6 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
                                    struct mlx5_cqe64 *cqe);
 
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
-                                 struct mlx5e_encap_entry *e);
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
-                                  struct mlx5e_encap_entry *e);
-
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
 
 bool mlx5e_eswitch_rep(struct net_device *netdev);
...
@@ -42,6 +42,7 @@
 #include "en_tc.h"
 #include "eswitch.h"
 #include "en_rep.h"
+#include "en/rep/tc.h"
 #include "ipoib/ipoib.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/tls_rxtx.h"
@@ -1237,12 +1238,12 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
         if (rep->vlan && skb_vlan_tag_present(skb))
                 skb_vlan_pop(skb);
 
-        if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+        if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
                 goto free_wqe;
 
         napi_gro_receive(rq->cq.napi, skb);
 
-        mlx5_tc_rep_post_napi_receive(&tc_priv);
+        mlx5_rep_tc_post_napi_receive(&tc_priv);
 
 free_wqe:
         mlx5e_free_rx_wqe(rq, wi, true);
@@ -1293,12 +1294,12 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
-        if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+        if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
                 goto mpwrq_cqe_out;
 
         napi_gro_receive(rq->cq.napi, skb);
 
-        mlx5_tc_rep_post_napi_receive(&tc_priv);
+        mlx5_rep_tc_post_napi_receive(&tc_priv);
 
 mpwrq_cqe_out:
         if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
...
@@ -34,11 +34,41 @@
 #define __MLX5_EN_TC_H__
 
 #include <net/pkt_cls.h>
+#include "en.h"
 
 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
 
 #ifdef CONFIG_MLX5_ESWITCH
+
+struct tunnel_match_key {
+        struct flow_dissector_key_control enc_control;
+        struct flow_dissector_key_keyid enc_key_id;
+        struct flow_dissector_key_ports enc_tp;
+        struct flow_dissector_key_ip enc_ip;
+        union {
+                struct flow_dissector_key_ipv4_addrs enc_ipv4;
+                struct flow_dissector_key_ipv6_addrs enc_ipv6;
+        };
+
+        int filter_ifindex;
+};
+
+struct tunnel_match_enc_opts {
+        struct flow_dissector_key_enc_opts key;
+        struct flow_dissector_key_enc_opts mask;
+};
+
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
+ * Lower ENC_OPTS_BITS bits for enc_opts.
+ */
+#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
+
 enum {
         MLX5E_TC_FLAG_INGRESS_BIT,
         MLX5E_TC_FLAG_EGRESS_BIT,
@@ -50,9 +80,6 @@ enum {
 
 #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
 
-int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
-void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
-
 int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
@@ -119,11 +146,6 @@ struct mlx5e_tc_update_priv {
         struct net_device *tun_dev;
 };
 
-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
-                             struct mlx5e_tc_update_priv *tc_priv);
-
-void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
-
 struct mlx5e_tc_mod_hdr_acts {
         int num_actions;
         int max_actions;
@@ -148,6 +170,22 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
 struct mlx5e_tc_flow;
 u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
 
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+                            void *cb_priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
 #else /* CONFIG_MLX5_ESWITCH */
 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -156,6 +194,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
 {
         return 0;
 }
+
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
 #endif
 
 #endif /* __MLX5_EN_TC_H__ */
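The TUNNEL_INFO_BITS/ENC_OPTS_BITS macros added above describe how a single tunnel_id value packs two mapping ids. A hedged sketch of that packing, using hypothetical helper names purely for illustration:

/* Illustration of the documented bit layout: the upper TUNNEL_INFO_BITS carry
 * the tunnel-info mapping id, the lower ENC_OPTS_BITS carry the enc_opts
 * mapping id.
 */
static inline u32 compose_tunnel_id(u32 tun_info_id, u32 enc_opts_id)
{
        return (tun_info_id << ENC_OPTS_BITS) | (enc_opts_id & ENC_OPTS_BITS_MASK);
}

static inline void split_tunnel_id(u32 tunnel_id, u32 *tun_info_id, u32 *enc_opts_id)
{
        *enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
        *tun_info_id = (tunnel_id >> ENC_OPTS_BITS) & TUNNEL_INFO_BITS_MASK;
}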
@@ -6,6 +6,8 @@
 
 #include "eswitch.h"
 
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
 bool
 mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
 bool
@@ -46,4 +48,21 @@ void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
 int
 mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
 
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+                          u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+                          u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
+static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
 #endif /* __ML5_ESW_CHAINS_H__ */
@@ -2262,6 +2262,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
         hash_init(esw->offloads.encap_tbl);
         mutex_init(&esw->offloads.mod_hdr.lock);
         hash_init(esw->offloads.mod_hdr.hlist);
+        mutex_init(&esw->offloads.decap_tbl_lock);
+        hash_init(esw->offloads.decap_tbl);
         atomic64_set(&esw->offloads.num_flows, 0);
         mutex_init(&esw->state_lock);
         mutex_init(&esw->mode_lock);
@@ -2303,6 +2305,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
         mutex_destroy(&esw->state_lock);
         mutex_destroy(&esw->offloads.mod_hdr.lock);
         mutex_destroy(&esw->offloads.encap_tbl_lock);
+        mutex_destroy(&esw->offloads.decap_tbl_lock);
         kfree(esw->vports);
         kfree(esw);
 }
...
@@ -209,6 +209,8 @@ struct mlx5_esw_offload {
         struct mutex peer_mutex;
         struct mutex encap_tbl_lock; /* protects encap_tbl */
         DECLARE_HASHTABLE(encap_tbl, 8);
+        struct mutex decap_tbl_lock; /* protects decap_tbl */
+        DECLARE_HASHTABLE(decap_tbl, 8);
         struct mod_hdr_tbl mod_hdr;
         DECLARE_HASHTABLE(termtbl_tbl, 8);
         struct mutex termtbl_mutex; /* protects termtbl hash */
@@ -432,6 +434,7 @@ struct mlx5_esw_flow_attr {
         struct mlx5_flow_table *fdb;
         struct mlx5_flow_table *dest_ft;
         struct mlx5_ct_attr ct_attr;
+        struct mlx5_pkt_reformat *decap_pkt_reformat;
         struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
...
@@ -366,6 +366,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                         }
                 }
         }
+
+        if (attr->decap_pkt_reformat)
+                flow_act.pkt_reformat = attr->decap_pkt_reformat;
+
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                 dest[i].counter_id = mlx5_fc_id(attr->counter);
@@ -1727,7 +1731,9 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
 
 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
 {
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
         mlx5e_tc_clean_fdb_peer_flows(esw);
+#endif
         esw_del_fdb_peer_miss_rules(esw);
 }
...
@@ -144,11 +144,11 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
 int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
                                   int reformat_type)
 {
-        /* the default is error for unknown (non VXLAN/GRE tunnel types) */
         int err = -EOPNOTSUPP;
 
         mutex_lock(&tun_entropy->lock);
-        if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN &&
+        if ((reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN ||
+             reformat_type == MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL) &&
             tun_entropy->enabled) {
                 /* in case entropy calculation is enabled for all tunneling
                  * types, it is ok for VXLAN, so approve.
...
@@ -5,6 +5,7 @@
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <net/rtnetlink.h>
 
 struct bareudp_conf {
         __be16 ethertype;
@@ -17,4 +18,10 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
                                       u8 name_assign_type,
                                       struct bareudp_conf *info);
 
+static inline bool netif_is_bareudp(const struct net_device *dev)
+{
+        return dev->rtnl_link_ops &&
+               !strcmp(dev->rtnl_link_ops->kind, "bareudp");
+}
+
 #endif