Commit d386939a authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5e: Rearrange tc tunnel code in a modular way

Rearrange the tc tunnel code so that it is easy to add future tunnels:
 - Define a tc tunnel object with the fields and callbacks that any
   tunnel must implement.
 - Define a tc UDP tunnel object for UDP tunnels, such as VXLAN.
 - Move each tunnel's code (GRE, VXLAN) into its own file.
 - Rewrite the tc tunnel implementation in a general way, using only
   the objects and their callbacks (the dispatch pattern is sketched
   below).
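The generic flows that consume these callbacks sit largely outside the hunks shown below, so here is a minimal, self-contained user-space C sketch of the dispatch pattern. It is illustrative only: struct tc_tunnel, get_tc_tun() and the header-length values are simplified stand-ins for struct mlx5e_tc_tunnel, mlx5e_get_tc_tun() and the per-tunnel calc_hlen callbacks, not driver code.

#include <stdio.h>
#include <string.h>

struct tc_tunnel {
	const char *name;
	int (*calc_hlen)(void);                  /* stand-in for ->calc_hlen()    */
	int (*parse_tunnel)(const char *filter); /* stand-in for ->parse_tunnel() */
};

static int vxlan_hlen(void) { return 8 + 8; }  /* UDP header + VXLAN header */
static int gre_hlen(void)   { return 4 + 4; }  /* GRE base header + key     */

static int vxlan_parse(const char *f) { printf("vxlan parse: %s\n", f); return 0; }
static int gre_parse(const char *f)   { printf("gre parse: %s\n", f);   return 0; }

static const struct tc_tunnel vxlan_tunnel = {
	.name = "vxlan", .calc_hlen = vxlan_hlen, .parse_tunnel = vxlan_parse,
};

static const struct tc_tunnel gre_tunnel = {
	.name = "gretap", .calc_hlen = gre_hlen, .parse_tunnel = gre_parse,
};

/* Generic lookup by device kind, playing the role of mlx5e_get_tc_tun(). */
static const struct tc_tunnel *get_tc_tun(const char *kind)
{
	if (!strcmp(kind, "vxlan"))
		return &vxlan_tunnel;
	if (!strcmp(kind, "gretap"))
		return &gre_tunnel;
	return NULL;
}

int main(void)
{
	const struct tc_tunnel *tun = get_tc_tun("vxlan");

	if (!tun)
		return 1;

	/* The generic code only ever touches the callbacks. */
	printf("%s tunnel header length: %d bytes\n", tun->name, tun->calc_hlen());
	return tun->parse_tunnel("flower enc_dst_port 4789");
}

With this shape, adding a new tunnel type means filling in one more callback object; the generic code needs no new tunnel-specific branches.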
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 1f6da306
@@ -32,7 +32,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
lib/geneve.o
lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o
#
# Core extra
@@ -14,9 +14,39 @@
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
MLX5E_TC_TUNNEL_TYPE_GRETAP
MLX5E_TC_TUNNEL_TYPE_GRETAP,
};
struct mlx5e_tc_tunnel {
int tunnel_type;
enum mlx5_flow_match_level match_level;
bool (*can_offload)(struct mlx5e_priv *priv);
int (*calc_hlen)(struct mlx5e_encap_entry *e);
int (*init_encap_attr)(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack);
int (*generate_ip_tun_hdr)(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *e);
int (*parse_udp_ports)(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v);
int (*parse_tunnel)(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
extern struct mlx5e_tc_tunnel gre_tunnel;
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
@@ -30,7 +60,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
@@ -41,4 +70,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
void *headers_c,
void *headers_v, u8 *match_level);
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v);
#endif //__MLX5_EN_TC_TUNNEL_H__
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
#include <net/gre.h>
#include "en/tc_tun.h"
static bool mlx5e_tc_tun_can_offload_gretap(struct mlx5e_priv *priv)
{
return !!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap);
}
static int mlx5e_tc_tun_calc_hlen_gretap(struct mlx5e_encap_entry *e)
{
return gre_calc_hlen(e->tun_info->key.tun_flags);
}
static int mlx5e_tc_tun_init_encap_attr_gretap(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
e->tunnel = &gre_tunnel;
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
return 0;
}
static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
if (tun_key->tun_flags & TUNNEL_KEY) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}
return 0;
}
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* gre protocol */
MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
/* gre key */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid enc_keyid;
flow_rule_match_enc_keyid(rule, &enc_keyid);
MLX5_SET(fte_match_set_misc, misc_c,
gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
}
struct mlx5e_tc_tunnel gre_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_GRETAP,
.match_level = MLX5_MATCH_L3,
.can_offload = mlx5e_tc_tun_can_offload_gretap,
.calc_hlen = mlx5e_tc_tun_calc_hlen_gretap,
.init_encap_attr = mlx5e_tc_tun_init_encap_attr_gretap,
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
.parse_udp_ports = NULL,
.parse_tunnel = mlx5e_tc_tun_parse_gretap,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
#include <net/vxlan.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"
static bool mlx5e_tc_tun_can_offload_vxlan(struct mlx5e_priv *priv)
{
return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
}
static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
{
return VXLAN_HLEN;
}
static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct flow_match_ports enc_ports;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
return -EOPNOTSUPP;
flow_rule_match_enc_ports(rule, &enc_ports);
/* check the UDP destination port validity */
if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
be16_to_cpu(enc_ports.key->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"Matched UDP dst port is not registered as a VXLAN port");
netdev_warn(priv->netdev,
"UDP port %d is not registered as a VXLAN port\n",
be16_to_cpu(enc_ports.key->dst));
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
int err = 0;
err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
if (err)
return err;
return mlx5e_tc_tun_check_udp_dport_vxlan(priv, f);
}
static int mlx5e_tc_tun_init_encap_attr_vxlan(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
e->tunnel = &vxlan_tunnel;
if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
NL_SET_ERR_MSG_MOD(extack,
"vxlan udp dport was not registered with the HW");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n",
dst_port);
return -EOPNOTSUPP;
}
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
return 0;
}
static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
struct udphdr *udp = (struct udphdr *)(buf);
struct vxlanhdr *vxh;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
return 0;
}
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
flow_rule_match_enc_keyid(rule, &enc_keyid);
if (!enc_keyid.mask->keyid)
return 0;
/* match on VNI is required */
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_vxlan_vni)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on VXLAN VNI is not supported");
netdev_warn(priv->netdev,
"Matching on VXLAN VNI is not supported\n");
return -EOPNOTSUPP;
}
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
return 0;
}
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
.can_offload = mlx5e_tc_tun_can_offload_vxlan,
.calc_hlen = mlx5e_tc_tun_calc_hlen_vxlan,
.init_encap_attr = mlx5e_tc_tun_init_encap_attr_vxlan,
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
};
@@ -155,8 +155,7 @@ struct mlx5e_encap_entry {
struct net_device *out_dev;
struct net_device *route_dev;
int tunnel_type;
int tunnel_hlen;
struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
char *encap_header;
@@ -2569,20 +2569,20 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct encap_key {
const struct ip_tunnel_key *ip_tun_key;
int tunnel_type;
struct mlx5e_tc_tunnel *tc_tunnel;
};
static inline int cmp_encap_info(struct encap_key *a,
struct encap_key *b)
{
return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
a->tunnel_type != b->tunnel_type;
a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}
static inline int hash_encap_info(struct encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
key->tunnel_type);
key->tc_tunnel->tunnel_type);
}
@@ -2624,14 +2624,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
tun_info = parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
e_key.ip_tun_key = &e->tun_info->key;
e_key.tunnel_type = e->tunnel_type;
e_key.tc_tunnel = e->tunnel;
if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;