Commit 6ee44c51 authored by Gavin Li, committed by Jakub Kicinski

net/mlx5e: TC, Add support for VxLAN GBP encap/decap flows offload

Add HW offloading support for TC flows with VxLAN GBP encap/decap.

Example of encap rule:
tc filter add dev eth0 protocol ip ingress flower \
    action tunnel_key set id 42 vxlan_opts 512 \
    action mirred egress redirect dev vxlan1

Example of decap rule:
tc filter add dev vxlan1 protocol ip ingress flower \
    enc_key_id 42 enc_dst_port 4789 vxlan_opts 1024 \
    action tunnel_key unset action mirred egress redirect dev eth0
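
Both examples assume vxlan1 is a VxLAN device created with GBP support. As an
illustration only (the device name and exact options are assumptions, not part
of the tested setup), a metadata-mode GBP device could be created like:

# hypothetical setup for the vxlan1 device used in the rules above
ip link add vxlan1 type vxlan external gbp dstport 4789
ip link set vxlan1 up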

Signed-off-by: Gavin Li <gavinl@nvidia.com>
Reviewed-by: Gavi Teitz <gavi@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Maor Dickman <maord@nvidia.com>
Acked-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent bc9d003d

--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2018 Mellanox Technologies. */
 #include <net/ip_tunnels.h>
 #include <net/vxlan.h>
 #include "lib/vxlan.h"
 #include "en/tc_tun.h"
@@ -86,9 +87,11 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
         const struct ip_tunnel_key *tun_key = &e->tun_info->key;
         __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
         struct udphdr *udp = (struct udphdr *)(buf);
+        const struct vxlan_metadata *md;
         struct vxlanhdr *vxh;
 
-        if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+        if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
+            e->tun_info->options_len != sizeof(*md))
                 return -EOPNOTSUPP;
         vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
         *ip_proto = IPPROTO_UDP;
@@ -96,6 +99,57 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
         udp->dest = tun_key->tp_dst;
         vxh->vx_flags = VXLAN_HF_VNI;
         vxh->vx_vni = vxlan_vni_field(tun_id);
+        if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
+                md = ip_tunnel_info_opts(e->tun_info);
+                vxlan_build_gbp_hdr(vxh, md);
+        }
 
         return 0;
 }
 
+static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
+                                               struct mlx5_flow_spec *spec,
+                                               struct flow_cls_offload *f)
+{
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct netlink_ext_ack *extack = f->common.extack;
+        struct flow_match_enc_opts enc_opts;
+        void *misc5_c, *misc5_v;
+        u32 *gbp, *gbp_mask;
+
+        flow_rule_match_enc_opts(rule, &enc_opts);
+
+        if (memchr_inv(&enc_opts.mask->data, 0, sizeof(enc_opts.mask->data)) &&
+            !MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(priv->mdev, tunnel_header_0_1)) {
+                NL_SET_ERR_MSG_MOD(extack, "Matching on VxLAN GBP is not supported");
+                return -EOPNOTSUPP;
+        }
+
+        if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
+                NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
+                return -EOPNOTSUPP;
+        }
+
+        if (enc_opts.key->len != sizeof(*gbp) ||
+            enc_opts.mask->len != sizeof(*gbp_mask)) {
+                NL_SET_ERR_MSG_MOD(extack, "VxLAN GBP option/mask len is not 32 bits");
+                return -EINVAL;
+        }
+
+        gbp = (u32 *)&enc_opts.key->data[0];
+        gbp_mask = (u32 *)&enc_opts.mask->data[0];
+
+        if (*gbp_mask & ~VXLAN_GBP_MASK) {
+                NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+                return -EINVAL;
+        }
+
+        misc5_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_5);
+        misc5_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_5);
+        MLX5_SET(fte_match_set_misc5, misc5_c, tunnel_header_0, *gbp_mask);
+        MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0, *gbp);
+
+        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+        return 0;
+}
@@ -122,6 +176,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
         if (!enc_keyid.mask->keyid)
                 return 0;
 
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+                int err;
+
+                err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
+                if (err)
+                        return err;
+        }
+
         /* match on VNI is required */
         if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
@@ -143,6 +205,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
         return 0;
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
+                                                struct mlx5e_encap_key *b)
+{
+        return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
+}
+
 static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
 {
         const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
@@ -160,6 +228,6 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
         .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
         .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
         .parse_tunnel = mlx5e_tc_tun_parse_vxlan,
-        .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+        .encap_info_equal = mlx5e_tc_tun_encap_info_equal_vxlan,
         .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
 };
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1357,6 +1357,12 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
         MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
 
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+        MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
+        MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
+
 #define MLX5_CAP_ESW(mdev, cap) \
         MLX5_GET(e_switch_cap, \
                  mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -404,10 +404,13 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
         u8         metadata_reg_c_0[0x1];
 };
 
+/* Table 2170 - Flow Table Fields Supported 2 Format */
 struct mlx5_ifc_flow_table_fields_supported_2_bits {
         u8         reserved_at_0[0xe];
         u8         bth_opcode[0x1];
-        u8         reserved_at_f[0x11];
+        u8         reserved_at_f[0x1];
+        u8         tunnel_header_0_1[0x1];
+        u8         reserved_at_11[0xf];
 
         u8         reserved_at_20[0x60];
 };
@@ -895,7 +898,13 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
         struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
 
-        u8         reserved_at_800[0x1000];
+        u8         reserved_at_800[0xC00];
+
+        struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb;
+
+        struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb;
+
+        u8         reserved_at_1500[0x300];
 
         u8         sw_steering_fdb_action_drop_icm_address_rx[0x40];
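
Usage note (not part of the original patch): after installing rules like the
ones in the commit message, the offload state of a flower rule can be checked
from userspace; an offloaded rule is reported with the in_hw flag, e.g.:

# hypothetical check on the decap device from the examples
tc -s filter show dev vxlan1 ingress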