Commit 3a495844 authored by David S. Miller

Merge branch 'nfp-extend-flower-capabilities-for-GRE-tunnel-offload'

Jakub Kicinski says:

====================
nfp: extend flower capabilities for GRE tunnel offload

Pieter says:

This set extends the flower match and action components to offload
GRE tunnel decapsulation (classification) and encapsulation (action).
The first three patches are refactoring and cleanup work to improve
readability and reusability. Patches 4 and 5 implement the GRE decap
and encap functionality, respectively.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 702999ea fccac580
@@ -170,13 +170,36 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
return 0;
}
static bool
nfp_flower_tun_is_gre(struct tc_cls_flower_offload *flow, int start_idx)
{
struct flow_action_entry *act = flow->rule->action.entries;
int num_act = flow->rule->action.num_entries;
int act_idx;
/* Preparse action list for next mirred or redirect action */
for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
act[act_idx].id == FLOW_ACTION_MIRRED)
return netif_is_gretap(act[act_idx].dev);
return false;
}
static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
-const struct flow_action_entry *act)
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+struct tc_cls_flower_offload *flow,
+const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
/* Determine the tunnel type based on the egress netdev
* in the mirred action for tunnels without l4.
*/
if (nfp_flower_tun_is_gre(flow, act_idx))
return NFP_FL_TUNNEL_GRE;
switch (tun->key.tp_dst) {
case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
@@ -281,15 +304,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
-struct nfp_fl_set_ipv4_udp_tun *set_tun,
-const struct flow_action_entry *act,
-struct nfp_fl_pre_tunnel *pre_tun,
-enum nfp_flower_tun_type tun_type,
-struct net_device *netdev,
-struct netlink_ext_ack *extack)
+nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
+const struct flow_action_entry *act,
+struct nfp_fl_pre_tunnel *pre_tun,
+enum nfp_flower_tun_type tun_type,
+struct net_device *netdev, struct netlink_ext_ack *extack)
{
-size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
@@ -843,9 +864,9 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
struct nfp_flower_pedit_acts *set_act,
-struct netlink_ext_ack *extack)
+struct netlink_ext_ack *extack, int act_idx)
{
-struct nfp_fl_set_ipv4_udp_tun *set_tun;
+struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
@@ -898,7 +919,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
-*tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
+*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
if (*tun_type == NFP_FL_TUNNEL_NONE) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
@@ -914,7 +935,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ) {
+sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
}
@@ -928,11 +949,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
-err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
-*tun_type, netdev, extack);
+err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
+*tun_type, netdev, extack);
if (err)
return err;
-*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+*a_len += sizeof(struct nfp_fl_set_ipv4_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
@@ -1024,8 +1045,8 @@ int nfp_flower_compile_action(struct nfp_app *app,
memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
-&out_cnt, &csum_updated, &set_act,
-extack);
+&out_cnt, &csum_updated,
+&set_act, extack, i);
if (err)
return err;
act_cnt++;
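The encap-side change above keys off the action list: a tunnel-encap action is
classified as GRE by peeking ahead to the egress device of the next redirect or
mirred action. A minimal standalone sketch of that preparse logic, using
simplified stand-in types rather than the kernel's flow_action structures:

/* Sketch of the preparse logic in nfp_flower_tun_is_gre(): after a
 * tunnel-encap action, scan forward for the first redirect/mirred
 * action and classify the tunnel as GRE if that action's egress
 * device is a gretap. Types below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_TUNNEL_ENCAP, ACT_REDIRECT, ACT_MIRRED, ACT_OTHER };

struct act_entry {
	enum act_id id;
	bool dev_is_gretap;	/* stand-in for netif_is_gretap(act->dev) */
};

static bool tun_is_gre(const struct act_entry *acts, int num_act, int start_idx)
{
	for (int i = start_idx + 1; i < num_act; i++)
		if (acts[i].id == ACT_REDIRECT || acts[i].id == ACT_MIRRED)
			return acts[i].dev_is_gretap;
	return false;	/* no egress action found: treat as non-GRE */
}

int main(void)
{
	struct act_entry acts[] = {
		{ ACT_TUNNEL_ENCAP, false },
		{ ACT_OTHER, false },
		{ ACT_REDIRECT, true },	/* egress device is a gretap */
	};

	printf("encap is GRE: %s\n", tun_is_gre(acts, 3, 0) ? "yes" : "no");
	return 0;
}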
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/vxlan.h>
#include "../nfp_app.h"
@@ -22,6 +23,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
@@ -37,6 +39,9 @@
#define NFP_FL_IP_FRAG_FIRST BIT(7)
#define NFP_FL_IP_FRAGMENTED BIT(6)
/* GRE Tunnel flags */
#define NFP_FL_GRE_FLAG_KEY BIT(2)
/* Compressed HW representation of TCP Flags */
#define NFP_FL_TCP_FLAG_URG BIT(4)
#define NFP_FL_TCP_FLAG_PSH BIT(3)
@@ -107,6 +112,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_GRE = 1,
NFP_FL_TUNNEL_VXLAN = 2,
NFP_FL_TUNNEL_GENEVE = 4,
};
@@ -203,7 +209,7 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_ipv4_udp_tun {
+struct nfp_fl_set_ipv4_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -354,6 +360,16 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
struct nfp_flower_tun_ipv4 {
__be32 src;
__be32 dst;
};
struct nfp_flower_tun_ip_ext {
u8 tos;
u8 ttl;
};
/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
@@ -371,15 +387,42 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4_udp_tun {
-__be32 ip_src;
-__be32 ip_dst;
+struct nfp_flower_tun_ipv4 ipv4;
__be16 reserved1;
-u8 tos;
-u8 ttl;
+struct nfp_flower_tun_ip_ext ip_ext;
__be32 reserved2;
__be32 tun_id;
};
/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_src |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | tun_flags | tos | ttl |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved | Ethertype |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Key |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4_gre_tun {
struct nfp_flower_tun_ipv4 ipv4;
__be16 tun_flags;
struct nfp_flower_tun_ip_ext ip_ext;
__be16 reserved1;
__be16 ethertype;
__be32 tun_key;
__be32 reserved2;
};
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -530,6 +573,8 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
if (netif_is_gretap(netdev))
return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
@@ -546,6 +591,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_geneve(netdev))
return true;
if (netif_is_gretap(netdev))
return true;
return false;
}
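For reference, the new GRE match key defined above is 6 words (24 bytes), with
the ethertype pinned to ETH_P_TEB since only NVGRE is offloaded. A minimal
userspace sketch of the layout, assuming local stand-in definitions (plain
uintN_t fields instead of the kernel's __be types):

/* Userspace sketch of the nfp_flower_ipv4_gre_tun layout (6W/24B).
 * ETH_P_TEB and the key flag value are copied from the patch above;
 * everything else is a local stand-in for illustration.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_TEB		0x6558	/* transparent Ethernet bridging (NVGRE) */
#define NFP_FL_GRE_FLAG_KEY	(1 << 2)

struct ipv4_gre_tun {
	uint32_t src;		/* outer IPv4 source, network order */
	uint32_t dst;		/* outer IPv4 destination, network order */
	uint16_t tun_flags;	/* NFP_FL_GRE_FLAG_KEY when a key is matched */
	uint8_t tos;
	uint8_t ttl;
	uint16_t reserved1;
	uint16_t ethertype;	/* always ETH_P_TEB: only NVGRE is offloaded */
	uint32_t tun_key;
	uint32_t reserved2;
};

int main(void)
{
	struct ipv4_gre_tun ext;

	memset(&ext, 0, sizeof(ext));
	ext.ethertype = htons(ETH_P_TEB);
	ext.tun_flags = htons(NFP_FL_GRE_FLAG_KEY);
	ext.tun_key = htonl(42);		/* example GRE key */
	ext.dst = inet_addr("10.0.0.2");	/* tunnel endpoint, exact match */

	printf("GRE key layer: %zu bytes\n", sizeof(ext));	/* 24 */
	return 0;
}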
@@ -280,6 +280,71 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
return 0;
}
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct nfp_flower_tun_ipv4 *msk,
struct tc_cls_flower_offload *flow)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
ext->src = match.key->src;
ext->dst = match.key->dst;
msk->src = match.mask->src;
msk->dst = match.mask->dst;
}
}
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct nfp_flower_tun_ip_ext *msk,
struct tc_cls_flower_offload *flow)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
ext->tos = match.key->tos;
ext->ttl = match.key->ttl;
msk->tos = match.mask->tos;
msk->ttl = match.mask->ttl;
}
}
static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
struct nfp_flower_ipv4_gre_tun *msk,
struct tc_cls_flower_offload *flow)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
ext->tun_key = match.key->keyid;
msk->tun_key = match.mask->keyid;
ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
@@ -301,25 +366,8 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
msk->tun_id = cpu_to_be32(temp_vni);
}
-if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-struct flow_match_ipv4_addrs match;
-flow_rule_match_enc_ipv4_addrs(rule, &match);
-ext->ip_src = match.key->src;
-ext->ip_dst = match.key->dst;
-msk->ip_src = match.mask->src;
-msk->ip_dst = match.mask->dst;
-}
-if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
-struct flow_match_ip match;
-flow_rule_match_enc_ip(rule, &match);
-ext->tos = match.key->tos;
-ext->ttl = match.key->ttl;
-msk->tos = match.mask->tos;
-msk->ttl = match.mask->ttl;
-}
+nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
@@ -406,12 +454,27 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
__be32 tun_dst;
nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_gre_tun);
msk += sizeof(struct nfp_flower_ipv4_gre_tun);
/* Store the tunnel destination in the rule data.
* This must be present and be an exact match.
*/
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(app, tun_dst);
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
-tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
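The match.c refactor above factors the key/mask copying shared by UDP and GRE
tunnels into nfp_flower_compile_tun_ipv4_addrs() and
nfp_flower_compile_tun_ip_ext(). A small standalone sketch of that key/mask
pattern, with illustrative stand-in types for the flow_match structures:

/* Sketch of the key/mask copy pattern factored out above: every
 * matched field is written to both the "ext" (key) and "msk" (mask)
 * halves of the hardware flow entry. Types are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct tun_ipv4 {
	uint32_t src;
	uint32_t dst;
};

struct match_ipv4 {	/* stand-in for struct flow_match_ipv4_addrs */
	struct tun_ipv4 key;
	struct tun_ipv4 mask;
};

static void compile_tun_ipv4_addrs(struct tun_ipv4 *ext,
				   struct tun_ipv4 *msk,
				   const struct match_ipv4 *m)
{
	ext->src = m->key.src;
	ext->dst = m->key.dst;
	msk->src = m->mask.src;
	msk->dst = m->mask.dst;
}

int main(void)
{
	struct match_ipv4 m = {
		.key = { 0x0a000001, 0x0a000002 },	/* 10.0.0.1 -> 10.0.0.2 */
		.mask = { 0xffffffff, 0xffffffff },	/* exact match */
	};
	struct tun_ipv4 ext = { 0, 0 }, msk = { 0, 0 };

	compile_tun_ipv4_addrs(&ext, &msk, &m);
	printf("dst key %#x mask %#x\n", (unsigned)ext.dst, (unsigned)msk.dst);
	return 0;
}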
@@ -52,8 +52,7 @@
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
-BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
-BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
@@ -141,16 +140,16 @@ static bool nfp_flower_check_higher_than_l3(struct tc_cls_flower_offload *f)
}
static int
-nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
u32 *key_layer_two, int *key_size,
struct netlink_ext_ack *extack)
{
-if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
}
-if (enc_opts->key->len > 0) {
+if (enc_opts->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -158,6 +157,57 @@ nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
return 0;
}
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
struct flow_dissector_key_enc_opts *enc_op,
u32 *key_layer_two, u8 *key_layer, int *key_size,
struct nfp_flower_priv *priv,
enum nfp_flower_tun_type *tun_type,
struct netlink_ext_ack *extack)
{
int err;
switch (enc_ports->dst) {
case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
*key_layer |= NFP_FLOWER_LAYER_VXLAN;
*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
if (enc_op) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
return -EOPNOTSUPP;
}
break;
case htons(GENEVE_UDP_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
return -EOPNOTSUPP;
}
*tun_type = NFP_FL_TUNNEL_GENEVE;
*key_layer |= NFP_FLOWER_LAYER_EXT_META;
*key_size += sizeof(struct nfp_flower_ext_meta);
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
if (!enc_op)
break;
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
return -EOPNOTSUPP;
}
err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
key_size, extack);
if (err)
return err;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
return -EOPNOTSUPP;
}
return 0;
}
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
@@ -234,58 +284,51 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return -EOPNOTSUPP;
}
-flow_rule_match_enc_ports(rule, &enc_ports);
-if (enc_ports.mask->dst != cpu_to_be16(~0)) {
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
-return -EOPNOTSUPP;
-}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
-switch (enc_ports.key->dst) {
-case htons(IANA_VXLAN_UDP_PORT):
-*tun_type = NFP_FL_TUNNEL_VXLAN;
-key_layer |= NFP_FLOWER_LAYER_VXLAN;
-key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-if (enc_op.key) {
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+/* check if GRE, which has no enc_ports */
+if (netif_is_gretap(netdev)) {
+*tun_type = NFP_FL_TUNNEL_GRE;
+key_layer |= NFP_FLOWER_LAYER_EXT_META;
+key_size += sizeof(struct nfp_flower_ext_meta);
+key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+key_size +=
+sizeof(struct nfp_flower_ipv4_gre_tun);
+if (enc_op.key) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+return -EOPNOTSUPP;
+}
+} else {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
}
-break;
-case htons(GENEVE_UDP_PORT):
-if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+} else {
+flow_rule_match_enc_ports(rule, &enc_ports);
+if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
return -EOPNOTSUPP;
}
-*tun_type = NFP_FL_TUNNEL_GENEVE;
-key_layer |= NFP_FLOWER_LAYER_EXT_META;
-key_size += sizeof(struct nfp_flower_ext_meta);
-key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
-key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-if (!enc_op.key)
-break;
-if (!(priv->flower_ext_feats &
-NFP_FL_FEATS_GENEVE_OPT)) {
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
-return -EOPNOTSUPP;
-}
-err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
-&key_size, extack);
+err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
+enc_op.key,
+&key_layer_two,
+&key_layer,
+&key_size, priv,
+tun_type, extack);
if (err)
return err;
-break;
-default:
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
-return -EOPNOTSUPP;
-}
-/* Ensure the ingress netdev matches the expected tun type. */
-if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
-NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
-return -EOPNOTSUPP;
+/* Ensure the ingress netdev matches the expected
+* tun type.
+*/
+if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+return -EOPNOTSUPP;
+}
}
}
}
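To make the size accounting in nfp_flower_calculate_key_layers() concrete:
GRE, like Geneve, is flagged in key_layer_two and therefore also pulls in the
extended-metadata layer. A standalone sketch under assumed values (the bit
positions and the 4-byte ext-meta size are stand-ins; the 16B/24B tunnel sizes
follow the layout comments in cmsg.h above):

/* Sketch of the key-layer accounting for tunnel matches. Bit
 * positions and the ext-meta size below are assumptions for the
 * example, not values confirmed by this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define LAYER_EXT_META		(1u << 0)	/* assumed position */
#define LAYER_VXLAN		(1u << 7)
#define LAYER2_GRE		(1u << 0)
#define LAYER2_GENEVE		(1u << 5)

#define SZ_EXT_META		4	/* assumed sizeof(nfp_flower_ext_meta) */
#define SZ_IPV4_UDP_TUN		16	/* 4W/16B per cmsg.h */
#define SZ_IPV4_GRE_TUN		24	/* 6W/24B per cmsg.h */

enum tun_type { TUN_VXLAN, TUN_GENEVE, TUN_GRE };

struct key_layers {
	uint8_t key_layer;
	uint32_t key_layer_two;
	int key_size;
};

static void add_tun_layer(struct key_layers *k, enum tun_type t)
{
	switch (t) {
	case TUN_VXLAN:		/* fits in the base key layer */
		k->key_layer |= LAYER_VXLAN;
		k->key_size += SZ_IPV4_UDP_TUN;
		break;
	case TUN_GENEVE:	/* layer-two bit: needs ext meta too */
		k->key_layer |= LAYER_EXT_META;
		k->key_size += SZ_EXT_META;
		k->key_layer_two |= LAYER2_GENEVE;
		k->key_size += SZ_IPV4_UDP_TUN;
		break;
	case TUN_GRE:		/* likewise a layer-two match */
		k->key_layer |= LAYER_EXT_META;
		k->key_size += SZ_EXT_META;
		k->key_layer_two |= LAYER2_GRE;
		k->key_size += SZ_IPV4_GRE_TUN;
		break;
	}
}

int main(void)
{
	struct key_layers k = { 0, 0, 0 };

	add_tun_layer(&k, TUN_GRE);
	printf("layer=%#x layer2=%#x size=%d\n", (unsigned)k.key_layer,
	       (unsigned)k.key_layer_two, k.key_size);	/* size = 4 + 24 */
	return 0;
}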