Commit 8928e19a authored by David S. Miller

Merge branch 'flow-mpls'

Guillaume Nault says:

====================
flow_dissector, cls_flower: Add support for multiple MPLS Label Stack Entries

Currently, the flow dissector and the Flower classifier can only handle
the first entry of an MPLS label stack. This patch series generalises
the code to allow parsing and matching the Label Stack Entries that
follow.

Patch 1 extends the flow dissector to parse MPLS LSEs until the Bottom
Of Stack bit is reached. The number of parsed LSEs is capped at
FLOW_DIS_MPLS_MAX (arbitrarily set to 7). Flower and the NFP driver
are updated to take into account the new layout of struct
flow_dissector_key_mpls.
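
[Editor's illustration, not part of the series] A driver offloading Flower
matches would consume the reworked key roughly as sketched below; the helper
name and the -ENOENT convention are made up, while the struct fields and
flow_rule helpers are the real ones touched by the diffs further down:

#include <net/flow_offload.h>

/* Hypothetical helper: return the outermost label only when the filter
 * matches exactly one LSE (the only case this imaginary hardware supports),
 * mirroring the used_lses checks added to mlx5 and nfp below.
 */
static int example_get_outermost_label(struct flow_rule *rule, u32 *label)
{
	struct flow_match_mpls match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
		return -ENOENT;

	flow_rule_match_mpls(rule, &match);

	if (match.mask->used_lses != 1)	/* bit i set <=> depth i+1 in use */
		return -EOPNOTSUPP;

	*label = match.key->ls[0].mpls_label;	/* depth 1 == ls[0] */
	return 0;
}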

Patch 2 extends Flower. It defines new netlink attributes, which are
independent of the previous MPLS ones. Mixing the old and the new
attributes in the same filter is not allowed. For backward compatibility,
the old attributes are used when dumping filters that don't require the
new ones.
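
[Editor's illustration, not part of the series] The new attributes nest as
TCA_FLOWER_KEY_MPLS_OPTS > TCA_FLOWER_KEY_MPLS_OPTS_LSE > per-LSE fields
(DEPTH, TTL, BOS, TC, LABEL), with DEPTH starting at 1 for the outermost
entry. A minimal sketch of emitting one LSE with the standard nla helpers,
condensing what fl_dump_key_mpls_opt_lse() does in the diff below (the
function name and the trimmed error handling are illustrative):

#include <net/netlink.h>
#include <linux/pkt_cls.h>

/* Illustration only: put a single MPLS LSE using the new nested layout. */
static int example_put_one_lse(struct sk_buff *skb, u8 depth, u32 label)
{
	struct nlattr *opts, *lse;

	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
	if (!opts)
		return -EMSGSIZE;

	lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
	if (!lse)
		goto cancel;

	/* Depth starts at 1 for the outermost label stack entry. */
	if (nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, depth) ||
	    nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, label))
		goto cancel;

	nla_nest_end(skb, lse);
	nla_nest_end(skb, opts);
	return 0;

cancel:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}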

Changes since v2:
  * Fix compilation with the new MLX5 bareudp tunnel code.

Changes since v1:
  * Fix compilation of NFP driver (kbuild test robot).
  * Fix sparse warning with entropy label (kbuild test robot).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fb8ddaa9 61aec25a
@@ -101,25 +101,36 @@ static int parse_tunnel(struct mlx5e_priv *priv,
 		flow_rule_match_mpls(rule, &match);
+		/* Only support matching the first LSE */
+		if (match.mask->used_lses != 1)
+			return -EOPNOTSUPP;
 		MLX5_SET(fte_match_set_misc2, misc2_c,
-			 outer_first_mpls_over_udp.mpls_label, match.mask->mpls_label);
+			 outer_first_mpls_over_udp.mpls_label,
+			 match.mask->ls[0].mpls_label);
 		MLX5_SET(fte_match_set_misc2, misc2_v,
-			 outer_first_mpls_over_udp.mpls_label, match.key->mpls_label);
+			 outer_first_mpls_over_udp.mpls_label,
+			 match.key->ls[0].mpls_label);
 		MLX5_SET(fte_match_set_misc2, misc2_c,
-			 outer_first_mpls_over_udp.mpls_exp, match.mask->mpls_tc);
+			 outer_first_mpls_over_udp.mpls_exp,
+			 match.mask->ls[0].mpls_tc);
 		MLX5_SET(fte_match_set_misc2, misc2_v,
-			 outer_first_mpls_over_udp.mpls_exp, match.key->mpls_tc);
+			 outer_first_mpls_over_udp.mpls_exp, match.key->ls[0].mpls_tc);
 		MLX5_SET(fte_match_set_misc2, misc2_c,
-			 outer_first_mpls_over_udp.mpls_s_bos, match.mask->mpls_bos);
+			 outer_first_mpls_over_udp.mpls_s_bos,
+			 match.mask->ls[0].mpls_bos);
 		MLX5_SET(fte_match_set_misc2, misc2_v,
-			 outer_first_mpls_over_udp.mpls_s_bos, match.key->mpls_bos);
+			 outer_first_mpls_over_udp.mpls_s_bos,
+			 match.key->ls[0].mpls_bos);
 		MLX5_SET(fte_match_set_misc2, misc2_c,
-			 outer_first_mpls_over_udp.mpls_ttl, match.mask->mpls_ttl);
+			 outer_first_mpls_over_udp.mpls_ttl,
+			 match.mask->ls[0].mpls_ttl);
 		MLX5_SET(fte_match_set_misc2, misc2_v,
-			 outer_first_mpls_over_udp.mpls_ttl, match.key->mpls_ttl);
+			 outer_first_mpls_over_udp.mpls_ttl,
+			 match.key->ls[0].mpls_ttl);
 		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 		return 0;
@@ -74,9 +74,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 	return 0;
 }
-static void
+static int
 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
-		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
+		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
+		       struct netlink_ext_ack *extack)
 {
 	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
 	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -97,14 +98,28 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
 		u32 t_mpls;
 		flow_rule_match_mpls(rule, &match);
-		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
+		/* Only support matching the first LSE */
+		if (match.mask->used_lses != 1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "unsupported offload: invalid LSE depth for MPLS match offload");
+			return -EOPNOTSUPP;
+		}
+		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				    match.key->ls[0].mpls_label) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				    match.key->ls[0].mpls_tc) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				    match.key->ls[0].mpls_bos) |
 			 NFP_FLOWER_MASK_MPLS_Q;
 		ext->mpls_lse = cpu_to_be32(t_mpls);
-		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
+		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				    match.mask->ls[0].mpls_label) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				    match.mask->ls[0].mpls_tc) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				    match.mask->ls[0].mpls_bos) |
 			 NFP_FLOWER_MASK_MPLS_Q;
 		msk->mpls_lse = cpu_to_be32(t_mpls);
 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -121,6 +136,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
 			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
 		}
 	}
+	return 0;
 }
 static void
@@ -461,9 +478,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	msk += sizeof(struct nfp_flower_in_port);
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
-		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
-				       (struct nfp_flower_mac_mpls *)msk,
-				       rule);
+		err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+					     (struct nfp_flower_mac_mpls *)msk,
+					     rule, extack);
+		if (err)
+			return err;
 		ext += sizeof(struct nfp_flower_mac_mpls);
 		msk += sizeof(struct nfp_flower_mac_mpls);
 	}
@@ -59,13 +59,25 @@ struct flow_dissector_key_vlan {
 	__be16	vlan_tpid;
 };
-struct flow_dissector_key_mpls {
+struct flow_dissector_mpls_lse {
 	u32	mpls_ttl:8,
 		mpls_bos:1,
 		mpls_tc:3,
 		mpls_label:20;
 };
+#define FLOW_DIS_MPLS_MAX 7
+struct flow_dissector_key_mpls {
+	struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */
+	u8 used_lses; /* One bit set for each Label Stack Entry in use */
+};
+static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls,
+					  int lse_index)
+{
+	mpls->used_lses |= 1 << lse_index;
+}
 #define FLOW_DIS_TUN_OPTS_MAX 255
 /**
  * struct flow_dissector_key_enc_opts:
@@ -576,6 +576,8 @@ enum {
 	TCA_FLOWER_KEY_CT_LABELS,	/* u128 */
 	TCA_FLOWER_KEY_CT_LABELS_MASK,	/* u128 */
+	TCA_FLOWER_KEY_MPLS_OPTS,
 	__TCA_FLOWER_MAX,
 };
@@ -640,6 +642,27 @@ enum {
 #define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
 		(__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
+enum {
+	TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
+	TCA_FLOWER_KEY_MPLS_OPTS_LSE,
+	__TCA_FLOWER_KEY_MPLS_OPTS_MAX,
+};
+#define TCA_FLOWER_KEY_MPLS_OPTS_MAX (__TCA_FLOWER_KEY_MPLS_OPTS_MAX - 1)
+enum {
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC,
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
+	TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+	__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX,
+};
+#define TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX \
+		(__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX - 1)
 enum {
 	TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
 	TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
@@ -480,47 +480,59 @@ EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
 static enum flow_dissect_ret
 __skb_flow_dissect_mpls(const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
-			void *target_container, void *data, int nhoff, int hlen)
+			void *target_container, void *data, int nhoff, int hlen,
+			int lse_index, bool *entropy_label)
 {
-	struct flow_dissector_key_keyid *key_keyid;
-	struct mpls_label *hdr, _hdr[2];
-	u32 entry, label;
+	struct mpls_label *hdr, _hdr;
+	u32 entry, label, bos;
 	if (!dissector_uses_key(flow_dissector,
 				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
 	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
 		return FLOW_DISSECT_RET_OUT_GOOD;
+	if (lse_index >= FLOW_DIS_MPLS_MAX)
+		return FLOW_DISSECT_RET_OUT_GOOD;
 	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
 				   hlen, &_hdr);
 	if (!hdr)
 		return FLOW_DISSECT_RET_OUT_BAD;
-	entry = ntohl(hdr[0].entry);
+	entry = ntohl(hdr->entry);
 	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
+	bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
 	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
 		struct flow_dissector_key_mpls *key_mpls;
+		struct flow_dissector_mpls_lse *lse;
 		key_mpls = skb_flow_dissector_target(flow_dissector,
 						     FLOW_DISSECTOR_KEY_MPLS,
 						     target_container);
-		key_mpls->mpls_label = label;
-		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
-			>> MPLS_LS_TTL_SHIFT;
-		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
-			>> MPLS_LS_TC_SHIFT;
-		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
-			>> MPLS_LS_S_SHIFT;
+		lse = &key_mpls->ls[lse_index];
+		lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
+		lse->mpls_bos = bos;
+		lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
+		lse->mpls_label = label;
+		dissector_set_mpls_lse(key_mpls, lse_index);
 	}
-	if (label == MPLS_LABEL_ENTROPY &&
+	if (*entropy_label &&
 	    dissector_uses_key(flow_dissector,
 			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+		struct flow_dissector_key_keyid *key_keyid;
 		key_keyid = skb_flow_dissector_target(flow_dissector,
 						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
 						      target_container);
-		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
+		key_keyid->keyid = cpu_to_be32(label);
 	}
-	return FLOW_DISSECT_RET_OUT_GOOD;
+	*entropy_label = label == MPLS_LABEL_ENTROPY;
+	return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
 }
 static enum flow_dissect_ret
@@ -979,6 +991,8 @@ bool __skb_flow_dissect(const struct net *net,
 	struct bpf_prog *attached = NULL;
 	enum flow_dissect_ret fdret;
 	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
+	bool mpls_el = false;
+	int mpls_lse = 0;
 	int num_hdrs = 0;
 	u8 ip_proto = 0;
 	bool ret;
@@ -1278,7 +1292,10 @@ bool __skb_flow_dissect(const struct net *net,
 	case htons(ETH_P_MPLS_MC):
 		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
 						target_container, data,
-						nhoff, hlen);
+						nhoff, hlen, mpls_lse,
+						&mpls_el);
+		nhoff += sizeof(struct mpls_label);
+		mpls_lse++;
 		break;
 	case htons(ETH_P_FCOE):
 		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
@@ -668,6 +668,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_IP_TOS]	= { .type = NLA_U8 },
@@ -726,6 +727,20 @@ erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
 };
+static const struct nla_policy
+mpls_opts_policy[TCA_FLOWER_KEY_MPLS_OPTS_MAX + 1] = {
+	[TCA_FLOWER_KEY_MPLS_OPTS_LSE]		= { .type = NLA_NESTED },
+};
+static const struct nla_policy
+mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
+};
 static void fl_set_key_val(struct nlattr **tb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
@@ -776,14 +791,157 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 	return 0;
 }
+static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
+			       struct flow_dissector_key_mpls *key_val,
+			       struct flow_dissector_key_mpls *key_mask,
+			       struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_val;
+	u8 lse_index;
+	u8 depth;
+	int err;
+	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
+			       mpls_stack_entry_policy, extack);
+	if (err < 0)
+		return err;
+	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
+		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
+		return -EINVAL;
+	}
+	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
+	/* LSE depth starts at 1, for consistency with terminology used by
+	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
+	 */
+	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
+		NL_SET_ERR_MSG_ATTR(extack,
+				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
+				    "Invalid MPLS depth");
+		return -EINVAL;
+	}
+	lse_index = depth - 1;
+	dissector_set_mpls_lse(key_val, lse_index);
+	dissector_set_mpls_lse(key_mask, lse_index);
+	lse_val = &key_val->ls[lse_index];
+	lse_mask = &key_mask->ls[lse_index];
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
+		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
+		lse_mask->mpls_ttl = MPLS_TTL_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
+		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
+		if (bos & ~MPLS_BOS_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
+					    "Bottom Of Stack (BOS) must be 0 or 1");
+			return -EINVAL;
+		}
+		lse_val->mpls_bos = bos;
+		lse_mask->mpls_bos = MPLS_BOS_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
+		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
+		if (tc & ~MPLS_TC_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
+					    "Traffic Class (TC) must be between 0 and 7");
+			return -EINVAL;
+		}
+		lse_val->mpls_tc = tc;
+		lse_mask->mpls_tc = MPLS_TC_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
+		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
+		if (label & ~MPLS_LABEL_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
+					    "Label must be between 0 and 1048575");
+			return -EINVAL;
+		}
+		lse_val->mpls_label = label;
+		lse_mask->mpls_label = MPLS_LABEL_MASK;
+	}
+	return 0;
+}
+static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
+				struct flow_dissector_key_mpls *key_val,
+				struct flow_dissector_key_mpls *key_mask,
+				struct netlink_ext_ack *extack)
+{
+	struct nlattr *nla_lse;
+	int rem;
+	int err;
+	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
+		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
+				    "NLA_F_NESTED is missing");
+		return -EINVAL;
+	}
+	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
+		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
+			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
+					    "Invalid MPLS option type");
+			return -EINVAL;
+		}
+		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
+		if (err < 0)
+			return err;
+	}
+	if (rem) {
+		NL_SET_ERR_MSG(extack,
+			       "Bytes leftover after parsing MPLS options");
+		return -EINVAL;
+	}
+	return 0;
+}
 static int fl_set_key_mpls(struct nlattr **tb,
 			   struct flow_dissector_key_mpls *key_val,
 			   struct flow_dissector_key_mpls *key_mask,
 			   struct netlink_ext_ack *extack)
 {
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_val;
+	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
+		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
+		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
+		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
+		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
+					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
+			return -EBADMSG;
+		}
+		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
+					    key_val, key_mask, extack);
+	}
+	lse_val = &key_val->ls[0];
+	lse_mask = &key_mask->ls[0];
 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
-		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
-		key_mask->mpls_ttl = MPLS_TTL_MASK;
+		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
+		lse_mask->mpls_ttl = MPLS_TTL_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
@@ -794,8 +952,10 @@ static int fl_set_key_mpls(struct nlattr **tb,
 				    "Bottom Of Stack (BOS) must be 0 or 1");
 			return -EINVAL;
 		}
-		key_val->mpls_bos = bos;
-		key_mask->mpls_bos = MPLS_BOS_MASK;
+		lse_val->mpls_bos = bos;
+		lse_mask->mpls_bos = MPLS_BOS_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
@@ -806,8 +966,10 @@ static int fl_set_key_mpls(struct nlattr **tb,
 				    "Traffic Class (TC) must be between 0 and 7");
 			return -EINVAL;
 		}
-		key_val->mpls_tc = tc;
-		key_mask->mpls_tc = MPLS_TC_MASK;
+		lse_val->mpls_tc = tc;
+		lse_mask->mpls_tc = MPLS_TC_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
@@ -818,8 +980,10 @@ static int fl_set_key_mpls(struct nlattr **tb,
 				    "Label must be between 0 and 1048575");
 			return -EINVAL;
 		}
-		key_val->mpls_label = label;
-		key_mask->mpls_label = MPLS_LABEL_MASK;
+		lse_val->mpls_label = label;
+		lse_mask->mpls_label = MPLS_LABEL_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	return 0;
 }
@@ -2218,35 +2382,132 @@ static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
 	return 0;
 }
+static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
+				    struct flow_dissector_key_mpls *mpls_key,
+				    struct flow_dissector_key_mpls *mpls_mask,
+				    u8 lse_index)
+{
+	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
+	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
+	int err;
+	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
+			 lse_index + 1);
+	if (err)
+		return err;
+	if (lse_mask->mpls_ttl) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
+				 lse_key->mpls_ttl);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_bos) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
+				 lse_key->mpls_bos);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_tc) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
+				 lse_key->mpls_tc);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_label) {
+		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+				  lse_key->mpls_label);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+static int fl_dump_key_mpls_opts(struct sk_buff *skb,
+				 struct flow_dissector_key_mpls *mpls_key,
+				 struct flow_dissector_key_mpls *mpls_mask)
+{
+	struct nlattr *opts;
+	struct nlattr *lse;
+	u8 lse_index;
+	int err;
+	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
+	if (!opts)
+		return -EMSGSIZE;
+	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
+		if (!(mpls_mask->used_lses & 1 << lse_index))
+			continue;
+		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
+		if (!lse) {
+			err = -EMSGSIZE;
+			goto err_opts;
+		}
+		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
+					       lse_index);
+		if (err)
+			goto err_opts_lse;
+		nla_nest_end(skb, lse);
+	}
+	nla_nest_end(skb, opts);
+	return 0;
+err_opts_lse:
+	nla_nest_cancel(skb, lse);
+err_opts:
+	nla_nest_cancel(skb, opts);
+	return err;
+}
 static int fl_dump_key_mpls(struct sk_buff *skb,
 			    struct flow_dissector_key_mpls *mpls_key,
 			    struct flow_dissector_key_mpls *mpls_mask)
 {
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_key;
 	int err;
-	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
+	if (!mpls_mask->used_lses)
 		return 0;
-	if (mpls_mask->mpls_ttl) {
+	lse_mask = &mpls_mask->ls[0];
+	lse_key = &mpls_key->ls[0];
+	/* For backward compatibility, don't use the MPLS nested attributes if
+	 * the rule can be expressed using the old attributes.
+	 */
+	if (mpls_mask->used_lses & ~1 ||
+	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
+	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
+		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
+	if (lse_mask->mpls_ttl) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
-				 mpls_key->mpls_ttl);
+				 lse_key->mpls_ttl);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_tc) {
+	if (lse_mask->mpls_tc) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
-				 mpls_key->mpls_tc);
+				 lse_key->mpls_tc);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_label) {
+	if (lse_mask->mpls_label) {
 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
-				  mpls_key->mpls_label);
+				  lse_key->mpls_label);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_bos) {
+	if (lse_mask->mpls_bos) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
-				 mpls_key->mpls_bos);
+				 lse_key->mpls_bos);
 		if (err)
			return err;
 	}