Commit ff4d90a8 authored by Pablo Neira Ayuso's avatar Pablo Neira Ayuso

netfilter: nftables_offload: VLAN id needs host byteorder in flow dissector

The flow dissector representation expects the VLAN id in host byteorder.
Add the NFT_OFFLOAD_F_NETWORK2HOST flag to swap the bytes from nft_cmp.

Fixes: a82055af ("netfilter: nft_payload: add VLAN offload support")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 14c20643
...@@ -4,11 +4,16 @@ ...@@ -4,11 +4,16 @@
#include <net/flow_offload.h> #include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h>
/* Modifier flags for a struct nft_offload_reg match register. */
enum nft_offload_reg_flags {
/* Match value is network (big-endian) byte order, but the flow
 * dissector representation expects host byte order; convert the
 * value and mask before loading them into the flow rule.
 */
NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0),
};
/*
 * One offload match register: records which flow dissector key this
 * register maps to, where the value/mask live inside struct
 * nft_flow_key, and how the value must be treated before offloading
 * (see enum nft_offload_reg_flags).
 */
struct nft_offload_reg {
	u32		key;		/* FLOW_DISSECTOR_KEY_* identifier */
	u32		len;		/* match length in bytes */
	u32		base_offset;	/* offset of the key struct in nft_flow_key */
	u32		offset;		/* offset of the field within the key struct */
	u32		flags;		/* enum nft_offload_reg_flags */
	struct nft_data	data;		/* match value */
	struct nft_data	mask;		/* match mask */
};
...@@ -72,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul ...@@ -72,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
void nft_flow_rule_destroy(struct nft_flow_rule *flow); void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net); int nft_flow_rule_offload_commit(struct net *net);
/*
 * Populate offload register @__reg with the flow dissector key/field it
 * matches on.  NFT_OFFLOAD_MATCH_FLAGS() additionally records per-match
 * flags (e.g. NFT_OFFLOAD_F_NETWORK2HOST for fields that the flow
 * dissector expects in host byte order).
 *
 * NOTE(review): deliberately not wrapped in do { } while (0) — callers
 * such as NFT_OFFLOAD_MATCH_EXACT() concatenate further statements onto
 * this expansion.
 */
#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
	(__reg)->base_offset	=					\
		offsetof(struct nft_flow_key, __base);			\
	(__reg)->offset		=					\
		offsetof(struct nft_flow_key, __base.__field);		\
	(__reg)->len		= __len;				\
	(__reg)->key		= __key;				\
	(__reg)->flags		= __flags;

/* Flag-less variant kept for the existing callers. */
#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
	NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \ #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
......
...@@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr) ...@@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
return -1; return -1;
} }
/* Scratch space holding a match value (or mask) after conversion from
 * network to host byte order; which member is used depends on the
 * length of the field being matched (2, 4 or 8 bytes, see
 * nft_payload_n2h()).
 */
union nft_cmp_offload_data {
u16 val16;
u32 val32;
u64 val64;
};
/*
 * Convert a network byte order (big-endian) match value of @len bytes
 * into host byte order, storing the result in @out.  The flow dissector
 * representation expects host-order values, while nft_cmp carries them
 * as seen on the wire.  Only 2/4/8 byte fields are meaningful here;
 * any other length is a programming error and triggers a one-shot
 * warning without touching @out.
 */
static void nft_payload_n2h(union nft_cmp_offload_data *out,
			    const u8 *val, u32 len)
{
	if (len == 2)
		out->val16 = ntohs(*((u16 *)val));
	else if (len == 4)
		out->val32 = ntohl(*((u32 *)val));
	else if (len == 8)
		out->val64 = be64_to_cpu(*((u64 *)val));
	else
		WARN_ON_ONCE(1);
}
static int __nft_cmp_offload(struct nft_offload_ctx *ctx, static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow, struct nft_flow_rule *flow,
const struct nft_cmp_expr *priv) const struct nft_cmp_expr *priv)
{ {
struct nft_offload_reg *reg = &ctx->regs[priv->sreg]; struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
union nft_cmp_offload_data _data, _datamask;
u8 *mask = (u8 *)&flow->match.mask; u8 *mask = (u8 *)&flow->match.mask;
u8 *key = (u8 *)&flow->match.key; u8 *key = (u8 *)&flow->match.key;
u8 *data, *datamask;
if (priv->op != NFT_CMP_EQ || priv->len > reg->len) if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
return -EOPNOTSUPP; return -EOPNOTSUPP;
memcpy(key + reg->offset, &priv->data, reg->len); if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
memcpy(mask + reg->offset, &reg->mask, reg->len); nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
data = (u8 *)&_data;
datamask = (u8 *)&_datamask;
} else {
data = (u8 *)&priv->data;
datamask = (u8 *)&reg->mask;
}
memcpy(key + reg->offset, data, reg->len);
memcpy(mask + reg->offset, datamask, reg->len);
flow->match.dissector.used_keys |= BIT(reg->key); flow->match.dissector.used_keys |= BIT(reg->key);
flow->match.dissector.offset[reg->key] = reg->base_offset; flow->match.dissector.offset[reg->key] = reg->base_offset;
......
...@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx, ...@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16))) if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan, NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
vlan_tci, sizeof(__be16), reg); vlan_tci, sizeof(__be16), reg,
NFT_OFFLOAD_F_NETWORK2HOST);
break; break;
case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto): case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16))) if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
...@@ -241,8 +242,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx, ...@@ -241,8 +242,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16))) if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan, NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
vlan_tci, sizeof(__be16), reg); vlan_tci, sizeof(__be16), reg,
NFT_OFFLOAD_F_NETWORK2HOST);
break; break;
case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) + case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
sizeof(struct vlan_hdr): sizeof(struct vlan_hdr):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment