Commit 3b27550c authored by Jesse Gross, committed by Jiri Slaby

openvswitch: Zero flows on allocation.

[ Upstream commit ae5f2fb1 ]

When support for megaflows was introduced, OVS needed to start
installing flows with a mask applied to them. Since masking is an
expensive operation, OVS also had an optimization that would only
take the parts of the flow keys that were covered by a non-zero
mask. The values stored in the remaining pieces should not matter
because they are masked out.
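
For illustration only (not part of this patch): a minimal standalone sketch of the range-limited masked copy described above. The types toy_key and toy_mask are simplified stand-ins for the kernel's sw_flow_key and sw_flow_mask, and the start/end byte pair plays the role of mask->range.

    #include <stddef.h>

    struct toy_key { unsigned long words[8]; };   /* stand-in for sw_flow_key  */

    struct toy_mask {                             /* stand-in for sw_flow_mask */
            struct toy_key key;                   /* the mask bits             */
            size_t start;                         /* first byte with a non-zero mask */
            size_t end;                           /* one past the last such byte     */
    };

    /* Copy only the bytes of 'src' that fall inside [start, end), ANDed with
     * the mask.  Bytes of 'dst' outside that range are never written, so a
     * stack-allocated 'dst' keeps whatever was already in that memory.
     * start and end are assumed to be multiples of sizeof(long), as the
     * kernel's mask->range guarantees. */
    static void toy_key_mask(struct toy_key *dst, const struct toy_key *src,
                             const struct toy_mask *mask)
    {
            const long *m = (const long *)((const unsigned char *)&mask->key + mask->start);
            const long *s = (const long *)((const unsigned char *)src + mask->start);
            long *d = (long *)((unsigned char *)dst + mask->start);
            size_t i;

            for (i = 0; i < mask->end - mask->start; i += sizeof(long))
                    *d++ = *s++ & *m++;
    }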

While this works fine for the purposes of matching (which must always
look at the mask), serialization to netlink can be problematic. Since
the flow and the mask are serialized separately, the uninitialized
portions of the flow can be encoded with whatever values happen to be
present.

In terms of functionality, this has little effect since these fields
will be masked out by definition. However, it leaks kernel memory to
userspace, which is a potential security vulnerability. It is also
possible that other code paths could look at the masked key and get
uninitialized data, although this does not currently appear to be an
issue in practice.

This removes the mask optimization for flows that are being installed.
This was always intended to be the case as the mask optimizations were
really targeting per-packet flow operations.
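
A sketch of what the fix changes, reusing the hypothetical toy types from the example above (the real change to ovs_flow_key_mask is in the diff below): a 'full' flag selects between masking the whole key, used when installing a flow, and the range-limited fast path kept for per-packet lookups.

    /* When 'full' is non-zero, mask and copy the entire key so no byte of
     * 'dst' is left uninitialized; otherwise keep the range-limited
     * optimization. */
    static void toy_key_mask_full(struct toy_key *dst, const struct toy_key *src,
                                  int full, const struct toy_mask *mask)
    {
            size_t start = full ? 0 : mask->start;
            size_t len = full ? sizeof(*dst) : mask->end - mask->start;
            const long *m = (const long *)((const unsigned char *)&mask->key + start);
            const long *s = (const long *)((const unsigned char *)src + start);
            long *d = (long *)((unsigned char *)dst + start);
            size_t i;

            for (i = 0; i < len; i += sizeof(long))
                    *d++ = *s++ & *m++;
    }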

Fixes: 03f0d916 ("openvswitch: Mega flow implementation")
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 540a0bd9
@@ -1266,7 +1266,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                 if (IS_ERR(acts))
                         goto error;
 
-                ovs_flow_key_mask(&masked_key, &key, &mask);
+                ovs_flow_key_mask(&masked_key, &key, true, &mask);
                 error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
                                                   &masked_key, 0, &acts);
                 if (error) {
...
@@ -373,18 +373,21 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
 }
 
 void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
-                       const struct sw_flow_mask *mask)
+                       bool full, const struct sw_flow_mask *mask)
 {
-        const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-        const long *s = (long *)((u8 *)src + mask->range.start);
-        long *d = (long *)((u8 *)dst + mask->range.start);
+        int start = full ? 0 : mask->range.start;
+        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+        const long *m = (const long *)((const u8 *)&mask->key + start);
+        const long *s = (const long *)((const u8 *)src + start);
+        long *d = (long *)((u8 *)dst + start);
         int i;
 
-        /* The memory outside of the 'mask->range' are not set since
-         * further operations on 'dst' only uses contents within
-         * 'mask->range'.
+        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+         * if 'full' is false the memory outside of the 'mask->range' is left
+         * uninitialized. This can be used as an optimization when further
+         * operations on 'dst' only use contents within 'mask->range'.
          */
-        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+        for (i = 0; i < len; i += sizeof(long))
                 *d++ = *s++ & *m++;
 }
...
@@ -1085,7 +1088,7 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
         u32 hash;
         struct sw_flow_key masked_key;
 
-        ovs_flow_key_mask(&masked_key, unmasked, mask);
+        ovs_flow_key_mask(&masked_key, unmasked, false, mask);
         hash = ovs_flow_hash(&masked_key, key_start, key_end);
         head = find_bucket(table, hash);
         hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
...
@@ -255,5 +255,5 @@ void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
 struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
                                            const struct sw_flow_mask *);
 void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
-                       const struct sw_flow_mask *mask);
+                       bool full, const struct sw_flow_mask *mask);
 #endif /* flow.h */