Commit 3cb00b90 authored by Sabrina Dubroca, committed by Greg Kroah-Hartman

net: add recursion limit to GRO

[ Upstream commit fcd91dd4 ]

Currently, GRO can do unlimited recursion through the gro_receive
handlers.  This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem.  Thus, the kernel is vulnerable to a stack overflow if we
receive a packet composed entirely of VLAN headers.
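
For illustration (not part of the patch): each 802.1Q level on the wire is just a 16-bit TPID of 0x8100 followed by a 16-bit TCI, so such a frame is trivial to lay out. A hypothetical userspace sketch of the layout, where every tag declares that yet another tag follows:

/* Hypothetical sketch: an Ethernet frame whose payload is nothing but
 * stacked 802.1Q tags.  On an unpatched receiver, each tag drives one
 * more nested vlan_gro_receive() call, so the frame length directly
 * controls the kernel stack depth. */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t build_vlan_stack(uint8_t *buf, size_t len, unsigned int tags)
{
	const uint16_t tpid = htons(0x8100);	/* ETH_P_8021Q */
	size_t off = 12;			/* past dst + src MAC addresses */

	memset(buf, 0, len);
	while (tags-- && off + 4 <= len) {
		memcpy(buf + off, &tpid, 2);	/* EtherType: another VLAN tag */
		off += 4;			/* 2-byte TPID + 2-byte TCI (left zero) */
	}
	return off;				/* header bytes actually written */
}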

This patch adds a recursion counter to the GRO layer to prevent stack
overflow.  When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally.  This recursion
counter is put in the GRO CB, but could be turned into a percpu counter
if we run out of space in the CB.
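
For illustration (not part of the patch), the guard pattern is easy to see outside the kernel: the counter is incremented before every nested gro_receive call, and since the 4-bit field holds values 0..15, a limit of 15 is the deepest nesting it can represent. A minimal userspace sketch of the same idea, where the names mirror the patch and the hypothetical parse_vlan() stands in for a self-encapsulating handler:

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct pkt {
	unsigned int recursion_counter;
	int flush;
};

typedef void (*receive_t)(struct pkt *);

/* Mirrors gro_recursion_inc_test(): bump the per-packet depth counter
 * and report whether the limit has been reached. */
static int recursion_inc_test(struct pkt *p)
{
	return ++p->recursion_counter == GRO_RECURSION_LIMIT;
}

/* Mirrors call_gro_receive(): refuse to nest any deeper once the limit
 * is hit, and mark the packet so aggregation is abandoned. */
static void call_receive(receive_t cb, struct pkt *p)
{
	if (recursion_inc_test(p)) {
		p->flush = 1;	/* abort aggregation; deliver normally */
		return;
	}
	cb(p);
}

/* A self-encapsulated header would recurse here forever; the counter
 * stops the descent at 15 levels. */
static void parse_vlan(struct pkt *p)
{
	call_receive(parse_vlan, p);
}

int main(void)
{
	struct pkt p = { 0, 0 };

	call_receive(parse_vlan, &p);
	printf("depth reached: %u, flush=%d\n", p.recursion_counter, p.flush);
	return 0;
}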

Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.

Fixes: CVE-2016-7039
Fixes: 9b174d88 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 02558fa0
@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 		}
 	}
 
-	pp = eth_gro_receive(head, skb);
+	pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;
 
-	/* 7 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 3 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
@@ -2059,6 +2081,22 @@ struct udp_offload {
 	struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+					      struct sk_buff *,
+					      struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+						    struct sk_buff **head,
+						    struct sk_buff *skb,
+						    struct udp_offload *uoff)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64     rx_packets;
@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -4240,6 +4240,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -339,8 +339,8 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+				  head, skb, uo_priv->offload);
 
 out_unlock:
 	rcu_read_unlock();
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();