Commit a70b506e authored by Daniel Borkmann, committed by David S. Miller

bpf: enforce recursion limit on redirects

Respect the stack's xmit_recursion limit for calls into dev_queue_xmit().
Currently, they are not handled by the limiter when attached to clsact's
egress parent, for example, and a buggy program redirecting the skb to the
same device again could eventually run into a stack overflow. It would be
good if we could notify an admin to give them a chance to react. We reuse
xmit_recursion instead of adding a counter private to eBPF, so that the
stack's current recursion depth is taken into account as well. Follow-up to
commit 3896d655 ("bpf: introduce bpf_clone_redirect() helper") and
27b29f63 ("bpf: add bpf_redirect() helper").
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f2a4d086
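
For illustration, a minimal userspace C sketch of the guard pattern the patch applies around dev_queue_xmit(): a per-context depth counter is checked before re-entering the transmit path and incremented/decremented around the call. The names xmit_depth and fake_xmit() are hypothetical stand-ins for the kernel's per-CPU xmit_recursion counter and the real transmit path; this is not the patch code itself.

/*
 * Illustrative userspace sketch of the recursion guard used in the patch.
 * A thread-local counter stands in for the kernel's per-CPU xmit_recursion;
 * fake_xmit() stands in for dev_queue_xmit() re-entered via a redirect.
 */
#include <stdio.h>
#include <errno.h>

#define XMIT_RECURSION_LIMIT    10

static __thread int xmit_depth; /* analogue of the per-CPU xmit_recursion */

static int fake_xmit(int hops_left)
{
        int ret;

        /* Refuse to recurse past the limit, as __bpf_tx_skb() does. */
        if (xmit_depth > XMIT_RECURSION_LIMIT) {
                fprintf(stderr, "recursion limit reached, dropping\n");
                return -ENETDOWN;
        }

        xmit_depth++;
        /* A redirect back to the same device re-enters the xmit path. */
        ret = hops_left > 0 ? fake_xmit(hops_left - 1) : 0;
        xmit_depth--;

        return ret;
}

int main(void)
{
        printf("8 hops  -> %d\n", fake_xmit(8));  /* within limit: 0 */
        printf("50 hops -> %d\n", fake_xmit(50)); /* stopped: -ENETDOWN */
        return 0;
}
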
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2389,6 +2389,8 @@ void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
 DECLARE_PER_CPU(int, xmit_recursion);
+#define XMIT_RECURSION_LIMIT	10
+
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(xmit_recursion);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3144,8 +3144,6 @@ static void skb_update_prio(struct sk_buff *skb)
 DEFINE_PER_CPU(int, xmit_recursion);
 EXPORT_SYMBOL(xmit_recursion);
 
-#define RECURSION_LIMIT 10
-
 /**
  *	dev_loopback_xmit - loop back @skb
  *	@net: network namespace this loopback is happening in
@@ -3388,8 +3386,8 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
 		if (txq->xmit_lock_owner != cpu) {
-
-			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+			if (unlikely(__this_cpu_read(xmit_recursion) >
+				     XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
 			skb = validate_xmit_skb(skb, dev);
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1603,9 +1603,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+	return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	int ret;
+
+	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+		kfree_skb(skb);
+		return -ENETDOWN;
+	}
+
+	skb->dev = dev;
+
+	__this_cpu_inc(xmit_recursion);
+	ret = dev_queue_xmit(skb);
+	__this_cpu_dec(xmit_recursion);
+
+	return ret;
+}
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	struct net_device *dev;
 
 	if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1615,19 +1642,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
 		return -ENOMEM;
 
-	if (flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb2))
-			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
-					   skb2->mac_len);
-		return dev_forward_skb(dev, skb2);
-	}
-
-	skb2->dev = dev;
-	return dev_queue_xmit(skb2);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1671,15 +1691,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (ri->flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb))
-			skb_postpush_rcsum(skb, skb_mac_header(skb),
-					   skb->mac_len);
-		return dev_forward_skb(dev, skb);
-	}
-
-	skb->dev = dev;
-	return dev_queue_xmit(skb);
+	return ri->flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
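
For context, a hedged sketch of the buggy scenario the limit now catches: a clsact/tc eBPF program attached on egress that redirects every packet back out the same device, re-entering dev_queue_xmit() each time. The section name and TARGET_IFINDEX value are assumptions for illustration; with current libbpf the section is typically SEC("tc"), while older toolchains used SEC("classifier").

/* Hypothetical clsact egress program: redirects each packet back out the
 * same device -- the loop that the xmit_recursion check now terminates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TARGET_IFINDEX 2	/* assumption: ifindex of the attached device */

SEC("tc")
int redirect_to_self(struct __sk_buff *skb)
{
	/* bpf_redirect() returns TC_ACT_REDIRECT; on egress this re-enters
	 * the transmit path, which the per-CPU counter now bounds. */
	return bpf_redirect(TARGET_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";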