Commit 6d715e30 authored by Nicolas Schichan, committed by David S. Miller

ARM: net: handle negative offsets in BPF JIT.

Previously, the JIT would reject negative offsets known during code
generation and mishandle negative offsets provided at runtime.

Fix that by calling bpf_internal_load_pointer_neg_helper()
appropriately in the jit_get_skb_{b,h,w} slow path helpers and by forcing
the execution flow to the slow path helpers when the offset is
negative.
Signed-off-by: Nicolas Schichan <nschichan@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7aed35cb
...@@ -74,32 +74,52 @@ struct jit_ctx { ...@@ -74,32 +74,52 @@ struct jit_ctx {
int bpf_jit_enable __read_mostly; int bpf_jit_enable __read_mostly;
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
unsigned int size)
{
void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
if (!ptr)
return -EFAULT;
memcpy(ret, ptr, size);
return 0;
}
/*
 * Load one byte from the packet at @offset (may be negative).
 *
 * Returns the error code in the upper 32 bits and the byte value in
 * the lower 32 bits, matching the JIT's two-register (err/value)
 * calling convention for the slow-path load helpers.
 */
static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	/* Negative offsets must go through the neg helper; see
	 * call_neg_helper(). */
	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}
static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{ {
u16 ret; u16 ret;
int err; int err;
err = skb_copy_bits(skb, offset, &ret, 2); if (offset < 0)
err = call_neg_helper(skb, offset, &ret, 2);
else
err = skb_copy_bits(skb, offset, &ret, 2);
return (u64)err << 32 | ntohs(ret); return (u64)err << 32 | ntohs(ret);
} }
static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{ {
u32 ret; u32 ret;
int err; int err;
err = skb_copy_bits(skb, offset, &ret, 4); if (offset < 0)
err = call_neg_helper(skb, offset, &ret, 4);
else
err = skb_copy_bits(skb, offset, &ret, 4);
return (u64)err << 32 | ntohl(ret); return (u64)err << 32 | ntohl(ret);
} }
...@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx) ...@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
case BPF_LD | BPF_B | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS:
load_order = 0; load_order = 0;
load: load:
/* the interpreter will deal with the negative K */
if ((int)k < 0)
return -ENOTSUPP;
emit_mov_i(r_off, k, ctx); emit_mov_i(r_off, k, ctx);
load_common: load_common:
ctx->seen |= SEEN_DATA | SEEN_CALL; ctx->seen |= SEEN_DATA | SEEN_CALL;
...@@ -553,6 +570,18 @@ static int build_body(struct jit_ctx *ctx) ...@@ -553,6 +570,18 @@ static int build_body(struct jit_ctx *ctx)
condt = ARM_COND_HI; condt = ARM_COND_HI;
} }
/*
* test for negative offset, only if we are
* currently scheduled to take the fast
* path. this will update the flags so that
* the slow path instructions are ignored if the
* offset is negative.
*
* for load_order == 0 the HI condition will
* make loads at offset 0 take the slow path too.
*/
_emit(condt, ARM_CMP_I(r_off, 0), ctx);
_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
ctx); ctx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment