Commit 078295fb authored by David S. Miller

Merge branch 'bpf-new-branches'

Daniel Borkmann says:

====================
bpf: Add BPF_J{LT,LE,SLT,SLE} instructions

This set adds BPF_J{LT,LE,SLT,SLE} instructions to the BPF
insn set, interpreter, JIT hardening code and all JITs are
also updated to support the new instructions. Basic idea is
to reduce register pressure by avoiding BPF_J{GT,GE,SGT,SGE}
rewrites. Once the workaround for those rewrites is removed
from LLVM, this can result in shorter BPF programs, less stack usage
and less verification complexity. First patch provides some
more details on rationale and integration.

Thanks a lot!

v1 -> v2:
  - Reworded commit msg in patch 1
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0bdf7101 31e482bf
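As background for the register-pressure argument in the cover letter, here is a minimal, purely illustrative sketch using the kernel's insn macros from include/linux/filter.h (register numbers and the surrounding program are invented for the example, and this shows one common workaround, not the only possible one):

#include <linux/filter.h>

/* Sketch only: before this series, "if (R1 < 100)" could not be emitted
 * directly, since only the {JGT,JGE,JSGT,JSGE} forms existed and the
 * immediate is always the second operand; one common rewrite moved the
 * constant into a scratch register and used the swapped BPF_JGT form,
 * costing an extra insn and an extra register:
 */
static const struct bpf_insn without_jlt[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_2, 100),			/* scratch reg just for the compare */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),	/* 100 > R1  <=>  R1 < 100 */
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
};

/* With BPF_JLT the same test needs neither the extra insn nor the scratch reg: */
static const struct bpf_insn with_jlt[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 100, 1),	/* R1 < 100 */
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
};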
@@ -906,6 +906,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
BPF_JSGE 0x70 /* eBPF only: signed '>=' */
BPF_CALL 0x80 /* eBPF only: function call */
BPF_EXIT 0x90 /* eBPF only: function return */
BPF_JLT 0xa0 /* eBPF only: unsigned '<' */
BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */
BPF_JSLT 0xc0 /* eBPF only: signed '<' */
BPF_JSLE 0xd0 /* eBPF only: signed '<=' */

So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
and eBPF. There are only two registers in classic BPF, so it means A += X.
...
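To make the opcode composition described above concrete: an instruction's opcode byte is the OR of its class, operation and source flag. A small illustrative example (values as in the table and include/uapi/linux/bpf.h; the EX_* macro names are invented for the example):

#include <linux/bpf.h>		/* BPF_JMP, BPF_X, BPF_K, BPF_JLT, BPF_JSLE, ... */

/* Example opcode bytes built from the table above: */
#define EX_JLT_REG	(BPF_JMP | BPF_JLT | BPF_X)	/* 0x05 | 0xa0 | 0x08 = 0xad */
#define EX_JSLE_IMM	(BPF_JMP | BPF_JSLE | BPF_K)	/* 0x05 | 0xd0 | 0x00 = 0xd5 */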
@@ -44,8 +44,12 @@
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
/* Unconditional branch (immediate) */
...
@@ -527,10 +527,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -542,9 +546,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_JGT:
jmp_cond = A64_COND_HI;
break;
case BPF_JLT:
jmp_cond = A64_COND_CC;
break;
case BPF_JGE:
jmp_cond = A64_COND_CS;
break;
case BPF_JLE:
jmp_cond = A64_COND_LS;
break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = A64_COND_NE;
@@ -552,9 +562,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_JSGT:
jmp_cond = A64_COND_GT;
break;
case BPF_JSLT:
jmp_cond = A64_COND_LT;
break;
case BPF_JSGE:
jmp_cond = A64_COND_GE;
break;
case BPF_JSLE:
jmp_cond = A64_COND_LE;
break;
default:
return -EFAULT;
}
@@ -566,10 +582,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_CMP(1, dst, tmp), ctx);
goto emit_cond_jmp;
...
@@ -263,6 +263,7 @@ static inline bool is_nearbranch(int offset)
#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
#define COND_NE (CR0_EQ | COND_CMP_FALSE)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
#define COND_LE (CR0_GT | COND_CMP_FALSE)
#endif
...
@@ -795,12 +795,24 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_JMP | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
true_cond = COND_LT;
goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
true_cond = COND_LE;
goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
@@ -817,14 +829,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
cond_branch:
switch (code) {
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
/* unsigned comparison */
PPC_CMPLD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
/* signed comparison */
PPC_CMPD(dst_reg, src_reg);
break;
@@ -834,7 +850,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmpldi
@@ -849,7 +867,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
}
break;
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
/*
* signed comparison, so any 16-bit value
* can be used in cmpdi
...
@@ -1093,15 +1093,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
mask = 0x2000; /* jh */
goto branch_ks;
case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
mask = 0x4000; /* jl */
goto branch_ks;
case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
mask = 0xa000; /* jhe */
goto branch_ks;
case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
mask = 0xc000; /* jle */
goto branch_ks;
case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
mask = 0x2000; /* jh */
goto branch_ku;
case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
mask = 0x4000; /* jl */
goto branch_ku;
case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
mask = 0xa000; /* jhe */
goto branch_ku;
case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
mask = 0xc000; /* jle */
goto branch_ku;
case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
mask = 0x7000; /* jne */
goto branch_ku;
@@ -1119,15 +1131,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
mask = 0x2000; /* jh */
goto branch_xs;
case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
mask = 0x4000; /* jl */
goto branch_xs;
case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
mask = 0xa000; /* jhe */
goto branch_xs;
case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
mask = 0xc000; /* jle */
goto branch_xs;
case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
mask = 0x2000; /* jh */
goto branch_xu;
case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
mask = 0x4000; /* jl */
goto branch_xu;
case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
mask = 0xa000; /* jhe */
goto branch_xu;
case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
mask = 0xc000; /* jle */
goto branch_xu;
case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
mask = 0x7000; /* jne */
goto branch_xu;
...
@@ -128,6 +128,8 @@ static u32 WDISP10(u32 off)
#define BA (BRANCH | CONDA)
#define BG (BRANCH | CONDG)
#define BL (BRANCH | CONDL)
#define BLE (BRANCH | CONDLE)
#define BGU (BRANCH | CONDGU)
#define BLEU (BRANCH | CONDLEU)
#define BGE (BRANCH | CONDGE)
@@ -715,9 +717,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JGT:
br_opcode = BGU;
break;
case BPF_JLT:
br_opcode = BLU;
break;
case BPF_JGE:
br_opcode = BGEU;
break;
case BPF_JLE:
br_opcode = BLEU;
break;
case BPF_JSET:
case BPF_JNE:
br_opcode = BNE;
@@ -725,9 +733,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JSGT:
br_opcode = BG;
break;
case BPF_JSLT:
br_opcode = BL;
break;
case BPF_JSGE:
br_opcode = BGE;
break;
case BPF_JSLE:
br_opcode = BLE;
break;
default:
/* Make sure we dont leak kernel information to the
* user.
@@ -746,18 +760,30 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JGT:
cbcond_opcode = CBCONDGU;
break;
case BPF_JLT:
cbcond_opcode = CBCONDLU;
break;
case BPF_JGE:
cbcond_opcode = CBCONDGEU;
break;
case BPF_JLE:
cbcond_opcode = CBCONDLEU;
break;
case BPF_JNE:
cbcond_opcode = CBCONDNE;
break;
case BPF_JSGT:
cbcond_opcode = CBCONDG;
break;
case BPF_JSLT:
cbcond_opcode = CBCONDL;
break;
case BPF_JSGE:
cbcond_opcode = CBCONDGE;
break;
case BPF_JSLE:
cbcond_opcode = CBCONDLE;
break;
default:
/* Make sure we dont leak kernel information to the
* user.
@@ -1176,10 +1202,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSET | BPF_X: {
int err;
@@ -1191,10 +1221,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSET | BPF_K: {
int err;
...
@@ -94,7 +94,9 @@ static int bpf_size_to_x86_bytes(int bpf_size)
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA 0x77
#define X86_JL 0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG 0x7F
static void bpf_flush_icache(void *start, void *end)
@@ -888,9 +890,13 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
add_2reg(0xC0, dst_reg, src_reg));
@@ -911,9 +917,13 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
/* cmp dst_reg, imm8/32 */
EMIT1(add_1mod(0x48, dst_reg));
@@ -935,18 +945,34 @@ xadd: if (is_imm8(insn->off))
/* GT is unsigned '>', JA in x86 */
jmp_cond = X86_JA;
break;
case BPF_JLT:
/* LT is unsigned '<', JB in x86 */
jmp_cond = X86_JB;
break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = X86_JAE;
break;
case BPF_JLE:
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = X86_JBE;
break;
case BPF_JSGT:
/* signed '>', GT in x86 */
jmp_cond = X86_JG;
break;
case BPF_JSLT:
/* signed '<', LT in x86 */
jmp_cond = X86_JL;
break;
case BPF_JSGE:
/* signed '>=', GE in x86 */
jmp_cond = X86_JGE;
break;
case BPF_JSLE:
/* signed '<=', LE in x86 */
jmp_cond = X86_JLE;
break;
default: /* to silence gcc warning */
return -EFAULT;
}
...
@@ -1238,6 +1238,16 @@ static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1325,6 +1335,16 @@ static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1383,11 +1403,15 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
[BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_EXIT] = goto_out,
...
@@ -30,9 +30,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
...
@@ -595,9 +595,13 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSET | BPF_K:
/* Accommodate for extra offset in case of a backjump. */
off = from->off;
@@ -833,12 +837,20 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
/* Program return */
@@ -1073,6 +1085,18 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
CONT_JMP;
}
CONT;
JMP_JLT_X:
if (DST < SRC) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JLT_K:
if (DST < IMM) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JGE_X:
if (DST >= SRC) {
insn += insn->off;
@@ -1085,6 +1109,18 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
CONT_JMP;
}
CONT;
JMP_JLE_X:
if (DST <= SRC) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JLE_K:
if (DST <= IMM) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JSGT_X:
if (((s64) DST) > ((s64) SRC)) {
insn += insn->off;
@@ -1097,6 +1133,18 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
CONT_JMP;
}
CONT;
JMP_JSLT_X:
if (((s64) DST) < ((s64) SRC)) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JSLT_K:
if (((s64) DST) < ((s64) IMM)) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JSGE_X:
if (((s64) DST) >= ((s64) SRC)) {
insn += insn->off;
@@ -1109,6 +1157,18 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
CONT_JMP;
}
CONT;
JMP_JSLE_X:
if (((s64) DST) <= ((s64) SRC)) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JSLE_K:
if (((s64) DST) <= ((s64) IMM)) {
insn += insn->off;
CONT_JMP;
}
CONT;
JMP_JSET_X:
if (DST & SRC) {
insn += insn->off;
...
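The &&JMP_JLT_X style entries added above rely on GCC's labels-as-values / computed-goto extension: the jump table holds label addresses and each handler dispatches straight to the next opcode's handler. A stripped-down sketch of that dispatch pattern (not taken from the patch; the opcodes and handlers are invented for illustration):

static int run(const unsigned char *ops, int n)
{
	static const void *jumptable[256] = {
		[0 ... 255]	= &&unknown,	/* default handler */
		['+']		= &&do_inc,
		['-']		= &&do_dec,
	};
	int i = -1, acc = 0;

dispatch:
	if (++i >= n)
		return acc;
	goto *jumptable[ops[i]];	/* indirect jump to the handler */
do_inc:
	acc++;
	goto dispatch;
do_dec:
	acc--;
	goto dispatch;
unknown:
	return -1;
}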
@@ -312,11 +312,15 @@ static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
[BPF_JGT >> 4] = ">",
[BPF_JLT >> 4] = "<",
[BPF_JGE >> 4] = ">=",
[BPF_JLE >> 4] = "<=",
[BPF_JSET >> 4] = "&",
[BPF_JNE >> 4] = "!=",
[BPF_JSGT >> 4] = "s>",
[BPF_JSLT >> 4] = "s<",
[BPF_JSGE >> 4] = "s>=",
[BPF_JSLE >> 4] = "s<=",
[BPF_CALL >> 4] = "call",
[BPF_EXIT >> 4] = "exit",
};
@@ -2383,27 +2387,37 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
*/
return;
/* LLVM can generate four kind of checks:
*
* Type 1/2:
*
* r2 = r3;
* r2 += 8;
* if (r2 > pkt_end) goto <handle exception>
* <access okay>
*
* r2 = r3;
* r2 += 8;
* if (r2 < pkt_end) goto <access okay>
* <handle exception>
*
* Where:
* r2 == dst_reg, pkt_end == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Type 3/4:
*
* r2 = r3;
* r2 += 8;
* if (pkt_end >= r2) goto <access okay>
* <handle exception>
*
* r2 = r3;
* r2 += 8;
* if (pkt_end <= r2) goto <handle exception>
* <access okay>
*
* Where:
* pkt_end == dst_reg, r2 == src_reg
* r2=pkt(id=n,off=8,r=0)
@@ -2471,6 +2485,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
break;
case BPF_JLT:
false_reg->umin_value = max(false_reg->umin_value, val);
true_reg->umax_value = min(true_reg->umax_value, val - 1);
break;
case BPF_JSLT:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
break;
case BPF_JGE:
false_reg->umax_value = min(false_reg->umax_value, val - 1);
true_reg->umin_value = max(true_reg->umin_value, val);
@@ -2479,6 +2501,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
break;
case BPF_JLE:
false_reg->umin_value = max(false_reg->umin_value, val + 1);
true_reg->umax_value = min(true_reg->umax_value, val);
break;
case BPF_JSLE:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
break;
default:
break;
}
@@ -2527,6 +2557,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
break;
case BPF_JLT:
true_reg->umin_value = max(true_reg->umin_value, val + 1);
false_reg->umax_value = min(false_reg->umax_value, val);
break;
case BPF_JSLT:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
break;
case BPF_JGE:
true_reg->umax_value = min(true_reg->umax_value, val);
false_reg->umin_value = max(false_reg->umin_value, val + 1);
@@ -2535,6 +2573,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
break;
case BPF_JLE:
true_reg->umin_value = max(true_reg->umin_value, val);
false_reg->umax_value = min(false_reg->umax_value, val - 1);
break;
case BPF_JSLE:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
break;
default:
break;
}
@@ -2659,7 +2705,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose("invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
@@ -2761,10 +2807,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
find_good_pkt_pointers(this_branch, dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
find_good_pkt_pointers(other_branch, dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
dst_reg->type == PTR_TO_PACKET_END &&
regs[insn->src_reg].type == PTR_TO_PACKET) {
find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
dst_reg->type == PTR_TO_PACKET_END &&
regs[insn->src_reg].type == PTR_TO_PACKET) {
find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
...
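For reference, the four check shapes that the updated find_good_pkt_pointers() comment and the new BPF_JLT/BPF_JLE branches in check_cond_jmp_op() handle, written out as restricted C the way such bounds checks are typically expressed in a BPF program (purely illustrative; data/data_end stand in for skb->data and skb->data_end):

static inline int parse_pkt(unsigned char *data, unsigned char *data_end)
{
	unsigned char *p = data;

	if (p + 8 > data_end)		/* Type 1: branch taken => handle exception */
		return 0;
	p[7] = 0;			/* access okay on fall-through */

	if (p + 8 < data_end)		/* Type 2: branch taken => access okay */
		p[7] = 1;

	if (data_end >= p + 8)		/* Type 3: branch taken => access okay */
		p[7] = 2;

	if (data_end <= p + 8)		/* Type 4: branch taken => handle exception */
		return 0;
	p[7] = 3;			/* access okay on fall-through */

	return 1;
}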
@@ -951,6 +951,32 @@ static struct bpf_test tests[] = {
{ 4, 4, 4, 3, 3 },
{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
},
{
"JGE (jt 0), test 1",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 4, 4, 4, 3, 3 },
{ { 2, 0 }, { 3, 1 }, { 4, 1 } },
},
{
"JGE (jt 0), test 2",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 4, 4, 5, 3, 3 },
{ { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
},
{
"JGE",
.u.insns = {
@@ -4492,6 +4518,35 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGT | BPF_K */
{
"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
@@ -4521,6 +4576,73 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLE | BPF_K */
{
"JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: value walk 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: value walk 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
BPF_ALU64_IMM(BPF_SUB, R1, 2),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
BPF_ALU64_IMM(BPF_SUB, R1, 2),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGE | BPF_K */
{
"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
@@ -4617,6 +4739,35 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_K */
{
"JMP_JLT_K: if (2 < 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 2),
BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 1),
BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGE | BPF_K */
{
"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4632,6 +4783,21 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLE | BPF_K */
{
"JMP_JLE_K: if (2 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 2),
BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
{
"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
@@ -4662,6 +4828,36 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_K jump backwards */
{
"JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
.u.insns_int = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_K: if (3 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JNE | BPF_K */
{
"JMP_JNE_K: if (3 != 2) return 1",
@@ -4752,6 +4948,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLT | BPF_X */
{
"JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGE | BPF_X */
{
"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
@@ -4783,6 +5010,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLE | BPF_X */
{
"JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGT | BPF_X */
{
"JMP_JGT_X: if (3 > 2) return 1",
@@ -4814,6 +5072,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_X */
{
"JMP_JLT_X: if (2 < 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, 1),
BPF_JMP_REG(BPF_JLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGE | BPF_X */
{
"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4845,6 +5134,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLE | BPF_X */
{
"JMP_JLE_X: if (2 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_X: if (3 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 3),
BPF_JMP_REG(BPF_JLE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
/* Mainly testing JIT + imm64 here. */
"JMP_JGE_X: ldimm64 test 1",
@@ -4890,6 +5210,50 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_X: ldimm64 test 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 2),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xeeeeeeeeU } },
},
{
"JMP_JLE_X: ldimm64 test 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 0),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffffU } },
},
{
"JMP_JLE_X: ldimm64 test 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 4),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
...
@@ -514,14 +514,27 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
break;
}
/* Convert some jumps when 'jump_true' is next insn. */
if (fp->jt == 0) {
switch (BPF_OP(fp->code)) {
case BPF_JEQ:
insn->code = BPF_JMP | BPF_JNE | bpf_src;
break;
case BPF_JGT:
insn->code = BPF_JMP | BPF_JLE | bpf_src;
break;
case BPF_JGE:
insn->code = BPF_JMP | BPF_JLT | bpf_src;
break;
default:
goto jmp_rest;
}
target = i + fp->jf + 1;
BPF_EMIT_JMP;
break;
}
jmp_rest:
/* Other jumps are mapped into two insns: Jxx and JA. */
target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
...
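The effect of the bpf_convert_filter() change above, sketched on a single classic insn (offsets are only illustrative, the converter recomputes them): a conditional whose true branch simply falls through used to need a Jxx plus an unconditional JA, while with the new opcodes the condition is inverted and one insn jumps straight to the false target.

#include <linux/filter.h>

/* Classic BPF: "if (A > 7) fall through, else skip 3 insns" */
static const struct sock_filter classic_jgt =
	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 7, 0, 3);

/* Old conversion (two eBPF insns): JGT hops over the JA, JA goes to jf. */
static const struct bpf_insn old_form[] = {
	BPF_JMP_IMM(BPF_JGT, BPF_REG_A, 7, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),		/* offset to the jf target */
};

/* New conversion (one eBPF insn): condition inverted, jump to jf directly. */
static const struct bpf_insn new_form[] = {
	BPF_JMP_IMM(BPF_JLE, BPF_REG_A, 7, 3),	/* offset to the jf target */
};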
@@ -30,9 +30,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
...
@@ -2830,6 +2830,79 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test25 (marking on <, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test26 (marking on <, bad access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test27 (marking on <=, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test28 (marking on <=, bad access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test1, valid packet_ptr range",
.insns = {
@@ -4488,6 +4561,246 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using <, good access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using <, bad access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = REJECT,
.errstr = "R1 unbounded memory access",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using <=, good access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using <=, bad access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = REJECT,
.errstr = "R1 unbounded memory access",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<, good access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<, good access 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<, bad access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = REJECT,
.errstr = "R1 min value is negative",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<=, good access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<=, good access 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: bounds check using s<=, bad access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = REJECT,
.errstr = "R1 min value is negative",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"map element value is preserved across register spilling",
.insns = {
...