Commit ef9fde06 authored by Daniel Borkmann

Merge branch 'bpf-to-bpf-function-calls'

Alexei Starovoitov says:

====================
First of all, a huge thank you to Daniel, John, Jakub, Edward and others who
reviewed multiple iterations of this patch set over the last many months,
and to Dave and others who gave critical feedback during netconf/netdev.

The patch set is solid enough and we have thought through numerous corner cases,
but it's not the end. More followups with code reorg and features will follow.

TLDR: Allow arbitrary function calls from one bpf function to another bpf function.

Since the beginning of bpf, all bpf programs have been represented as a single function,
and program authors were forced to use always_inline for all functions
in their C code. That caused llvm to unnecessarily inflate the code size
and forced developers to move code into header files with little code reuse.

With a bit of additional complexity, the verifier is taught to recognize
arbitrary function calls from one bpf function to another, as long as
all of the functions are presented to the verifier as a single bpf program.
Extended program layout:
..
r1 = ..    // arg1
r2 = ..    // arg2
call pc+1  // function call pc-relative
exit
.. = r1    // access arg1
.. = r2    // access arg2
..
call pc+20 // second level of function call
...
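
For illustration, a hedged C sketch (made-up names clamp_len/prog and a
minimal SEC() macro instead of bpf_helpers.h) of source that llvm can now
compile into the layout above without any always_inline:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* an ordinary function; noinline only keeps it as a real call for the example */
static __attribute__((noinline)) __u32 clamp_len(__u32 len)
{
	return len > 1500 ? 1500 : len;
}

SEC("socket")
int prog(struct __sk_buff *skb)
{
	/* llvm >= 6.0 emits "call pc+N" with src_reg == BPF_PSEUDO_CALL here */
	return clamp_len(skb->len);
}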

This allows for better optimized code and finally makes it possible to introduce
core bpf libraries that can be reused across different projects,
since programs are no longer limited to a single elf file.
With function calls, bpf code can be compiled into multiple .o files.

This patch is the first step. It detects programs that contain
multiple functions and checks that calls between them are valid.
It splits the sequence of bpf instructions (one program) into a set
of bpf functions that call each other. Only calls to known
functions are allowed. Since all functions are presented to
the verifier at once, conceptually this is 'static linking'.
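
A hedged sketch (not part of the patch) of how a pc-relative call is mapped
to a subprogram start, mirroring add_subprog()/check_subprogs() below:

#include <linux/bpf.h>

/* imm of a BPF_PSEUDO_CALL insn is relative to the insn after the call,
 * so the callee's first insn is at call_idx + imm + 1
 */
static int callee_start(const struct bpf_insn *insns, int call_idx)
{
	return call_idx + insns[call_idx].imm + 1;
}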

Future plans:
- introduce BPF_PROG_TYPE_LIBRARY and allow a set of bpf functions
  to be loaded into the kernel that can be later linked to other
  programs with concrete program types. Aka 'dynamic linking'.

- introduce function pointer type and indirect calls to allow
  bpf functions to call other dynamically loaded bpf functions while
  the caller bpf function is already executing. Aka 'runtime linking'.
  This will be a more generic and more flexible alternative
  to bpf_tail_calls.

FAQ:
Q: Do the interpreter and JIT changes mean that a new instruction is introduced?
A: No. The call instruction technically stays the same. Now it can call
   both kernel helpers and other bpf functions.
   The calling convention stays the same as well.
   From the uapi point of view the call insn gains a new 'relocation',
   BPF_PSEUDO_CALL, similar to the BPF_PSEUDO_MAP_FD 'relocation' of the
   bpf_ldimm64 insn.
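
   A hedged sketch of what such a call looks like at the instruction level,
   using the uapi struct bpf_insn (the offset value of 1 is illustrative):

#include <linux/bpf.h>

#ifndef BPF_PSEUDO_CALL
#define BPF_PSEUDO_CALL 1	/* added by this series */
#endif

/* "call pc+1": skip the next insn and enter the callee */
static const struct bpf_insn bpf2bpf_call = {
	.code    = BPF_JMP | BPF_CALL,	/* same opcode as helper calls */
	.dst_reg = 0,
	.src_reg = BPF_PSEUDO_CALL,	/* marks a bpf-to-bpf call */
	.off     = 0,
	.imm     = 1,			/* pc-relative offset to the callee */
};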

Q: What had to change on the LLVM side?
A: A trivial LLVM patch to allow calls was applied to the upcoming 6.0 release:
   https://reviews.llvm.org/rL318614
   along with a few bugfixes.
   Make sure to build the latest llvm to have bpf_call support.

More details in the patches.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 0bce7c9a 28ab173e
......@@ -1824,7 +1824,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If BPF JIT was not enabled then we must fall back to
* the interpreter.
*/
if (!bpf_jit_enable)
if (!prog->jit_requested)
return orig_prog;
/* If constant blinding was enabled and we failed during blinding
......
......@@ -99,6 +99,20 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
}
}
static inline void emit_addr_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
{
u64 tmp = val;
int shift = 0;
emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
for (;shift < 48;) {
tmp >>= 16;
shift += 16;
emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
}
}
static inline void emit_a64_mov_i(const int is64, const int reg,
const s32 val, struct jit_ctx *ctx)
{
......@@ -603,7 +617,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 r0 = bpf2a64[BPF_REG_0];
const u64 func = (u64)__bpf_call_base + imm;
emit_a64_mov_i64(tmp, func, ctx);
if (ctx->prog->is_func)
emit_addr_mov_i64(tmp, func, ctx);
else
emit_a64_mov_i64(tmp, func, ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
......@@ -835,16 +852,24 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end);
}
struct arm64_jit_data {
struct bpf_binary_header *header;
u8 *image;
struct jit_ctx ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
struct arm64_jit_data *jit_data;
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
int image_size;
u8 *image_ptr;
if (!bpf_jit_enable)
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
......@@ -858,13 +883,29 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
image_ptr = jit_data->image;
header = jit_data->header;
extra_pass = true;
goto skip_init_ctx;
}
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out;
goto out_off;
}
/* 1. Initial fake pass to compute ctx->idx. */
......@@ -895,6 +936,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* 2. Now, the actual pass. */
ctx.image = (__le32 *)image_ptr;
skip_init_ctx:
ctx.idx = 0;
build_prologue(&ctx);
......@@ -920,13 +962,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_flush_icache(header, ctx.image + ctx.idx);
bpf_jit_binary_lock_ro(header);
if (!prog->is_func || extra_pass) {
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
goto out_off;
}
bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
jit_data->header = header;
}
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
prog->jited_len = image_size;
if (!prog->is_func || extra_pass) {
out_off:
kfree(ctx.offset);
kfree(ctx.offset);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
......
......@@ -1869,7 +1869,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int image_size;
u8 *image_ptr;
if (!bpf_jit_enable || !cpu_has_mips64r2)
if (!prog->jit_requested || !cpu_has_mips64r2)
return prog;
tmp = bpf_jit_blind_constants(prog);
......
......@@ -993,7 +993,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
if (!bpf_jit_enable)
if (!fp->jit_requested)
return org_fp;
tmp_fp = bpf_jit_blind_constants(org_fp);
......
......@@ -1300,7 +1300,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
if (!bpf_jit_enable)
if (!fp->jit_requested)
return orig_fp;
tmp = bpf_jit_blind_constants(fp);
......
......@@ -1517,7 +1517,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
u8 *image_ptr;
int pass;
if (!bpf_jit_enable)
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
......
......@@ -1109,19 +1109,29 @@ xadd: if (is_imm8(insn->off))
return proglen;
}
struct x64_jit_data {
struct bpf_binary_header *header;
int *addrs;
u8 *image;
int proglen;
struct jit_context ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
struct bpf_prog *tmp, *orig_prog = prog;
struct x64_jit_data *jit_data;
int proglen, oldproglen = 0;
struct jit_context ctx = {};
bool tmp_blinded = false;
bool extra_pass = false;
u8 *image = NULL;
int *addrs;
int pass;
int i;
if (!bpf_jit_enable)
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
......@@ -1135,10 +1145,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
addrs = jit_data->addrs;
if (addrs) {
ctx = jit_data->ctx;
oldproglen = jit_data->proglen;
image = jit_data->image;
header = jit_data->header;
extra_pass = true;
goto skip_init_addrs;
}
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out;
goto out_addrs;
}
/* Before first pass, make a rough estimation of addrs[]
......@@ -1149,6 +1177,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
addrs[i] = proglen;
}
ctx.cleanup_addr = proglen;
skip_init_addrs:
/* JITed image shrinks with every pass and the loop iterates
* until the image stops shrinking. Very large bpf programs
......@@ -1189,7 +1218,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (image) {
bpf_flush_icache(header, image + proglen);
bpf_jit_binary_lock_ro(header);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
} else {
jit_data->addrs = addrs;
jit_data->ctx = ctx;
jit_data->proglen = proglen;
jit_data->image = image;
jit_data->header = header;
}
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
......@@ -1197,8 +1234,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = orig_prog;
}
if (!prog->is_func || extra_pass) {
out_addrs:
kfree(addrs);
kfree(addrs);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
......
......@@ -200,6 +200,9 @@ struct bpf_prog_aux {
u32 max_ctx_offset;
u32 stack_depth;
u32 id;
u32 func_cnt;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_prog_ops *ops;
......@@ -402,6 +405,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
......
......@@ -76,6 +76,14 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
* is used which is an index in bpf_verifier_state->frame[] array
* pointing to bpf_func_state.
* This field must be second to last, for states_equal() reasons.
*/
u32 frameno;
/* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
......@@ -83,7 +91,8 @@ struct bpf_reg_state {
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
STACK_MISC /* BPF program wrote some data into this slot */
STACK_MISC, /* BPF program wrote some data into this slot */
STACK_ZERO, /* BPF program wrote constant zero */
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
......@@ -96,13 +105,34 @@ struct bpf_stack_state {
/* state of the program:
* type of all registers and stack info
*/
struct bpf_verifier_state {
struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
struct bpf_verifier_state *parent;
/* index of call instruction that called into this func */
int callsite;
/* stack frame number of this function state from pov of
* enclosing bpf_verifier_state.
* 0 = main function, 1 = first callee.
*/
u32 frameno;
/* subprog number == index within subprog_stack_depth
* zero == main subprog
*/
u32 subprogno;
/* should be second to last. See copy_func_state() */
int allocated_stack;
struct bpf_stack_state *stack;
};
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
u32 curframe;
};
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
......@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
s32 call_imm; /* saved imm field of call insn */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
bool seen; /* this insn was processed by the verifier */
......@@ -141,6 +172,8 @@ struct bpf_ext_analyzer_ops {
int insn_idx, int prev_insn_idx);
};
#define BPF_MAX_SUBPROGS 256
/* single container for all structs
* one verifier_env per bpf_check() call
*/
......@@ -159,13 +192,17 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
struct bpf_verifer_log log;
u32 subprog_starts[BPF_MAX_SUBPROGS];
u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
};
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
return env->cur_state->regs;
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[cur->curframe]->regs;
}
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
......
......@@ -58,6 +58,9 @@ struct bpf_prog_aux;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
......@@ -455,10 +458,13 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
blinded:1, /* Was blinded */
is_func:1, /* program is a bpf function */
kprobe_override:1; /* Do we override a kprobe? */
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
......@@ -710,6 +716,9 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
......@@ -798,7 +807,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
return fp->jited && bpf_jit_is_ebpf();
}
static inline bool bpf_jit_blinding_enabled(void)
static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
/* These are the prerequisites, should someone ever have the
* idea to call blinding outside of them, we make sure to
......@@ -806,7 +815,7 @@ static inline bool bpf_jit_blinding_enabled(void)
*/
if (!bpf_jit_is_ebpf())
return false;
if (!bpf_jit_enable)
if (!prog->jit_requested)
return false;
if (!bpf_jit_harden)
return false;
......
......@@ -197,8 +197,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
......
......@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
fp->pages = size / PAGE_SIZE;
fp->aux = aux;
fp->aux->prog = fp;
fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
......@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
/* Call and Exit are both special jumps with no
* target inside the BPF instruction image.
*/
BPF_OP(insn->code) != BPF_CALL &&
BPF_OP(insn->code) != BPF_EXIT;
}
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
struct bpf_insn *insn = prog->insnsi;
u32 i, insn_cnt = prog->len;
bool pseudo_call;
u8 code;
int off;
for (i = 0; i < insn_cnt; i++, insn++) {
if (!bpf_is_jmp_and_has_target(insn))
code = insn->code;
if (BPF_CLASS(code) != BPF_JMP)
continue;
if (BPF_OP(code) == BPF_EXIT)
continue;
if (BPF_OP(code) == BPF_CALL) {
if (insn->src_reg == BPF_PSEUDO_CALL)
pseudo_call = true;
else
continue;
} else {
pseudo_call = false;
}
off = pseudo_call ? insn->imm : insn->off;
/* Adjust offset of jmps if we cross boundaries. */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
insn->off -= delta;
if (i < pos && i + off + 1 > pos)
off += delta;
else if (i > pos + delta && i + off + 1 <= pos + delta)
off -= delta;
if (pseudo_call)
insn->imm = off;
else
insn->off = off;
}
}
......@@ -711,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
struct bpf_insn *insn;
int i, rewritten;
if (!bpf_jit_blinding_enabled())
if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
return prog;
clone = bpf_prog_clone_create(prog, GFP_USER);
......@@ -753,6 +764,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
i += insn_delta;
}
clone->blinded = 1;
return clone;
}
#endif /* CONFIG_BPF_JIT */
......@@ -774,8 +786,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
*
* Decode and execute eBPF instructions.
*/
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
u64 *stack)
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
u64 tmp;
static const void *jumptable[256] = {
......@@ -835,6 +846,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
/* Call instruction */
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
/* Jumps */
[BPF_JMP | BPF_JA] = &&JMP_JA,
......@@ -1025,6 +1037,13 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
BPF_R4, BPF_R5);
CONT;
JMP_CALL_ARGS:
BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
BPF_R3, BPF_R4,
BPF_R5,
insn + insn->off + 1);
CONT;
JMP_TAIL_CALL: {
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
......@@ -1297,6 +1316,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
return ___bpf_prog_run(regs, insn, stack); \
}
#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
u64 regs[MAX_BPF_REG]; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
BPF_R1 = r1; \
BPF_R2 = r2; \
BPF_R3 = r3; \
BPF_R4 = r4; \
BPF_R5 = r5; \
return ___bpf_prog_run(regs, insn, stack); \
}
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
......@@ -1308,6 +1344,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
static unsigned int (*interpreters[])(const void *ctx,
......@@ -1316,6 +1356,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
stack_depth = max_t(u32, stack_depth, 1);
insn->off = (s16) insn->imm;
insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
__bpf_call_base_args;
insn->code = BPF_JMP | BPF_CALL_ARGS;
}
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
......@@ -1572,11 +1630,19 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
static void bpf_prog_free_deferred(struct work_struct *work)
{
struct bpf_prog_aux *aux;
int i;
aux = container_of(work, struct bpf_prog_aux, work);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
bpf_jit_free(aux->prog);
for (i = 0; i < aux->func_cnt; i++)
bpf_jit_free(aux->func[i]);
if (aux->func_cnt) {
kfree(aux->func);
bpf_prog_unlock_free(aux->prog);
} else {
bpf_jit_free(aux->prog);
}
}
/* Free internal BPF program */
......
......@@ -189,8 +189,12 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
verbose(env, "(%02x) call %s#%d\n", insn->code,
func_id_name(insn->imm), insn->imm);
if (insn->src_reg == BPF_PSEUDO_CALL)
verbose(env, "(%02x) call pc%+d\n", insn->code,
insn->imm);
else
verbose(env, "(%02x) call %s#%d\n", insn->code,
func_id_name(insn->imm), insn->imm);
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose(env, "(%02x) goto pc%+d\n",
insn->code, insn->off);
......
......@@ -1194,7 +1194,8 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_used_maps;
/* eBPF program is ready to be JITed */
prog = bpf_prog_select_runtime(prog, &err);
if (!prog->bpf_func)
prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
goto free_used_maps;
......
......@@ -20,6 +20,8 @@
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include "disasm.h"
......@@ -227,13 +229,23 @@ static void print_liveness(struct bpf_verifier_env *env,
verbose(env, "w");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[reg->frameno];
}
static void print_verifier_state(struct bpf_verifier_env *env,
struct bpf_verifier_state *state)
const struct bpf_func_state *state)
{
struct bpf_reg_state *reg;
const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
if (state->frameno)
verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
......@@ -246,6 +258,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
if (t == PTR_TO_STACK)
verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
......@@ -297,12 +311,14 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
}
if (state->stack[i].slot_type[0] == STACK_ZERO)
verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
}
verbose(env, "\n");
}
static int copy_stack_state(struct bpf_verifier_state *dst,
const struct bpf_verifier_state *src)
static int copy_stack_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
if (!src->stack)
return 0;
......@@ -318,13 +334,13 @@ static int copy_stack_state(struct bpf_verifier_state *dst,
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume minimal amount of memory. check_stack_write() access from
* the program calls into realloc_verifier_state() to grow the stack size.
* the program calls into realloc_func_state() to grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which this function copies over. It points to previous bpf_verifier_state
* which is never reallocated
*/
static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
bool copy_old)
static int realloc_func_state(struct bpf_func_state *state, int size,
bool copy_old)
{
u32 old_size = state->allocated_stack;
struct bpf_stack_state *new_stack;
......@@ -357,10 +373,21 @@ static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
kfree(state->stack);
kfree(state);
}
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
kfree(state->stack);
int i;
for (i = 0; i <= state->curframe; i++) {
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
if (free_self)
kfree(state);
}
......@@ -368,18 +395,46 @@ static void free_verifier_state(struct bpf_verifier_state *state,
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
static int copy_verifier_state(struct bpf_verifier_state *dst,
const struct bpf_verifier_state *src)
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
int err;
err = realloc_verifier_state(dst, src->allocated_stack, false);
err = realloc_func_state(dst, src->allocated_stack, false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
int i, err;
/* if dst has more stack frames then src frame, free them */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
dst_state->curframe = src->curframe;
dst_state->parent = src->parent;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return -ENOMEM;
dst_state->frame[i] = dst;
}
err = copy_func_state(dst, src->frame[i]);
if (err)
return err;
}
return 0;
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
......@@ -441,6 +496,10 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
#define CALLEE_SAVED_REGS 5
static const int callee_saved[CALLEE_SAVED_REGS] = {
BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
......@@ -465,6 +524,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
__mark_reg_known(reg, 0);
}
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->off = 0;
reg->type = SCALAR_VALUE;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
......@@ -576,6 +642,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
reg->id = 0;
reg->off = 0;
reg->var_off = tnum_unknown;
reg->frameno = 0;
__mark_reg_unbounded(reg);
}
......@@ -612,8 +679,9 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
}
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
struct bpf_func_state *state)
{
struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
......@@ -624,41 +692,217 @@ static void init_reg_state(struct bpf_verifier_env *env,
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
regs[BPF_REG_FP].frameno = state->frameno;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int callsite, int frameno, int subprogno)
{
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
init_reg_state(env, state);
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
static int cmp_subprogs(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static int find_subprog(struct bpf_verifier_env *env, int off)
{
struct bpf_verifier_state *parent = state->parent;
u32 *p;
p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
sizeof(env->subprog_starts[0]), cmp_subprogs);
if (!p)
return -ENOENT;
return p - env->subprog_starts;
}
static int add_subprog(struct bpf_verifier_env *env, int off)
{
int insn_cnt = env->prog->len;
int ret;
if (off >= insn_cnt || off < 0) {
verbose(env, "call to invalid destination\n");
return -EINVAL;
}
ret = find_subprog(env, off);
if (ret >= 0)
return 0;
if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
verbose(env, "too many subprograms\n");
return -E2BIG;
}
env->subprog_starts[env->subprog_cnt++] = off;
sort(env->subprog_starts, env->subprog_cnt,
sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
return 0;
}
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
if (!env->allow_ptr_leaks) {
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
verbose(env, "funcation calls in offloaded programs are not supported yet\n");
return -EINVAL;
}
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
}
if (env->log.level > 1)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
/* now check that all jumps are within the same subprog */
subprog_start = 0;
if (env->subprog_cnt == cur_subprog)
subprog_end = insn_cnt;
else
subprog_end = env->subprog_starts[cur_subprog++];
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
if (BPF_CLASS(code) != BPF_JMP)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
}
subprog_start = subprog_end;
if (env->subprog_cnt == cur_subprog)
subprog_end = insn_cnt;
else
subprog_end = env->subprog_starts[cur_subprog++];
}
}
return 0;
}
struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent,
u32 regno)
{
struct bpf_verifier_state *tmp = NULL;
/* 'parent' could be a state of caller and
* 'state' could be a state of callee. In such case
* parent->curframe < state->curframe
* and it's ok for r1 - r5 registers
*
* 'parent' could be a callee's state after it bpf_exit-ed.
* In such case parent->curframe > state->curframe
* and it's ok for r0 only
*/
if (parent->curframe == state->curframe ||
(parent->curframe < state->curframe &&
regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
(parent->curframe > state->curframe &&
regno == BPF_REG_0))
return parent;
if (parent->curframe > state->curframe &&
regno >= BPF_REG_6) {
/* for callee saved regs we have to skip the whole chain
* of states that belong to callee and mark as LIVE_READ
* the registers before the call
*/
tmp = parent;
while (tmp && tmp->curframe != state->curframe) {
tmp = tmp->parent;
}
if (!tmp)
goto bug;
parent = tmp;
} else {
goto bug;
}
return parent;
bug:
verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
verbose(env, "regno %d parent frame %d current frame %d\n",
regno, parent->curframe, state->curframe);
return 0;
}
static int mark_reg_read(struct bpf_verifier_env *env,
const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent,
u32 regno)
{
bool writes = parent == state->parent; /* Observe write marks */
if (regno == BPF_REG_FP)
/* We don't need to worry about FP liveness because it's read-only */
return;
return 0;
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (state->regs[regno].live & REG_LIVE_WRITTEN)
if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
break;
parent = skip_callee(env, state, parent, regno);
if (!parent)
return -EFAULT;
/* ... then we depend on parent's value */
parent->regs[regno].live |= REG_LIVE_READ;
parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
writes = true;
}
return 0;
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_reg_state *regs = env->cur_state->regs;
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
......@@ -671,7 +915,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
mark_reg_read(env->cur_state, regno);
return mark_reg_read(env, vstate, vstate->parent, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
......@@ -702,17 +946,25 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
}
}
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
struct bpf_verifier_state *state, int off,
int size, int value_regno)
struct bpf_func_state *state, /* func where register points to */
int off, int size, int value_regno)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
enum bpf_reg_type type;
err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
true);
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
......@@ -725,8 +977,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0 &&
is_spillable_regtype(state->regs[value_regno].type)) {
is_spillable_regtype((type = cur->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
......@@ -734,51 +987,116 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
if (state != cur && type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
/* save register state */
state->stack[spi].spilled_ptr = state->regs[value_regno];
state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_SPILL;
} else {
u8 type = STACK_MISC;
/* regular write of data into stack */
state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
* when stack slots are partially written.
* This heuristic means that read propagation will be
* conservative, since it will add reg_live_read marks
* to stack slots all the way to first state when programs
* writes+reads less than 8 bytes
*/
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
if (value_regno >= 0 &&
register_is_null(&cur->regs[value_regno]))
type = STACK_ZERO;
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
STACK_MISC;
type;
}
return 0;
}
static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
/* registers of every function are unique and mark_reg_read() propagates
* the liveness in the following cases:
* - from callee into caller for R1 - R5 that were used as arguments
* - from caller into callee for R0 that used as result of the call
* - from caller to the same caller skipping states of the callee for R6 - R9,
* since R6 - R9 are callee saved by implicit function prologue and
* caller's R6 != callee's R6, so when we propagate liveness up to
* parent states we need to skip callee states for R6 - R9.
*
* stack slot marking is different, since stacks of caller and callee are
* accessible in both (since caller can pass a pointer to caller's stack to
* callee which can pass it to another function), hence mark_stack_slot_read()
* has to propagate the stack liveness to all parent states at given frame number.
* Consider code:
* f1() {
* ptr = fp - 8;
* *ptr = ctx;
* call f2 {
* .. = *ptr;
* }
* .. = *ptr;
* }
* First *ptr is reading from f1's stack and mark_stack_slot_read() has
* to mark liveness at the f1's frame and not f2's frame.
* Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
* to propagate liveness to f2 states at f1's frame level and further into
* f1 states at f1's frame level until write into that stack slot
*/
static void mark_stack_slot_read(struct bpf_verifier_env *env,
const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent,
int slot, int frameno)
{
struct bpf_verifier_state *parent = state->parent;
bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
/* since LIVE_WRITTEN mark is only done for full 8-byte
* write the read marks are conservative and parent
* state may not even have the stack allocated. In such case
* end the propagation, since the loop reached beginning
* of the function
*/
break;
/* if read wasn't screened by an earlier write ... */
if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
break;
/* ... then we depend on parent's value */
parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
writes = true;
}
}
static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_verifier_state *state, int off, int size,
int value_regno)
struct bpf_func_state *reg_state /* func where register points to */,
int off, int size, int value_regno)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
if (state->allocated_stack <= slot) {
if (reg_state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
stype = state->stack[spi].slot_type;
stype = reg_state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
......@@ -794,26 +1112,44 @@ static int check_stack_read(struct bpf_verifier_env *env,
if (value_regno >= 0) {
/* restore register state from stack */
state->regs[value_regno] = state->stack[spi].spilled_ptr;
state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
mark_stack_slot_read(state, spi);
}
mark_stack_slot_read(env, vstate, vstate->parent, spi,
reg_state->frameno);
return 0;
} else {
int zeros = 0;
for (i = 0; i < size; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
continue;
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
zeros++;
continue;
}
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
mark_stack_slot_read(env, vstate, vstate->parent, spi,
reg_state->frameno);
if (value_regno >= 0) {
if (zeros == size) {
/* any size read into register is zero extended,
* so the whole register == const_zero
*/
__mark_reg_const_zero(&state->regs[value_regno]);
} else {
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
}
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
if (value_regno >= 0)
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
return 0;
}
}
......@@ -838,7 +1174,8 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
......@@ -1088,6 +1425,54 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
struct bpf_verifier_state *cur = env->cur_state;
int i;
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_stack_depth[func->subprogno] = -off;
/* compute the total for current call chain */
for (i = 0; i <= cur->curframe; i++) {
u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
/* round up to 32-bytes, since this is granularity
* of interpreter stack sizes
*/
depth = round_up(depth, 32);
total += depth;
}
if (total > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
cur->curframe, total);
return -EACCES;
}
return 0;
}
static int get_callee_stack_depth(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int idx)
{
int start = idx + insn->imm + 1, subprog;
subprog = find_subprog(env, start);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
start);
return -EFAULT;
}
subprog++;
return env->subprog_stack_depth[subprog];
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
......@@ -1098,9 +1483,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
int bpf_size, enum bpf_access_type t,
int value_regno)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
......@@ -1189,8 +1574,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
state = func(env, reg);
err = update_stack_depth(env, state, off);
if (err)
return err;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
......@@ -1264,12 +1651,6 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
/* when register 'regno' is passed into function that will read 'access_size'
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized.
......@@ -1281,7 +1662,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = cur_regs(env) + regno;
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
if (reg->type != PTR_TO_STACK) {
......@@ -1312,9 +1693,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
......@@ -1322,17 +1700,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
for (i = 0; i < access_size; i++) {
u8 *stype;
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot ||
state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
STACK_MISC) {
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
if (state->allocated_stack <= slot)
goto err;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
goto mark;
}
err:
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
mark:
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
spi, state->frameno);
}
return 0;
return update_stack_depth(env, state, off);
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
......@@ -1585,6 +1978,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
if (env->subprog_cnt) {
verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
......@@ -1646,9 +2043,9 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs, *reg;
int i;
......@@ -1665,7 +2062,121 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
}
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *vstate = env->cur_state;
int i;
for (i = 0; i <= vstate->curframe; i++)
__clear_all_pkt_pointers(env, vstate->frame[i]);
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
int i, subprog, target_insn;
if (state->curframe >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
state->curframe);
return -E2BIG;
}
target_insn = *insn_idx + insn->imm;
subprog = find_subprog(env, target_insn + 1);
if (subprog < 0) {
verbose(env, "verifier bug. No program starts at insn %d\n",
target_insn + 1);
return -EFAULT;
}
caller = state->frame[state->curframe];
if (state->frame[state->curframe + 1]) {
verbose(env, "verifier bug. Frame %d already allocated\n",
state->curframe + 1);
return -EFAULT;
}
callee = kzalloc(sizeof(*callee), GFP_KERNEL);
if (!callee)
return -ENOMEM;
state->frame[state->curframe + 1] = callee;
/* callee cannot access r0, r6 - r9 for reading and has to write
* into its own stack before reading from it.
* callee can read/write into caller's stack
*/
init_func_state(env, callee,
/* remember the callsite, it will be used by bpf_exit */
*insn_idx /* callsite */,
state->curframe + 1 /* frameno within this callchain */,
subprog + 1 /* subprog number within this prog */);
/* copy r1 - r5 args that callee can access */
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
callee->regs[i] = caller->regs[i];
/* after the call registers r0 - r5 were scratched */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, caller->regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* only increment it after check_reg_arg() finished */
state->curframe++;
/* and go analyze first insn of the callee */
*insn_idx = target_insn;
if (env->log.level) {
verbose(env, "caller:\n");
print_verifier_state(env, caller);
verbose(env, "callee:\n");
print_verifier_state(env, callee);
}
return 0;
}
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
struct bpf_reg_state *r0;
callee = state->frame[state->curframe];
r0 = &callee->regs[BPF_REG_0];
if (r0->type == PTR_TO_STACK) {
/* technically it's ok to return caller's stack pointer
* (or caller's caller's pointer) back to the caller,
* since these pointers are valid. Only current stack
* pointer will be invalid as soon as function exits,
* but let's be conservative
*/
verbose(env, "cannot return stack pointer to the caller\n");
return -EINVAL;
}
state->curframe--;
caller = state->frame[state->curframe];
/* return to the caller whatever r0 had in the callee */
caller->regs[BPF_REG_0] = *r0;
*insn_idx = callee->callsite + 1;
if (env->log.level) {
verbose(env, "returning from callee:\n");
print_verifier_state(env, callee);
verbose(env, "to caller at %d:\n", *insn_idx);
print_verifier_state(env, caller);
}
/* clear everything in the callee */
free_func_state(callee);
state->frame[state->curframe + 1] = NULL;
return 0;
}
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
......@@ -1825,7 +2336,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
......@@ -1837,13 +2350,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg = &regs[dst];
if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
print_verifier_state(env, env->cur_state);
print_verifier_state(env, state);
verbose(env,
"verifier internal error: known but bad sbounds\n");
return -EINVAL;
}
if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
print_verifier_state(env, env->cur_state);
print_verifier_state(env, state);
verbose(env,
"verifier internal error: known but bad ubounds\n");
return -EINVAL;
......@@ -2245,7 +2758,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
int rc;
......@@ -2319,12 +2834,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
print_verifier_state(env, env->cur_state);
print_verifier_state(env, state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
print_verifier_state(env, env->cur_state);
print_verifier_state(env, state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
......@@ -2478,14 +2993,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
static void find_good_pkt_pointers(struct bpf_verifier_state *state,
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
int i;
int i, j;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
......@@ -2555,12 +3071,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
reg = &state->stack[i].spilled_ptr;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
reg = &state->stack[i].spilled_ptr;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}
}
......@@ -2798,20 +3317,24 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
u32 id = regs[regno].id;
int i;
int i, j;
for (i = 0; i < MAX_BPF_REG; i++)
mark_map_reg(regs, i, id, is_null);
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
}
}
}
......@@ -2911,8 +3434,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
......@@ -2975,6 +3500,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
......@@ -2987,22 +3513,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch->regs[insn->dst_reg],
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch->regs[insn->src_reg],
&other_branch->regs[insn->dst_reg],
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch->regs[insn->dst_reg],
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
......@@ -3023,7 +3549,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch);
print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
......@@ -3108,6 +3634,18 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
if (env->subprog_cnt) {
/* when program has LD_ABS insn, JITs and interpreter assume
 * that r1 == ctx == skb, which is not the case for callees
 * that can have arbitrary arguments. It's problematic
 * for the main prog as well, since JITs would need to analyze
 * all functions in order to make proper register save/restore
 * decisions in the main prog. Hence disallow LD_ABS with calls
 */
verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
......@@ -3284,6 +3822,10 @@ static int check_cfg(struct bpf_verifier_env *env)
int ret = 0;
int i, t;
ret = check_subprogs(env);
if (ret < 0)
return ret;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
......@@ -3316,6 +3858,14 @@ static int check_cfg(struct bpf_verifier_env *env)
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
if (insns[t].src_reg == BPF_PSEUDO_CALL) {
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
......@@ -3434,11 +3984,21 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
bool equal;
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
if (rold->type == PTR_TO_STACK)
/* two stack pointers are equal only if they're pointing to
* the same stack frame, since fp-8 in foo != fp-8 in bar
*/
return equal && rold->frameno == rcur->frameno;
if (equal)
return true;
if (rold->type == NOT_INIT)
......@@ -3511,7 +4071,6 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
......@@ -3526,8 +4085,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}
static bool stacksafe(struct bpf_verifier_state *old,
struct bpf_verifier_state *cur,
static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
struct idpair *idmap)
{
int i, spi;
......@@ -3545,8 +4104,19 @@ static bool stacksafe(struct bpf_verifier_state *old,
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
*/
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
......@@ -3603,9 +4173,8 @@ static bool stacksafe(struct bpf_verifier_state *old,
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
static bool func_states_equal(struct bpf_func_state *old,
struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
......@@ -3629,71 +4198,72 @@ static bool states_equal(struct bpf_verifier_env *env,
return ret;
}
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
int i;
if (old->curframe != cur->curframe)
return false;
/* for states to be equal callsites have to be the same
* and all frame states need to be equivalent
*/
for (i = 0; i <= old->curframe; i++) {
if (old->frame[i]->callsite != cur->frame[i]->callsite)
return false;
if (!func_states_equal(old->frame[i], cur->frame[i]))
return false;
}
return true;
}
/* A write screens off any subsequent reads; but write marks come from the
* straight-line code between a state and its parent. When we arrive at a
* jump target (in the first iteration of the propagate_liveness() loop),
* we didn't arrive by the straight-line code, so read marks in state must
* propagate to parent regardless of state's write marks.
* straight-line code between a state and its parent. When we arrive at an
* equivalent state (jump target or such) we didn't arrive by the straight-line
* code, so read marks in the state must propagate to the parent regardless
* of the state's write marks. That's what 'parent == state->parent' comparison
* in mark_reg_read() and mark_stack_slot_read() is for.
*/
static bool do_propagate_liveness(const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent)
static int propagate_liveness(struct bpf_verifier_env *env,
const struct bpf_verifier_state *vstate,
struct bpf_verifier_state *vparent)
{
bool writes = parent == state->parent; /* Observe write marks */
bool touched = false; /* any changes made? */
int i;
int i, frame, err = 0;
struct bpf_func_state *state, *parent;
if (!parent)
return touched;
if (vparent->curframe != vstate->curframe) {
WARN(1, "propagate_live: parent frame %d current frame %d\n",
vparent->curframe, vstate->curframe);
return -EFAULT;
}
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
if (parent->regs[i].live & REG_LIVE_READ)
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
continue;
if (state->regs[i].live & REG_LIVE_READ) {
parent->regs[i].live |= REG_LIVE_READ;
touched = true;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, vstate, vparent, i);
if (err)
return err;
}
}
/* ... and stack slots */
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
i < parent->allocated_stack / BPF_REG_SIZE; i++) {
if (parent->stack[i].slot_type[0] != STACK_SPILL)
continue;
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (writes &&
(state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
touched = true;
for (frame = 0; frame <= vstate->curframe; frame++) {
state = vstate->frame[frame];
parent = vparent->frame[frame];
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
i < parent->allocated_stack / BPF_REG_SIZE; i++) {
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
mark_stack_slot_read(env, vstate, vparent, i, frame);
}
}
return touched;
}
/* "parent" is "a state from which we reach the current state", but initially
* it is not the state->parent (i.e. "the state whose straight-line code leads
* to the current state"), instead it is the state that happened to arrive at
* a (prunable) equivalent of the current state. See comment above
* do_propagate_liveness() for consequences of this.
* This function is just a more efficient way of calling mark_reg_read() or
* mark_stack_slot_read() on each reg in "parent" that is read in "state",
* though it requires that parent != state->parent in the call arguments.
*/
static void propagate_liveness(const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent)
{
while (do_propagate_liveness(state, parent)) {
/* Something changed, so we need to feed those changes onward */
state = parent;
parent = state->parent;
}
return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
......@@ -3701,7 +4271,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state;
int i, err;
int i, j, err;
sl = env->explored_states[insn_idx];
if (!sl)
......@@ -3722,7 +4292,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
propagate_liveness(&sl->state, cur);
err = propagate_liveness(env, &sl->state, cur);
if (err)
return err;
return 1;
}
sl = sl->next;
......@@ -3730,9 +4302,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach bpf_exit (which means it's safe) or
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
* but it will either reach outer most bpf_exit (which means it's safe)
* or it will be rejected. Since there are no loops, we won't be
* seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
* again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
......@@ -3756,10 +4329,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
cur->regs[i].live = REG_LIVE_NONE;
for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
if (cur->stack[i].slot_type[0] == STACK_SPILL)
cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
struct bpf_func_state *frame = cur->frame[j];
for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
}
return 0;
}
......@@ -3777,7 +4355,7 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len;
int insn_cnt = env->prog->len, i;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
......@@ -3785,9 +4363,18 @@ static int do_check(struct bpf_verifier_env *env)
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
env->cur_state = state;
init_reg_state(env, state->regs);
state->curframe = 0;
state->parent = NULL;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
return -ENOMEM;
}
env->cur_state = state;
init_func_state(env, state->frame[0],
BPF_MAIN_FUNC /* callsite */,
0 /* frameno */,
0 /* subprogno, zero == main subprog */);
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
......@@ -3834,7 +4421,7 @@ static int do_check(struct bpf_verifier_env *env)
else
verbose(env, "\nfrom %d to %d:",
prev_insn_idx, insn_idx);
print_verifier_state(env, state);
print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
......@@ -3967,13 +4554,17 @@ static int do_check(struct bpf_verifier_env *env)
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
err = check_call(env, insn->imm, insn_idx);
if (insn->src_reg == BPF_PSEUDO_CALL)
err = check_func_call(env, insn, &insn_idx);
else
err = check_helper_call(env, insn->imm, insn_idx);
if (err)
return err;
......@@ -3998,6 +4589,16 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
if (state->curframe) {
/* exit from nested function */
prev_insn_idx = insn_idx;
err = prepare_func_exit(env, &insn_idx);
if (err)
return err;
do_print_state = true;
continue;
}
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
......@@ -4058,8 +4659,16 @@ static int do_check(struct bpf_verifier_env *env)
insn_idx++;
}
verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
env->prog->aux->stack_depth);
verbose(env, "processed %d insns, stack depth ", insn_processed);
for (i = 0; i < env->subprog_cnt + 1; i++) {
u32 depth = env->subprog_stack_depth[i];
verbose(env, "%d", depth);
if (i + 1 < env->subprog_cnt + 1)
verbose(env, "+");
}
verbose(env, "\n");
env->prog->aux->stack_depth = env->subprog_stack_depth[0];
return 0;
}
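With multiple functions the summary line above now prints one stack depth per subprogram, joined by '+', for example (illustrative numbers only, not from a real run): "processed 61 insns, stack depth 64+8".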
......@@ -4245,6 +4854,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
return 0;
}
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
int i;
if (len == 1)
return;
for (i = 0; i < env->subprog_cnt; i++) {
if (env->subprog_starts[i] < off)
continue;
env->subprog_starts[i] += len - 1;
}
}
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
......@@ -4255,6 +4877,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
return new_prog;
}
......@@ -4389,6 +5012,150 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
return 0;
}
static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_insn *insn = prog->insnsi;
void *old_bpf_func;
int err = -ENOMEM;
if (env->subprog_cnt == 0)
return 0;
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
subprog = find_subprog(env, i + insn->imm + 1);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
i + insn->imm + 1);
return -EFAULT;
}
/* temporarily remember subprog id inside insn instead of
* aux_data, since next loop will split up all insns into funcs
*/
insn->off = subprog + 1;
/* remember original imm in case JIT fails and fallback
* to interpreter will be needed
*/
env->insn_aux_data[i].call_imm = insn->imm;
/* point imm to __bpf_call_base+1 from JITs point of view */
insn->imm = 1;
}
func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
if (!func)
return -ENOMEM;
for (i = 0; i <= env->subprog_cnt; i++) {
subprog_start = subprog_end;
if (env->subprog_cnt == i)
subprog_end = prog->len;
else
subprog_end = env->subprog_starts[i];
len = subprog_end - subprog_start;
func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
if (!func[i])
goto out_free;
memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
len * sizeof(struct bpf_insn));
func[i]->len = len;
func[i]->is_func = 1;
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term, debug info would be needed to populate names
*/
func[i]->aux->name[0] = 'F';
func[i]->aux->stack_depth = env->subprog_stack_depth[i];
func[i]->jit_requested = 1;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
goto out_free;
}
cond_resched();
}
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
*/
for (i = 0; i <= env->subprog_cnt; i++) {
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
subprog = insn->off;
insn->off = 0;
insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
func[subprog]->bpf_func -
__bpf_call_base;
}
}
for (i = 0; i <= env->subprog_cnt; i++) {
old_bpf_func = func[i]->bpf_func;
tmp = bpf_int_jit_compile(func[i]);
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
err = -EFAULT;
goto out_free;
}
cond_resched();
}
/* finally lock prog and jit images for all functions and
* populate kallsyms
*/
for (i = 0; i <= env->subprog_cnt; i++) {
bpf_prog_lock_ro(func[i]);
bpf_prog_kallsyms_add(func[i]);
}
prog->jited = 1;
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt + 1;
return 0;
out_free:
for (i = 0; i <= env->subprog_cnt; i++)
if (func[i])
bpf_jit_free(func[i]);
kfree(func);
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
insn->off = 0;
insn->imm = env->insn_aux_data[i].call_imm;
}
return err;
}
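For the call fixup above, the JIT side keeps the usual BPF_CALL contract: the callee's image address is recovered from __bpf_call_base plus imm, which is why the second loop rewrites imm to that delta (a rough sketch of the relationship, not kernel code):

/* sketch: after the final pass, for a patched bpf-to-bpf call insn */
void *callee = (u8 *)__bpf_call_base + insn->imm; /* == func[subprog]->bpf_func */

The temporary imm of 1 in the first pass presumably just gives the first bpf_int_jit_compile() run a resolvable target of the final call size; the real delta is filled in once all callee images and their addresses are known.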
static int fixup_call_args(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
int i, depth;
if (env->prog->jit_requested)
if (jit_subprogs(env) == 0)
return 0;
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
depth = get_callee_stack_depth(env, insn, i);
if (depth < 0)
return depth;
bpf_patch_call_args(insn, depth);
}
return 0;
}
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
......@@ -4408,6 +5175,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
......@@ -4437,7 +5206,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* handlers are currently limited to 64 bit only.
*/
if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
if (prog->jit_requested && BITS_PER_LONG == 64 &&
insn->imm == BPF_FUNC_map_lookup_elem) {
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON ||
......@@ -4589,12 +5358,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!env->explored_states)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
......@@ -4615,6 +5384,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (ret == 0)
ret = fixup_bpf_calls(env);
if (ret == 0)
ret = fixup_call_args(env);
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {
......
......@@ -197,8 +197,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
......
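To illustrate the new encoding, a bpf-to-bpf call sets src_reg to BPF_PSEUDO_CALL and carries in imm the offset of the callee relative to the instruction following the call (a minimal sketch in the raw-instruction style of the selftests further below, not part of the uapi change itself):

BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 2), /* call insn 0 + 2 + 1 == 3 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2), /* callee */
BPF_EXIT_INSN(),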
......@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
__u32 map_flags);
/* Recommend log buffer size */
#define BPF_LOG_BUF_SIZE 65536
#define BPF_LOG_BUF_SIZE (256 * 1024)
int bpf_load_program_name(enum bpf_prog_type type, const char *name,
const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
......
......@@ -174,12 +174,19 @@ struct bpf_program {
char *name;
char *section_name;
struct bpf_insn *insns;
size_t insns_cnt;
size_t insns_cnt, main_prog_cnt;
enum bpf_prog_type type;
struct {
struct reloc_desc {
enum {
RELO_LD64,
RELO_CALL,
} type;
int insn_idx;
int map_idx;
union {
int map_idx;
int text_off;
};
} *reloc_desc;
int nr_reloc;
......@@ -234,6 +241,7 @@ struct bpf_object {
} *reloc;
int nr_reloc;
int maps_shndx;
int text_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
......@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
size_t pi, si;
for (pi = 0; pi < obj->nr_programs; pi++) {
char *name = NULL;
const char *name = NULL;
prog = &obj->programs[pi];
if (prog->idx == obj->efile.text_shndx) {
name = ".text";
goto skip_search;
}
for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
si++) {
......@@ -405,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
prog->section_name);
return -EINVAL;
}
skip_search:
prog->name = strdup(name);
if (!prog->name) {
pr_warning("failed to allocate memory for prog sym %s\n",
......@@ -795,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if ((sh.sh_type == SHT_PROGBITS) &&
(sh.sh_flags & SHF_EXECINSTR) &&
(data->d_size > 0)) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
err = bpf_object__add_program(obj, data->d_buf,
data->d_size, name, idx);
if (err) {
......@@ -856,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
}
static int
bpf_program__collect_reloc(struct bpf_program *prog,
size_t nr_maps, GElf_Shdr *shdr,
Elf_Data *data, Elf_Data *symbols,
int maps_shndx, struct bpf_map *maps)
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
Elf_Data *data, struct bpf_object *obj)
{
Elf_Data *symbols = obj->efile.symbols;
int text_shndx = obj->efile.text_shndx;
int maps_shndx = obj->efile.maps_shndx;
struct bpf_map *maps = obj->maps;
size_t nr_maps = obj->nr_maps;
int i, nrels;
pr_debug("collecting relocating info for: '%s'\n",
......@@ -893,8 +910,10 @@ bpf_program__collect_reloc(struct bpf_program *prog,
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
pr_debug("relo for %ld value %ld name %d\n",
rel.r_info >> 32, sym.st_value, sym.st_name);
if (sym.st_shndx != maps_shndx) {
if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
prog->section_name, sym.st_shndx);
return -LIBBPF_ERRNO__RELOC;
......@@ -903,6 +922,17 @@ bpf_program__collect_reloc(struct bpf_program *prog,
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
pr_debug("relocation: insn_idx=%u\n", insn_idx);
if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
pr_warning("incorrect bpf_call opcode\n");
return -LIBBPF_ERRNO__RELOC;
}
prog->reloc_desc[i].type = RELO_CALL;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].text_off = sym.st_value;
continue;
}
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
......@@ -924,6 +954,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__RELOC;
}
prog->reloc_desc[i].type = RELO_LD64;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].map_idx = map_idx;
}
......@@ -962,28 +993,77 @@ bpf_object__create_maps(struct bpf_object *obj)
return 0;
}
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct reloc_desc *relo)
{
struct bpf_insn *insn, *new_insn;
struct bpf_program *text;
size_t new_cnt;
if (relo->type != RELO_CALL)
return -LIBBPF_ERRNO__RELOC;
if (prog->idx == obj->efile.text_shndx) {
pr_warning("relo in .text insn %d into off %d\n",
relo->insn_idx, relo->text_off);
return -LIBBPF_ERRNO__RELOC;
}
if (prog->main_prog_cnt == 0) {
text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
if (!text) {
pr_warning("no .text section found yet relo into text exist\n");
return -LIBBPF_ERRNO__RELOC;
}
new_cnt = prog->insns_cnt + text->insns_cnt;
new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
if (!new_insn) {
pr_warning("oom in prog realloc\n");
return -ENOMEM;
}
memcpy(new_insn + prog->insns_cnt, text->insns,
text->insns_cnt * sizeof(*insn));
prog->insns = new_insn;
prog->main_prog_cnt = prog->insns_cnt;
prog->insns_cnt = new_cnt;
}
insn = &prog->insns[relo->insn_idx];
insn->imm += prog->main_prog_cnt - relo->insn_idx;
pr_debug("added %zd insn from %s to prog %s\n",
text->insns_cnt, text->section_name, prog->section_name);
return 0;
}
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
int i;
int i, err;
if (!prog || !prog->reloc_desc)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
int insn_idx, map_idx;
struct bpf_insn *insns = prog->insns;
if (prog->reloc_desc[i].type == RELO_LD64) {
struct bpf_insn *insns = prog->insns;
int insn_idx, map_idx;
insn_idx = prog->reloc_desc[i].insn_idx;
map_idx = prog->reloc_desc[i].map_idx;
insn_idx = prog->reloc_desc[i].insn_idx;
map_idx = prog->reloc_desc[i].map_idx;
if (insn_idx >= (int)prog->insns_cnt) {
pr_warning("relocation out of range: '%s'\n",
prog->section_name);
return -LIBBPF_ERRNO__RELOC;
if (insn_idx >= (int)prog->insns_cnt) {
pr_warning("relocation out of range: '%s'\n",
prog->section_name);
return -LIBBPF_ERRNO__RELOC;
}
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
insns[insn_idx].imm = obj->maps[map_idx].fd;
} else {
err = bpf_program__reloc_text(prog, obj,
&prog->reloc_desc[i]);
if (err)
return err;
}
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
insns[insn_idx].imm = obj->maps[map_idx].fd;
}
zfree(&prog->reloc_desc);
......@@ -1026,7 +1106,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
Elf_Data *data = obj->efile.reloc[i].data;
int idx = shdr->sh_info;
struct bpf_program *prog;
size_t nr_maps = obj->nr_maps;
if (shdr->sh_type != SHT_REL) {
pr_warning("internal error at %d\n", __LINE__);
......@@ -1040,11 +1119,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return -LIBBPF_ERRNO__RELOC;
}
err = bpf_program__collect_reloc(prog, nr_maps,
err = bpf_program__collect_reloc(prog,
shdr, data,
obj->efile.symbols,
obj->efile.maps_shndx,
obj->maps);
obj);
if (err)
return err;
}
......@@ -1197,6 +1274,8 @@ bpf_object__load_progs(struct bpf_object *obj)
int err;
for (i = 0; i < obj->nr_programs; i++) {
if (obj->programs[i].idx == obj->efile.text_shndx)
continue;
err = bpf_program__load(&obj->programs[i],
obj->license,
obj->kern_version);
......@@ -1859,7 +1938,7 @@ long libbpf_get_error(const void *ptr)
int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
struct bpf_program *prog;
struct bpf_program *prog, *first_prog = NULL;
struct bpf_object *obj;
int err;
......@@ -1867,25 +1946,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
if (IS_ERR(obj))
return -ENOENT;
prog = bpf_program__next(NULL, obj);
if (!prog) {
bpf_object__close(obj);
return -ENOENT;
}
/*
* If type is not specified, try to guess it based on
* section name.
*/
if (type == BPF_PROG_TYPE_UNSPEC) {
type = bpf_program__guess_type(prog);
bpf_object__for_each_program(prog, obj) {
/*
* If type is not specified, try to guess it based on
* section name.
*/
if (type == BPF_PROG_TYPE_UNSPEC) {
bpf_object__close(obj);
return -EINVAL;
type = bpf_program__guess_type(prog);
if (type == BPF_PROG_TYPE_UNSPEC) {
bpf_object__close(obj);
return -EINVAL;
}
}
bpf_program__set_type(prog, type);
if (prog->idx != obj->efile.text_shndx && !first_prog)
first_prog = prog;
}
if (!first_prog) {
pr_warning("object file doesn't contain bpf program\n");
bpf_object__close(obj);
return -ENOENT;
}
bpf_program__set_type(prog, type);
err = bpf_object__load(obj);
if (err) {
bpf_object__close(obj);
......@@ -1893,6 +1977,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
}
*pobj = obj;
*prog_fd = bpf_program__fd(prog);
*prog_fd = bpf_program__fd(first_prog);
return 0;
}
......@@ -17,7 +17,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o
sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
test_l4lb_noinline.o test_xdp_noinline.o
TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
test_offload.py
......@@ -49,8 +50,13 @@ else
CPU ?= generic
endif
CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
-Wno-compare-distinct-pointer-types
$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
%.o: %.c
$(CLANG) -I. -I./include/uapi -I../../../include/uapi \
-Wno-compare-distinct-pointer-types \
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
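Expanded, this rule builds the new noinline objects roughly as follows (illustrative invocation with CPU left at its "generic" default):

clang -I. -I./include/uapi -I../../../include/uapi \
	-Wno-compare-distinct-pointer-types -fno-inline \
	-O2 -target bpf -emit-llvm -c test_l4lb_noinline.c -o - | \
llc -march=bpf -mcpu=generic -filetype=obj -o test_l4lb_noinline.o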
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "bpf_helpers.h"
#include "test_iptunnel_common.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
static __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* copy paste of jhash from kernel sources to make sure llvm
* can compile it into a valid sequence of bpf instructions
*/
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
#define PCKT_FRAGMENTED 65343
#define IPV4_HDR_LEN_NO_OPT 20
#define IPV4_PLUS_ICMP_HDR 28
#define IPV6_PLUS_ICMP_HDR 48
#define RING_SIZE 2
#define MAX_VIPS 12
#define MAX_REALS 5
#define CTL_MAP_SIZE 16
#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
#define F_IPV6 (1 << 0)
#define F_HASH_NO_SRC_PORT (1 << 0)
#define F_ICMP (1 << 0)
#define F_SYN_SET (1 << 1)
struct packet_description {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct vip_stats {
__u64 bytes;
__u64 pkts;
};
struct eth_hdr {
unsigned char eth_dest[ETH_ALEN];
unsigned char eth_source[ETH_ALEN];
unsigned short eth_proto;
};
struct bpf_map_def SEC("maps") vip_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ch_rings = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE,
};
struct bpf_map_def SEC("maps") reals = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS,
};
struct bpf_map_def SEC("maps") stats = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ctl_array = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE,
};
static __u32 get_packet_hash(struct packet_description *pckt,
bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
pckt->ports, CH_RINGS_SIZE);
else
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
static bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6);
__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
__u32 *real_pos;
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
return 0;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return false;
return true;
}
static int parse_icmpv6(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
return TC_ACT_OK;
off += sizeof(struct icmp6hdr);
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
pckt->proto = ip6h->nexthdr;
pckt->flags |= F_ICMP;
memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
return TC_ACT_UNSPEC;
}
static int parse_icmp(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->type != ICMP_DEST_UNREACH ||
icmp_hdr->code != ICMP_FRAG_NEEDED)
return TC_ACT_OK;
off += sizeof(struct icmphdr);
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
pckt->proto = iph->protocol;
pckt->flags |= F_ICMP;
pckt->src = iph->daddr;
pckt->dst = iph->saddr;
return TC_ACT_UNSPEC;
}
static bool parse_udp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct udphdr *udp;
udp = data + off;
if (udp + 1 > data_end)
return false;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = udp->source;
pckt->port16[1] = udp->dest;
} else {
pckt->port16[0] = udp->dest;
pckt->port16[1] = udp->source;
}
return true;
}
static bool parse_tcp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct tcphdr *tcp;
tcp = data + off;
if (tcp + 1 > data_end)
return false;
if (tcp->syn)
pckt->flags |= F_SYN_SET;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = tcp->source;
pckt->port16[1] = tcp->dest;
} else {
pckt->port16[0] = tcp->dest;
pckt->port16[1] = tcp->source;
}
return true;
}
static int process_packet(void *data, __u64 off, void *data_end,
bool is_ipv6, struct __sk_buff *skb)
{
void *pkt_start = (void *)(long)skb->data;
struct packet_description pckt = {};
struct eth_hdr *eth = pkt_start;
struct bpf_tunnel_key tkey = {};
struct vip_stats *data_stats;
struct real_definition *dst;
struct vip_meta *vip_info;
struct ctl_value *cval;
__u32 v4_intf_pos = 1;
__u32 v6_intf_pos = 2;
struct ipv6hdr *ip6h;
struct vip vip = {};
struct iphdr *iph;
int tun_flag = 0;
__u16 pkt_bytes;
__u64 iph_len;
__u32 ifindex;
__u8 protocol;
__u32 vip_num;
int action;
tkey.tunnel_ttl = 64;
if (is_ipv6) {
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
iph_len = sizeof(struct ipv6hdr);
protocol = ip6h->nexthdr;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (protocol == IPPROTO_FRAGMENT) {
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_ICMPV6) {
action = parse_icmpv6(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV6_PLUS_ICMP_HDR;
} else {
memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
}
} else {
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
protocol = iph->protocol;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(iph->tot_len);
off += IPV4_HDR_LEN_NO_OPT;
if (iph->frag_off & PCKT_FRAGMENTED)
return TC_ACT_SHOT;
if (protocol == IPPROTO_ICMP) {
action = parse_icmp(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV4_PLUS_ICMP_HDR;
} else {
pckt.src = iph->saddr;
pckt.dst = iph->daddr;
}
}
protocol = pckt.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else {
return TC_ACT_SHOT;
}
if (is_ipv6)
memcpy(vip.daddr.v6, pckt.dstv6, 16);
else
vip.daddr.v4 = pckt.dst;
vip.dport = pckt.port16[1];
vip.protocol = pckt.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.dport = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return TC_ACT_SHOT;
pckt.port16[1] = 0;
}
if (vip_info->flags & F_HASH_NO_SRC_PORT)
pckt.port16[0] = 0;
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
return TC_ACT_SHOT;
if (dst->flags & F_IPV6) {
cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
memcpy(tkey.remote_ipv6, dst->dstv6, 16);
tun_flag = BPF_F_TUNINFO_IPV6;
} else {
cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
tkey.remote_ipv4 = dst->dst;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return TC_ACT_SHOT;
data_stats->pkts++;
data_stats->bytes += pkt_bytes;
bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
*(u32 *)eth->eth_dest = tkey.remote_ipv4;
return bpf_redirect(ifindex, 0);
}
SEC("l4lb-demo")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return TC_ACT_SHOT;
eth_proto = eth->eth_proto;
if (eth_proto == bpf_htons(ETH_P_IP))
return process_packet(data, nh_off, data_end, false, ctx);
else if (eth_proto == bpf_htons(ETH_P_IPV6))
return process_packet(data, nh_off, data_end, true, ctx);
else
return TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
......@@ -169,10 +169,9 @@ static void test_xdp(void)
#define NUM_ITER 100000
#define VIP_NUM 5
static void test_l4lb(void)
static void test_l4lb(const char *file)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
const char *file = "./test_l4lb.o";
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
......@@ -249,6 +248,95 @@ static void test_l4lb(void)
bpf_object__close(obj);
}
static void test_l4lb_all(void)
{
const char *file1 = "./test_l4lb.o";
const char *file2 = "./test_l4lb_noinline.o";
test_l4lb(file1);
test_l4lb(file2);
}
static void test_xdp_noinline(void)
{
const char *file = "./test_xdp_noinline.o";
unsigned int nr_cpus = bpf_num_possible_cpus();
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
__u32 vip_num;
} value = {.vip_num = VIP_NUM};
__u32 stats_key = VIP_NUM;
struct vip_stats {
__u64 bytes;
__u64 pkts;
} stats[nr_cpus];
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
__u32 duration, retval, size;
int err, i, prog_fd, map_fd;
__u64 bytes = 0, pkts = 0;
struct bpf_object *obj;
char buf[128];
u32 *magic = (u32 *)buf;
err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (err) {
error_cnt++;
return;
}
map_fd = bpf_find_map(__func__, obj, "vip_map");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &key, &value, 0);
map_fd = bpf_find_map(__func__, obj, "ch_rings");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
map_fd = bpf_find_map(__func__, obj, "reals");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
CHECK(err || errno || retval != 1 || size != 54 ||
*magic != MAGIC_VAL, "ipv4",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
CHECK(err || errno || retval != 1 || size != 74 ||
*magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
map_fd = bpf_find_map(__func__, obj, "stats");
if (map_fd < 0)
goto out;
bpf_map_lookup_elem(map_fd, &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
error_cnt++;
printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
}
out:
bpf_object__close(obj);
}
static void test_tcp_estats(void)
{
const char *file = "./test_tcp_estats.o";
......@@ -757,7 +845,8 @@ int main(void)
test_pkt_access();
test_xdp();
test_l4lb();
test_l4lb_all();
test_xdp_noinline();
test_tcp_estats();
test_bpf_obj_id();
test_pkt_md_access();
......
......@@ -2,6 +2,7 @@
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
......@@ -277,7 +278,7 @@ static struct bpf_test tests[] = {
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "jump out of range",
.errstr = "not an exit",
.result = REJECT,
},
{
......@@ -5648,7 +5649,7 @@ static struct bpf_test tests[] = {
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
......@@ -5883,7 +5884,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
......@@ -8097,6 +8098,1623 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: not on unpriviledged",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"calls: overlapping caller/callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
"calls: wrong recursive calls",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: wrong src reg",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: wrong off value",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: jump back loop",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn 0 to 0",
.result = REJECT,
},
{
"calls: conditional call",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: conditional call 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: conditional call 4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 5",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: conditional call 6",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: using r0 returned by callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: using uninit r0 from callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"calls: callee is using r1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
.result = ACCEPT,
},
{
"calls: callee using args1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"calls: callee using wrong args2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"calls: callee using two args",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
offsetof(struct __sk_buff, len)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
offsetof(struct __sk_buff, len)),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"calls: callee changing pkt pointers",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
/* clear_all_pkt_pointers() has to walk all frames
* to make sure that pkt pointers in the caller
* are cleared when callee is calling a helper that
* adjusts packet size
*/
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_xdp_adjust_head),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R6 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"calls: two calls with args",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls with stack arith",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls with misaligned stack access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
.errstr = "misaligned stack access",
.result = REJECT,
},
{
"calls: calls control flow, jump test",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls control flow, jump test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "jump out of range from insn 1 to 4",
.result = REJECT,
},
{
"calls: two calls with bad jump",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range from insn 11 to 9",
.result = REJECT,
},
{
"calls: recursive call. test1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.result = REJECT,
},
{
"calls: recursive call. test2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.result = REJECT,
},
{
"calls: unreachable code",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "unreachable insn 6",
.result = REJECT,
},
{
"calls: invalid call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: invalid call 2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: jumping across function bodies. test1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: jumping across function bodies. test2",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: call without exit",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "not an exit",
.result = REJECT,
},
{
"calls: call into middle of ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn",
.result = REJECT,
},
{
"calls: call into middle of other call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn",
.result = REJECT,
},
{
"calls: ld_abs with changing ctx data in callee",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_push),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
.result = REJECT,
},
{
"calls: two calls with bad fallthrough",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "not an exit",
.result = REJECT,
},
{
"calls: two calls with stack read",
.insns = {
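/* main prog */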
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
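/* subprog 1 */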
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
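/* subprog 2 */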
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: two calls with stack write",
.insns = {
/* main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
/* write into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
/* read from stack frame of main prog */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: spill into caller stack frame",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "cannot spill",
.result = REJECT,
},
{
"calls: write into caller stack frame",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: write into callee stack frame",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "cannot return stack pointer",
.result = REJECT,
},
{
"calls: two calls with stack write and void return",
.insns = {
/* main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* write into stack frame of main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(), /* void return */
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: ambiguous return value",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for root only",
.result_unpriv = REJECT,
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"calls: two calls that return map_value",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map1 = { 23 },
.result = ACCEPT,
},
{
"calls: two calls that return map_value with bool condition",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map1 = { 23 },
.result = ACCEPT,
},
{
"calls: two calls that return map_value with incorrect bool check",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map1 = { 23 },
.result = REJECT,
.errstr = "invalid read from stack off -16+0 size 8",
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map1 = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map1 = { 12, 22 },
.result = ACCEPT,
},
{
"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
BPF_JMP_IMM(BPF_JA, 0, 0, -30),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -8),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map1 = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
{
"calls: two calls that receive map_value_ptr_or_null via arg. test1",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map1 = { 12, 22 },
.result = ACCEPT,
},
{
"calls: two calls that receive map_value_ptr_or_null via arg. test2",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 0 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map1 = { 12, 22 },
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
},
{
"calls: pkt_ptr spill into caller stack",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"calls: pkt_ptr spill into caller stack 2",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
/* Marking is still kept, but it is not safe in all cases. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 3",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Marking is still kept and safe here. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 4",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Check marking propagated. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 5",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "same insn cannot be used with different",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 8",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 9",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
},
{
"calls: caller stack init to zero or map_value_or_null",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
/* fetch map_value_or_null or const_zero from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* store into map_value */
BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* if (ctx == 0) return; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
/* else bpf_map_lookup() and *(fp - 8) = r0 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 13 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"calls: stack init to zero and pruning",
.insns = {
/* first make allocated_stack 16 bytes */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
/* now fork the execution such that the false branch
* of the JGT insn will be verified second and it skips
* the zero init of the fp-8 stack slot. If stack liveness
* marking is missing live_read marks from the map_lookup
* call processing, then pruning will incorrectly assume
* that the fp-8 stack slot was unused in the fall-through
* branch and will accept the program incorrectly
*/
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 6 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
};
static int probe_filter_length(const struct bpf_insn *fp)
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "bpf_helpers.h"
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
static __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* copy-paste of jhash from the kernel sources, to make sure llvm
* can compile it into a valid sequence of bpf instructions
*/
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static __attribute__ ((noinline))
u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
static __attribute__ ((noinline))
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static __attribute__ ((noinline))
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
struct flow_key {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
};
struct packet_description {
struct flow_key flow;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_definition {
union {
__be32 vip;
__be32 vipv6[4];
};
__u16 port;
__u16 family;
__u8 proto;
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_pos_lru {
__u32 pos;
__u64 atime;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct lb_stats {
__u64 v2;
__u64 v1;
};
struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip_definition),
.value_size = sizeof(struct vip_meta),
.max_entries = 512,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(struct flow_key),
.value_size = sizeof(struct real_pos_lru),
.max_entries = 300,
.map_flags = 1U << 1,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 12 * 655,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = 40,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct lb_stats),
.max_entries = 515,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = 16,
.map_flags = 0,
};
struct eth_hdr {
unsigned char eth_dest[6];
unsigned char eth_source[6];
unsigned short eth_proto;
};
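/* offset of the L4 header: ethernet header plus the IP header; for
* ICMP error payloads also skip the ICMP header and the embedded
* inner IP header
*/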
static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
__u64 off = sizeof(struct eth_hdr);
if (is_ipv6) {
off += sizeof(struct ipv6hdr);
if (is_icmp)
off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
} else {
off += sizeof(struct iphdr);
if (is_icmp)
off += sizeof(struct icmphdr) + sizeof(struct iphdr);
}
return off;
}
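/* extract the L4 ports into the flow key; for ICMP error payloads the
* embedded header describes the reverse direction, so the ports are swapped
*/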
static __attribute__ ((noinline))
bool parse_udp(void *data, void *data_end,
bool is_ipv6, struct packet_description *pckt)
{
bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
__u64 off = calc_offset(is_ipv6, is_icmp);
struct udphdr *udp;
udp = data + off;
if (udp + 1 > data_end)
return 0;
if (!is_icmp) {
pckt->flow.port16[0] = udp->source;
pckt->flow.port16[1] = udp->dest;
} else {
pckt->flow.port16[0] = udp->dest;
pckt->flow.port16[1] = udp->source;
}
return 1;
}
static __attribute__ ((noinline))
bool parse_tcp(void *data, void *data_end,
bool is_ipv6, struct packet_description *pckt)
{
bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
__u64 off = calc_offset(is_ipv6, is_icmp);
struct tcphdr *tcp;
tcp = data + off;
if (tcp + 1 > data_end)
return 0;
if (tcp->syn)
pckt->flags |= (1 << 1);
if (!is_icmp) {
pckt->flow.port16[0] = tcp->source;
pckt->flow.port16[1] = tcp->dest;
} else {
pckt->flow.port16[0] = tcp->dest;
pckt->flow.port16[1] = tcp->source;
}
return 1;
}
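/* tunnel the packet towards the chosen real: grow headroom with
* bpf_xdp_adjust_head(), build a fresh outer IPv6 header and rewrite
* the ethernet header with the ctl MAC
*/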
static __attribute__ ((noinline))
bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
struct packet_description *pckt,
struct real_definition *dst, __u32 pkt_bytes)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
struct ipv6hdr *ip6h;
__u32 ip_suffix;
void *data_end;
void *data;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
return 0;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
ip6h = data + sizeof(struct eth_hdr);
old_eth = data + sizeof(struct ipv6hdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || ip6h + 1 > data_end)
return 0;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 56710;
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->nexthdr = IPPROTO_IPV6;
ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
ip6h->payload_len =
__builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
ip6h->hop_limit = 4;
ip6h->saddr.in6_u.u6_addr32[0] = 1;
ip6h->saddr.in6_u.u6_addr32[1] = 2;
ip6h->saddr.in6_u.u6_addr32[2] = 3;
ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
return 1;
}
static __attribute__ ((noinline))
bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
struct packet_description *pckt,
struct real_definition *dst, __u32 pkt_bytes)
{
__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
__u16 *next_iph_u16;
struct iphdr *iph;
__u32 csum = 0;
void *data_end;
void *data;
ip_suffix <<= 15;
ip_suffix ^= pckt->flow.src;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
return 0;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
iph = data + sizeof(struct eth_hdr);
old_eth = data + sizeof(struct iphdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || iph + 1 > data_end)
return 0;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
iph->version = 4;
iph->ihl = 5;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 1;
iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
/* don't update iph->daddr, since it will overwrite old eth_proto
* and multiple iterations of bpf_prog_run() will fail
*/
iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
iph->ttl = 4;
next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
return 0;
return 1;
}
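/* undo the encapsulation: rebuild the ethernet header just in front of
* the inner packet and pop the outer header with bpf_xdp_adjust_head()
*/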
static __attribute__ ((noinline))
bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
old_eth = *data;
new_eth = *data + sizeof(struct ipv6hdr);
memcpy(new_eth->eth_source, old_eth->eth_source, 6);
memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
if (inner_v4)
new_eth->eth_proto = 8;
else
new_eth->eth_proto = 56710;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
return 0;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
return 1;
}
static __attribute__ ((noinline))
bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
old_eth = *data;
new_eth = *data + sizeof(struct iphdr);
memcpy(new_eth->eth_source, old_eth->eth_source, 6);
memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
return 0;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
return 1;
}
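/* swap source and destination MACs and bounce the frame back out with XDP_TX */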
static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end)
{
unsigned char tmp_mac[6];
struct eth_hdr *eth;
eth = data;
memcpy(tmp_mac, eth->eth_source, 6);
memcpy(eth->eth_source, eth->eth_dest, 6);
memcpy(eth->eth_dest, tmp_mac, 6);
return XDP_TX;
}
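/* turn an ICMP echo request into an echo reply in place: flip the type,
* patch the checksum, swap the IP addresses, recompute the IP checksum
* and bounce the frame
*/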
static __attribute__ ((noinline))
int send_icmp_reply(void *data, void *data_end)
{
struct icmphdr *icmp_hdr;
__u16 *next_iph_u16;
__u32 tmp_addr = 0;
struct iphdr *iph;
__u32 csum1 = 0;
__u32 csum = 0;
__u64 off = 0;
if (data + sizeof(struct eth_hdr)
+ sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
return XDP_DROP;
off += sizeof(struct eth_hdr);
iph = data + off;
off += sizeof(struct iphdr);
icmp_hdr = data + off;
icmp_hdr->type = 0;
icmp_hdr->checksum += 0x0007;
iph->ttl = 4;
tmp_addr = iph->daddr;
iph->daddr = iph->saddr;
iph->saddr = tmp_addr;
iph->check = 0;
next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
return swap_mac_and_send(data, data_end);
}
static __attribute__ ((noinline))
int send_icmp6_reply(void *data, void *data_end)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
__be32 tmp_addr[4];
__u64 off = 0;
if (data + sizeof(struct eth_hdr)
+ sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
return XDP_DROP;
off += sizeof(struct eth_hdr);
ip6h = data + off;
off += sizeof(struct ipv6hdr);
icmp_hdr = data + off;
icmp_hdr->icmp6_type = 129;
icmp_hdr->icmp6_cksum -= 0x0001;
ip6h->hop_limit = 4;
memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
return swap_mac_and_send(data, data_end);
}
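/* answer echo requests directly; for the handled ICMP error type, pull the
* flow key out of the embedded inner header (addresses swapped back to the
* original flow direction); a negative return means keep processing
*/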
static __attribute__ ((noinline))
int parse_icmpv6(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return XDP_DROP;
if (icmp_hdr->icmp6_type == 128)
return send_icmp6_reply(data, data_end);
if (icmp_hdr->icmp6_type != 3)
return XDP_PASS;
off += sizeof(struct icmp6hdr);
ip6h = data + off;
if (ip6h + 1 > data_end)
return XDP_DROP;
pckt->flow.proto = ip6h->nexthdr;
pckt->flags |= (1 << 0);
memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
return -1;
}
static __attribute__ ((noinline))
int parse_icmp(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return XDP_DROP;
if (icmp_hdr->type == 8)
return send_icmp_reply(data, data_end);
if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
return XDP_PASS;
off += sizeof(struct icmphdr);
iph = data + off;
if (iph + 1 > data_end)
return XDP_DROP;
if (iph->ihl != 5)
return XDP_DROP;
pckt->flow.proto = iph->protocol;
pckt->flags |= (1 << 0);
pckt->flow.src = iph->daddr;
pckt->flow.dst = iph->saddr;
return -1;
}
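/* hash the flow key: jhash over the 16-byte source address plus ports for
* IPv6, jhash_2words over the IPv4 source address and ports otherwise
*/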
static __attribute__ ((noinline))
__u32 get_packet_hash(struct packet_description *pckt,
bool hash_16bytes)
{
if (hash_16bytes)
return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
pckt->flow.ports, 24);
else
return jhash_2words(pckt->flow.src, pckt->flow.ports,
24);
}
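/* pick a real for a new flow: hash the flow, index the ch_rings
* consistent-hashing array with it, resolve the position in the reals map
* and remember the choice in the LRU connection table
*/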
__attribute__ ((noinline))
static bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6, void *lru_map)
{
struct real_pos_lru new_dst_lru = { };
bool hash_16bytes = is_ipv6;
__u32 *real_pos, hash, key;
__u64 cur_time;
if (vip_info->flags & (1 << 2))
hash_16bytes = 1;
if (vip_info->flags & (1 << 3)) {
pckt->flow.port16[0] = pckt->flow.port16[1];
memset(pckt->flow.srcv6, 0, 16);
}
hash = get_packet_hash(pckt, hash_16bytes);
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
return 0;
key = 2 * vip_info->vip_num + hash % 2;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return 0;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return 0;
if (!(vip_info->flags & (1 << 1))) {
__u32 conn_rate_key = 512 + 2;
struct lb_stats *conn_rate_stats =
bpf_map_lookup_elem(&stats, &conn_rate_key);
if (!conn_rate_stats)
return 1;
cur_time = bpf_ktime_get_ns();
if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
conn_rate_stats->v1 = 1;
conn_rate_stats->v2 = cur_time;
} else {
conn_rate_stats->v1 += 1;
if (conn_rate_stats->v1 >= 1)
return 1;
}
if (pckt->flow.proto == IPPROTO_UDP)
new_dst_lru.atime = cur_time;
new_dst_lru.pos = key;
bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
}
return 1;
}
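/* look up an existing flow in the LRU connection table; ignore stale UDP
* entries, refresh the access time otherwise and resolve the stored
* position in the reals map
*/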
__attribute__ ((noinline))
static void connection_table_lookup(struct real_definition **real,
struct packet_description *pckt,
void *lru_map)
{
struct real_pos_lru *dst_lru;
__u64 cur_time;
__u32 key;
dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
if (!dst_lru)
return;
if (pckt->flow.proto == IPPROTO_UDP) {
cur_time = bpf_ktime_get_ns();
if (cur_time - dst_lru->atime > 300000)
return;
dst_lru->atime = cur_time;
}
key = dst_lru->pos;
*real = bpf_map_lookup_elem(&reals, &key);
}
/* don't believe your eyes!
* the function below has 6 arguments whereas bpf and llvm allow a maximum
* of 5, but since it's _static_ llvm can optimize one argument away
*/
__attribute__ ((noinline))
static int process_l3_headers_v6(struct packet_description *pckt,
__u8 *protocol, __u64 off,
__u16 *pkt_bytes, void *data,
void *data_end)
{
struct ipv6hdr *ip6h;
__u64 iph_len;
int action;
ip6h = data + off;
if (ip6h + 1 > data_end)
return XDP_DROP;
iph_len = sizeof(struct ipv6hdr);
*protocol = ip6h->nexthdr;
pckt->flow.proto = *protocol;
*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
off += iph_len;
if (*protocol == 45) {
return XDP_DROP;
} else if (*protocol == 59) {
action = parse_icmpv6(data, data_end, off, pckt);
if (action >= 0)
return action;
} else {
memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
}
return -1;
}
__attribute__ ((noinline))
static int process_l3_headers_v4(struct packet_description *pckt,
__u8 *protocol, __u64 off,
__u16 *pkt_bytes, void *data,
void *data_end)
{
struct iphdr *iph;
__u64 iph_len;
int action;
iph = data + off;
if (iph + 1 > data_end)
return XDP_DROP;
if (iph->ihl != 5)
return XDP_DROP;
*protocol = iph->protocol;
pckt->flow.proto = *protocol;
*pkt_bytes = __builtin_bswap16(iph->tot_len);
off += 20;
if (iph->frag_off & 65343)
return XDP_DROP;
if (*protocol == IPPROTO_ICMP) {
action = parse_icmp(data, data_end, off, pckt);
if (action >= 0)
return action;
} else {
pckt->flow.src = iph->saddr;
pckt->flow.dst = iph->daddr;
}
return -1;
}
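/* per-packet fast path: parse L3/L4 headers into the flow key, look up the
* VIP, find a destination real (connection table first, consistent hashing
* otherwise), encapsulate towards it and update the stats maps
*/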
__attribute__ ((noinline))
static int process_packet(void *data, __u64 off, void *data_end,
bool is_ipv6, struct xdp_md *xdp)
{
struct real_definition *dst = NULL;
struct packet_description pckt = { };
struct vip_definition vip = { };
struct lb_stats *data_stats;
struct eth_hdr *eth = data;
void *lru_map = &lru_cache;
struct vip_meta *vip_info;
__u32 lru_stats_key = 513;
__u32 mac_addr_pos = 0;
__u32 stats_key = 512;
struct ctl_value *cval;
__u16 pkt_bytes;
__u64 iph_len;
__u8 protocol;
__u32 vip_num;
int action;
if (is_ipv6)
action = process_l3_headers_v6(&pckt, &protocol, off,
&pkt_bytes, data, data_end);
else
action = process_l3_headers_v4(&pckt, &protocol, off,
&pkt_bytes, data, data_end);
if (action >= 0)
return action;
protocol = pckt.flow.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(data, data_end, is_ipv6, &pckt))
return XDP_DROP;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(data, data_end, is_ipv6, &pckt))
return XDP_DROP;
} else {
return XDP_TX;
}
if (is_ipv6)
memcpy(vip.vipv6, pckt.flow.dstv6, 16);
else
vip.vip = pckt.flow.dst;
vip.port = pckt.flow.port16[1];
vip.proto = pckt.flow.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.port = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return XDP_PASS;
if (!(vip_info->flags & (1 << 4)))
pckt.flow.port16[1] = 0;
}
if (data_end - data > 1400)
return XDP_DROP;
data_stats = bpf_map_lookup_elem(&stats, &stats_key);
if (!data_stats)
return XDP_DROP;
data_stats->v1 += 1;
if (!dst) {
if (vip_info->flags & (1 << 0))
pckt.flow.port16[0] = 0;
if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
connection_table_lookup(&dst, &pckt, lru_map);
if (dst)
goto out;
if (pckt.flow.proto == IPPROTO_TCP) {
struct lb_stats *lru_stats =
bpf_map_lookup_elem(&stats, &lru_stats_key);
if (!lru_stats)
return XDP_DROP;
if (pckt.flags & (1 << 1))
lru_stats->v1 += 1;
else
lru_stats->v2 += 1;
}
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
return XDP_DROP;
data_stats->v2 += 1;
}
out:
cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
if (!cval)
return XDP_DROP;
if (dst->flags & (1 << 0)) {
if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
return XDP_DROP;
} else {
if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
return XDP_DROP;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return XDP_DROP;
data_stats->v1 += 1;
data_stats->v2 += pkt_bytes;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
if (data + 4 > data_end)
return XDP_DROP;
*(u32 *)data = dst->dst;
return XDP_DROP;
}
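/* XDP entry point: dispatch on the ethertype (ETH_P_IP / ETH_P_IPV6 in
* on-the-wire byte order) into process_packet()
*/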
__attribute__ ((section("xdp-test"), used))
int balancer_ingress(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return XDP_DROP;
eth_proto = eth->eth_proto;
if (eth_proto == 8)
return process_packet(data, nh_off, data_end, 0, ctx);
else if (eth_proto == 56710)
return process_packet(data, nh_off, data_end, 1, ctx);
else
return XDP_DROP;
}
char _license[] __attribute__ ((section("license"), used)) = "GPL";
int _version __attribute__ ((section("version"), used)) = 1;