Commit 1391040b authored by Alexei Starovoitov

Merge branch 'bpf-misc-improvements'

Daniel Borkmann says:

====================
This series adds various misc improvements to BPF: detection of
misconfigured BPF helper definitions for mem/size argument pairs,
making the csum_diff helper available to XDP as well, various test
cases, removal of the recently added pure_initcall(), restriction of
the JIT sysctls to CAP_SYS_ADMIN in the initial namespace, a minor
size improvement for the x86 JIT in ALU ops, output of the complexity
limit to the verifier log, and last but not least making the event
output helpers more flexible by moving them to the
ARG_CONST_SIZE_OR_ZERO type.

Thanks!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 417f1d9f 1728a4f2
@@ -25,8 +25,6 @@
#include "bpf_jit_32.h"
int bpf_jit_enable __read_mostly;
#define STACK_OFFSET(k) (k)
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
......
@@ -31,8 +31,6 @@
#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
......
@@ -1207,8 +1207,6 @@ static int build_body(struct jit_ctx *ctx)
return 0;
}
int bpf_jit_enable __read_mostly;
void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
......
@@ -177,8 +177,6 @@ static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
(ctx->idx * 4) - 4;
}
int bpf_jit_enable __read_mostly;
enum which_ebpf_reg {
src_reg,
src_reg_no_fp,
......
@@ -18,8 +18,6 @@
#include "bpf_jit32.h"
int bpf_jit_enable __read_mostly;
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb();
......
@@ -21,8 +21,6 @@
#include "bpf_jit64.h"
int bpf_jit_enable __read_mostly;
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
memset32(area, BREAKPOINT_INSTRUCTION, size/4);
......
@@ -28,8 +28,6 @@
#include <asm/set_memory.h>
#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
u32 seen_reg[16]; /* Array to remember which registers are used */
......
@@ -11,8 +11,6 @@
#include "bpf_jit_32.h"
int bpf_jit_enable __read_mostly;
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
......
@@ -12,8 +12,6 @@
#include "bpf_jit_64.h"
int bpf_jit_enable __read_mostly;
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
......
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
@@ -154,6 +152,11 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
static bool is_axreg(u32 reg)
{
return reg == BPF_REG_0;
}
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
@@ -447,16 +450,36 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
else if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
/* b3 holds 'normal' opcode, b2 short form only valid
* in case dst is eax/rax.
*/
switch (BPF_OP(insn->code)) {
case BPF_ADD: b3 = 0xC0; break;
case BPF_SUB: b3 = 0xE8; break;
case BPF_AND: b3 = 0xE0; break;
case BPF_OR: b3 = 0xC8; break;
case BPF_XOR: b3 = 0xF0; break;
case BPF_ADD:
b3 = 0xC0;
b2 = 0x05;
break;
case BPF_SUB:
b3 = 0xE8;
b2 = 0x2D;
break;
case BPF_AND:
b3 = 0xE0;
b2 = 0x25;
break;
case BPF_OR:
b3 = 0xC8;
b2 = 0x0D;
break;
case BPF_XOR:
b3 = 0xF0;
b2 = 0x35;
break;
}
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
else if (is_axreg(dst_reg))
EMIT1_off32(b2, imm32);
else
EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
break;
......
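For context on the x86-64 ALU hunk above: when the destination register is rax/eax and the immediate does not fit in 8 bits, the accumulator short form selected via is_axreg() and the new b2 opcodes (0x05/0x2D/0x25/0x0D/0x35) saves one byte per instruction. A minimal userspace sketch of the two encodings of "add rax, imm32" (byte values per the x86-64 ISA; the emit helpers below are illustrative, not part of the JIT):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Short form (new path when dst == rax): REX.W 05 imm32 -> 6 bytes */
	static int emit_add_rax_short(uint8_t *p, int32_t imm)
	{
		p[0] = 0x48;			/* REX.W */
		p[1] = 0x05;			/* ADD rAX, imm32 (accumulator form) */
		memcpy(p + 2, &imm, 4);
		return 6;
	}

	/* Generic form (old path): REX.W 81 /0 imm32 -> 7 bytes */
	static int emit_add_reg_generic(uint8_t *p, int32_t imm)
	{
		p[0] = 0x48;			/* REX.W */
		p[1] = 0x81;			/* ADD r/m64, imm32 */
		p[2] = 0xc0;			/* ModRM: reg field /0, r/m = rax */
		memcpy(p + 3, &imm, 4);
		return 7;
	}

	int main(void)
	{
		uint8_t a[16], b[16];

		printf("short form: %d bytes, generic form: %d bytes\n",
		       emit_add_rax_short(a, 0x12345678),
		       emit_add_reg_generic(b, 0x12345678));
		return 0;
	}

The imm8 path (opcode 0x83) is unchanged; the saving only applies to immediates that need the full 32-bit encoding.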
@@ -300,6 +300,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
unsigned long *symbol_start,
@@ -381,8 +386,6 @@ static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
int bpf_jit_kallsyms __read_mostly;
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
@@ -563,8 +566,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
int bpf_jit_harden __read_mostly;
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
@@ -1379,9 +1380,13 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
}
#else
static unsigned int __bpf_prog_ret0(const void *ctx,
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
const struct bpf_insn *insn)
{
/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
* is not working properly, so warn about it!
*/
WARN_ON_ONCE(1);
return 0;
}
#endif
@@ -1441,7 +1446,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
fp->bpf_func = __bpf_prog_ret0;
fp->bpf_func = __bpf_prog_ret0_warn;
#endif
/* eBPF JITs can rewrite the program in case constant
......
@@ -1837,6 +1837,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MEM ||
type == ARG_PTR_TO_MEM_OR_NULL ||
type == ARG_PTR_TO_UNINIT_MEM;
}
static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
return type == ARG_CONST_SIZE ||
type == ARG_CONST_SIZE_OR_ZERO;
}
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
@@ -1886,9 +1899,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
@@ -1949,25 +1960,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* bpf_xxx(..., buf, len) call will access 'len' bytes
* from stack pointer 'buf'. Check it
* note: regno == len, regno - 1 == buf
*/
if (regno == 0) {
/* kernel subsystem misconfigured verifier */
verbose(env,
"ARG_CONST_SIZE cannot be first argument\n");
return -EACCES;
}
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
@@ -2111,7 +2109,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
static int check_raw_mode(const struct bpf_func_proto *fn)
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
@@ -2126,7 +2124,44 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
return count > 1 ? -EINVAL : 0;
/* We only support one arg being in raw mode at the moment,
* which is sufficient for the helper functions we have
* right now.
*/
return count <= 1;
}
static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
enum bpf_arg_type arg_next)
{
return (arg_type_is_mem_ptr(arg_curr) &&
!arg_type_is_mem_size(arg_next)) ||
(!arg_type_is_mem_ptr(arg_curr) &&
arg_type_is_mem_size(arg_next));
}
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
/* bpf_xxx(..., buf, len) call will access 'len'
* bytes from memory 'buf'. Both arg types need
* to be paired, so make sure there's no buggy
* helper function specification.
*/
if (arg_type_is_mem_size(fn->arg1_type) ||
arg_type_is_mem_ptr(fn->arg5_type) ||
check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
return false;
return true;
}
static int check_func_proto(const struct bpf_func_proto *fn)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2282,7 +2317,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
@@ -2306,10 +2340,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
*/
err = check_raw_mode(fn);
err = check_func_proto(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@@ -4779,7 +4810,8 @@ static int do_check(struct bpf_verifier_env *env)
insn_idx++;
}
verbose(env, "processed %d insns, stack depth ", insn_processed);
verbose(env, "processed %d insns (limit %d), stack depth ",
insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
for (i = 0; i < env->subprog_cnt + 1; i++) {
u32 depth = env->subprog_stack_depth[i];
......
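The check_func_proto()/check_arg_pair_ok() machinery added above lets the verifier detect misconfigured helper definitions where a mem pointer argument is not paired with a size argument (or vice versa). A hedged sketch of a prototype that would now be rejected with the "kernel subsystem misconfigured func" message; bpf_broken_proto is a made-up example, not a real helper:

	/* Hypothetical, deliberately broken helper prototype: arg1 points to
	 * memory but arg2 is not ARG_CONST_SIZE[_OR_ZERO], so
	 * check_args_pair_invalid() fires and check_func_proto() fails. */
	static const struct bpf_func_proto bpf_broken_proto = {
		.func		= NULL,			/* placeholder */
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_PTR_TO_MEM,	/* buf ... */
		.arg2_type	= ARG_ANYTHING,		/* ... but no paired size */
	};

	/* A correct pairing, as the event output helpers below use:
	 *	.arg4_type = ARG_PTR_TO_MEM,
	 *	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
	 */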
@@ -6109,6 +6109,110 @@ static struct bpf_test tests[] = {
{ { ETH_HLEN, 42 } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
},
/* Checking interpreter vs JIT wrt signed extended imms. */
{
"JNE signed compare, test 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JNE, R2, R4, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 4",
.u.insns_int = {
BPF_LD_IMM64(R1, -17104896),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 5",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xfefb0000),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 6",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x7efb0000),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 7",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, 2),
},
CLASSIC | FLAG_NO_DATA,
{},
{ { 0, 2 } },
},
};
static struct net_device dev;
......
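The "JNE signed compare" tests above pin down a point where interpreter and JIT must agree: the 32-bit immediate of a BPF_JMP_IMM is sign-extended to 64 bits for the comparison, while a 32-bit ALU move zero-extends its destination register. A small plain-C userspace sketch of the arithmetic behind test 2 (illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t reg = 0xfefbbc12u & 0xffff0000u;	/* R2 after the 64-bit AND */
		int32_t  imm = (int32_t)0xfefb0000u;		/* JMP_IMM immediate */
		uint64_t cmp = (uint64_t)(int64_t)imm;		/* sign-extended to 64 bit */

		/* BPF_JNE compares the full 64-bit values: reg != cmp, so the
		 * jump is taken and the program exits with R0 == 1, matching
		 * the expected test result. */
		printf("reg=%#llx cmp=%#llx equal=%d\n",
		       (unsigned long long)reg, (unsigned long long)cmp,
		       reg == cmp);
		return 0;
	}

Test 3 flips the expectation because the comparison value is loaded with a 32-bit register move, which zero-extends and therefore matches R2 exactly.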
@@ -2861,7 +2861,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -3150,7 +3150,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
@@ -3456,6 +3456,8 @@ xdp_func_proto(enum bpf_func_id func_id)
return &bpf_xdp_event_output_proto;
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
case BPF_FUNC_csum_diff:
return &bpf_csum_diff_proto;
case BPF_FUNC_xdp_adjust_head:
return &bpf_xdp_adjust_head_proto;
case BPF_FUNC_xdp_adjust_meta:
......
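With bpf_csum_diff now exposed through xdp_func_proto() (hunk above), XDP programs can compute checksum deltas when rewriting packet fields. A minimal sketch, assuming the usual libbpf-style helper and SEC() declarations (the header names are the common convention, not part of this commit):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>	/* assumed helper/SEC() declarations */
	#include <bpf/bpf_endian.h>	/* assumed bpf_htonl() */

	SEC("xdp")
	int xdp_csum_diff_demo(struct xdp_md *ctx)
	{
		__be32 old_addr = bpf_htonl(0xc0a80001);	/* 192.168.0.1 */
		__be32 new_addr = bpf_htonl(0xc0a80002);	/* 192.168.0.2 */
		__s64 delta;

		/* Folded delta between old and new value; a real program would
		 * fold it into the IP/L4 checksum of the packet it rewrites. */
		delta = bpf_csum_diff(&old_addr, sizeof(old_addr),
				      &new_addr, sizeof(new_addr), 0);
		if (delta < 0)
			return XDP_ABORTED;

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";

The same hunks also relax arg5 of the event output helpers to ARG_CONST_SIZE_OR_ZERO, so a zero-length output is no longer rejected by the verifier.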
@@ -25,6 +25,7 @@
static int zero = 0;
static int one = 1;
static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
@@ -250,6 +251,46 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
#ifdef CONFIG_BPF_JIT
static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret, jit_enable = *(int *)table->data;
struct ctl_table tmp = *table;
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
tmp.data = &jit_enable;
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && !ret) {
if (jit_enable < 2 ||
(jit_enable == 2 && bpf_dump_raw_ok())) {
*(int *)table->data = jit_enable;
if (jit_enable == 2)
pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
} else {
ret = -EPERM;
}
}
return ret;
}
# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif
#endif
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -325,13 +366,14 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_enable,
.maxlen = sizeof(int),
.mode = 0644,
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
.proc_handler = proc_dointvec
#else
.proc_handler = proc_dointvec_minmax,
.proc_handler = proc_dointvec_minmax_bpf_enable,
# ifdef CONFIG_BPF_JIT_ALWAYS_ON
.extra1 = &one,
.extra2 = &one,
#endif
# else
.extra1 = &zero,
.extra2 = &two,
# endif
},
# ifdef CONFIG_HAVE_EBPF_JIT
{
@@ -339,14 +381,18 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_harden,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec,
.proc_handler = proc_dointvec_minmax_bpf_restricted,
.extra1 = &zero,
.extra2 = &two,
},
{
.procname = "bpf_jit_kallsyms",
.data = &bpf_jit_kallsyms,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec,
.proc_handler = proc_dointvec_minmax_bpf_restricted,
.extra1 = &zero,
.extra2 = &one,
},
# endif
#endif
......
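The sysctl hunks above route bpf_jit_enable through proc_dointvec_minmax_bpf_enable and the hardening/kallsyms knobs through proc_dointvec_minmax_bpf_restricted, so writes now additionally require CAP_SYS_ADMIN in the initial namespace. A small userspace sketch of the observable effect, assuming it is run as root with CAP_SYS_ADMIN dropped (e.g. under capsh); the proc path is the standard sysctl location, nothing here is new API:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* The file mode (0644/0600) still lets root open it; the new
		 * proc handler then rejects the write itself when the caller
		 * lacks CAP_SYS_ADMIN. */
		int fd = open("/proc/sys/net/core/bpf_jit_enable", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1\n", 2) < 0)
			printf("write: %s (EPERM expected without CAP_SYS_ADMIN)\n",
			       strerror(errno));
		close(fd);
		return 0;
	}

Writes of bpf_jit_enable are also range-checked now (0..2 normally, pinned to 1 with CONFIG_BPF_JIT_ALWAYS_ON), and setting 2 prints the "NEVER use this in production" warning.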
@@ -2613,15 +2613,6 @@ static int __init sock_init(void)
core_initcall(sock_init); /* early initcall */
static int __init jit_init(void)
{
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
bpf_jit_enable = 1;
#endif
return 0;
}
pure_initcall(jit_init);
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
......
@@ -101,6 +101,93 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.retval = -3,
},
{
"DIV32 by 0, zero check 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"DIV32 by 0, zero check 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"DIV64 by 0, zero check",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"MOD32 by 0, zero check 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"MOD32 by 0, zero check 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"MOD64 by 0, zero check",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"empty prog",
.insns = {
},
.errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
"only exit insn",
.insns = {
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"unreachable",
.insns = {
......
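On the DIV/MOD-by-0 tests above: R0 is set to 42 first, yet the expected retval is 0. That matches the runtime behaviour these tests pin down at this point: a zero divisor makes the program bail out and return 0 rather than fault. A hedged sketch of that check, inferred from the expected values rather than quoted from the interpreter:

	/* Illustrative only: a zero divisor terminates the whole program with
	 * a return value of 0, which is why retval is 0 even though R0 was
	 * set to 42 before the division. */
	static u64 run_div64(u64 dst, u64 src, bool *terminated)
	{
		if (src == 0) {
			*terminated = true;	/* program returns 0 here */
			return 0;
		}
		*terminated = false;
		return dst / src;
	}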