Commit 05a945de authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

selftests/bpf: Add verifier tests for kptr

Reuse bpf_prog_test functions to test the support for PTR_TO_BTF_ID in
the BPF map case, including tests that verify implementation sanity and
corner cases.
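
For reference, the map value layout these tests exercise corresponds
roughly to the following BPF-side declaration (a sketch only: the field
names are illustrative, and the tests below build this type out of raw
BTF rather than from C source):

  #define __kptr __attribute__((btf_type_tag("kptr")))
  #define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

  struct btf_ptr {
          struct prog_test_ref_kfunc __kptr *unref_ptr;
          struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
          struct prog_test_member __kptr_ref *memb_ptr;
  };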
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220424214901.2743946-13-memxor@gmail.com
parent 2cbc469a
@@ -584,6 +584,12 @@ noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
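/* Test kfunc with a kptr_get signature: it takes a pointer to the map
 * value's referenced kptr field. A real kptr_get implementation would
 * typically attempt an RCU-protected refcount_inc_not_zero() on the
 * pointee; this test stub simply returns the global test object.
 */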
noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b)
{
return &prog_test_struct;
}
struct prog_test_pass1 {
int x0;
struct {
@@ -669,6 +675,7 @@ BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
@@ -682,6 +689,7 @@ BTF_SET_END(test_sk_check_kfunc_ids)
BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_SET_END(test_sk_acquire_kfunc_ids)
BTF_SET_START(test_sk_release_kfunc_ids)
@@ -691,8 +699,13 @@ BTF_SET_END(test_sk_release_kfunc_ids)
BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_SET_END(test_sk_ret_null_kfunc_ids)
BTF_SET_START(test_sk_kptr_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_SET_END(test_sk_kptr_acquire_kfunc_ids)
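/* Kfuncs in the kptr_acquire set are the ones allowed to take a pointer
 * to a referenced kptr field in a map value and return a referenced
 * pointer of the same BTF type.
 */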
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
{
@@ -1579,14 +1592,36 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
.owner = THIS_MODULE,
.check_set = &test_sk_check_kfunc_ids,
.acquire_set = &test_sk_acquire_kfunc_ids,
.release_set = &test_sk_release_kfunc_ids,
.ret_null_set = &test_sk_ret_null_kfunc_ids,
.kptr_acquire_set = &test_sk_kptr_acquire_kfunc_ids
};
BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)
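/* The list above is consumed below as (struct, destructor kfunc) pairs:
 * entries [0]/[1] map prog_test_ref_kfunc to its release function,
 * entries [2]/[3] do the same for prog_test_member.
 */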
static int __init bpf_prog_test_run_init(void)
{
const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
{
.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
},
{
.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
},
};
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);
@@ -53,7 +53,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -101,6 +101,7 @@ struct bpf_test {
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
int fixup_map_kptr[MAX_FIXUPS];
struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
@@ -621,8 +622,15 @@ static int create_cgroup_storage(bool percpu)
* struct timer {
* struct bpf_timer t;
* };
* struct btf_ptr {
* struct prog_test_ref_kfunc __kptr *ptr;
* struct prog_test_ref_kfunc __kptr_ref *ptr;
* struct prog_test_member __kptr_ref *ptr;
* }
*/
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
"\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
"\0prog_test_member";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -638,6 +646,22 @@ static __u32 btf_raw_types[] = {
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
/* struct prog_test_ref_kfunc */ /* [6] */
BTF_STRUCT_ENC(51, 0, 0),
BTF_STRUCT_ENC(89, 0, 0), /* [7] */
/* type tag "kptr" */
BTF_TYPE_TAG_ENC(75, 6), /* [8] */
/* type tag "kptr_ref" */
BTF_TYPE_TAG_ENC(80, 6), /* [9] */
BTF_TYPE_TAG_ENC(80, 7), /* [10] */
BTF_PTR_ENC(8), /* [11] */
BTF_PTR_ENC(9), /* [12] */
BTF_PTR_ENC(10), /* [13] */
/* struct btf_ptr */ /* [14] */
BTF_STRUCT_ENC(43, 3, 24),
BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};
static int load_btf(void)
@@ -727,6 +751,25 @@ static int create_map_timer(void)
return fd;
}
static int create_map_kptr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.btf_key_type_id = 1,
.btf_value_type_id = 14,
);
int fd, btf_fd;
btf_fd = load_btf();
if (btf_fd < 0)
return -1;
opts.btf_fd = btf_fd;
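/* value_size 24 below matches struct btf_ptr (BTF id 14): three 8-byte
 * kptr fields at offsets 0, 8 and 16.
 */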
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
if (fd < 0)
printf("Failed to create map with btf_id pointer\n");
return fd;
}
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -754,6 +797,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
int *fixup_map_kptr = test->fixup_map_kptr;
struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
if (test->fill_helper) {
@@ -947,6 +991,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_timer++;
} while (*fixup_map_timer);
}
if (*fixup_map_kptr) {
map_fds[22] = create_map_kptr();
do {
prog[*fixup_map_kptr].imm = map_fds[22];
fixup_map_kptr++;
} while (*fixup_map_kptr);
}
/* Patch in kfunc BTF IDs */
if (fixup_kfunc_btf_id->kfunc) {
...
/* Common tests */
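/* Each test below opens with the same prologue: store key 0 at fp-4,
 * call bpf_map_lookup_elem() on the kptr map patched in through
 * fixup_map_kptr, and exit early if the lookup returns NULL, leaving
 * r0 pointing at a struct btf_ptr map value.
 */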
{
"map_kptr: BPF_ST imm != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "BPF_ST imm must be 0 when storing to kptr at off=0",
},
{
"map_kptr: size != bpf_size_to_bytes(BPF_DW)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access size must be BPF_DW",
},
{
"map_kptr: map_value non-const var_off",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access cannot have variable offset",
},
{
"map_kptr: bpf_kptr_xchg non-const var_off",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 doesn't have constant offset. kptr has to be at the constant offset",
},
{
"map_kptr: unaligned boundary load/store",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access misaligned expected=0 off=7",
},
{
"map_kptr: reject var_off != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
},
/* Tests for unreferenced PTR_TO_BTF_ID */
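/* An unreferenced kptr (__kptr) loads as PTR_TO_BTF_ID | PTR_UNTRUSTED:
 * it can be dereferenced, but pointers walked out of it stay untrusted,
 * no reference state is created for it, and it cannot be passed to
 * kfuncs or helpers that require a trusted or referenced pointer.
 */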
{
"map_kptr: unref: reject btf_struct_ids_match == false",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc expected=ptr_prog_test",
},
{
"map_kptr: unref: loaded pointer marked as untrusted",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
},
{
"map_kptr: unref: correct in kernel type size",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 24),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "access beyond struct prog_test_ref_kfunc at off 24 size 8",
},
{
"map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 type=untrusted_ptr_ expected=percpu_ptr_",
},
{
"map_kptr: unref: no reference state created",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = ACCEPT,
},
{
"map_kptr: unref: bpf_kptr_xchg rejected",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "off=0 kptr isn't referenced kptr",
},
{
"map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "arg#0 no referenced kptr at map value offset=0",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_kptr_get", 13 },
}
},
/* Tests for referenced PTR_TO_BTF_ID */
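/* A referenced kptr (__kptr_ref) may only be moved in or out of the map
 * with bpf_kptr_xchg() (or acquired through a kptr_get kfunc); direct
 * BPF_ST/BPF_STX stores to the field are rejected.
 */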
{
"map_kptr: ref: loaded pointer marked as untrusted",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_",
},
{
"map_kptr: ref: reject off != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member",
},
{
"map_kptr: ref: reference state created and released on xchg",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "Unreleased reference id=5 alloc_insn=20",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 15 },
}
},
{
"map_kptr: ref: reject STX",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "store to referenced kptr disallowed",
},
{
"map_kptr: ref: reject ST",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 8, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "store to referenced kptr disallowed",
},
{
"map_kptr: reject helper access to kptr",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr cannot be accessed indirectly by helper",
},