Commit 04accf79 authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

selftests/bpf: Add negative C tests for kptrs

This uses the newly added SEC("?foo") naming to disable autoload of the
programs, then enables and loads them one by one for the object and
verifies that loading fails and that the error string returned by the
verifier matches the expected one. This is similar to the already
existing verifier tests, but provides coverage for BPF C.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20220511194654.765705-4-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 5cdccadc
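For context, the flow the commit message describes is: programs annotated with SEC("?...") are not auto-loaded when the skeleton is opened, so the test can flip autoload on for exactly one program, load the object, expect the load to fail, and search the captured verifier log for the expected message. Below is a minimal, condensed sketch of that pattern outside the selftest harness; the skeleton name foo_fail and the helper check_fail_prog are placeholders for illustration, and the real implementation is in the diff that follows.

/* Sketch only: assumes a libbpf skeleton "foo_fail" (hypothetical name)
 * whose programs all use SEC("?...") sections, so none are auto-loaded.
 */
#include <string.h>
#include <bpf/libbpf.h>
#include "foo_fail.skel.h"	/* hypothetical skeleton header */

static char log_buf[1024 * 1024];

static int check_fail_prog(const char *prog_name, const char *err_msg)
{
	/* Capture the kernel verifier log so err_msg can be matched against it. */
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		    .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct foo_fail *skel;
	struct bpf_program *prog;
	int ret = -1;

	skel = foo_fail__open_opts(&opts);
	if (!skel)
		return -1;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!prog)
		goto out;

	/* Enable only this one SEC("?...") program, then try to load. */
	bpf_program__set_autoload(prog, true);
	ret = foo_fail__load(skel);

	/* The load must fail and the verifier log must contain err_msg. */
	if (!ret || !strstr(log_buf, err_msg))
		ret = -1;
	else
		ret = 0;
out:
	foo_fail__destroy(skel);
	return ret;
}

The actual test below follows the same shape, driven by a table of (program name, expected verifier message) pairs and the selftests ASSERT_* / subtest helpers.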
@@ -2,8 +2,86 @@
#include <test_progs.h>
#include "map_kptr.skel.h"
#include "map_kptr_fail.skel.h"
void test_map_kptr(void)
static char log_buf[1024 * 1024];
struct {
const char *prog_name;
const char *err_msg;
} map_kptr_fail_tests[] = {
{ "size_not_bpf_dw", "kptr access size must be BPF_DW" },
{ "non_const_var_off", "kptr access cannot have variable offset" },
{ "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" },
{ "misaligned_access_write", "kptr access misaligned expected=8 off=7" },
{ "misaligned_access_read", "kptr access misaligned expected=8 off=1" },
{ "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" },
{ "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" },
{ "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
{ "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" },
{ "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" },
{ "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" },
{ "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" },
{ "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" },
{ "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" },
{ "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" },
{ "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" },
{ "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
{ "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" },
{ "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" },
{ "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" },
{ "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" },
{ "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" },
{ "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
{ "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
{ "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
};
static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
.kernel_log_size = sizeof(log_buf),
.kernel_log_level = 1);
struct map_kptr_fail *skel;
struct bpf_program *prog;
int ret;
skel = map_kptr_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts"))
return;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto end;
bpf_program__set_autoload(prog, true);
ret = map_kptr_fail__load(skel);
if (!ASSERT_ERR(ret, "map_kptr__load must fail"))
goto end;
if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
fprintf(stderr, "Expected: %s\n", err_msg);
fprintf(stderr, "Verifier: %s\n", log_buf);
}
end:
map_kptr_fail__destroy(skel);
}
static void test_map_kptr_fail(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) {
if (!test__start_subtest(map_kptr_fail_tests[i].prog_name))
continue;
test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name,
map_kptr_fail_tests[i].err_msg);
}
}
static void test_map_kptr_success(void)
{
struct map_kptr *skel;
int key = 0, ret;
@@ -35,3 +113,10 @@ void test_map_kptr(void)
map_kptr__destroy(skel);
}
void test_map_kptr(void)
{
if (test__start_subtest("success"))
test_map_kptr_success();
test_map_kptr_fail();
}
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct map_value {
char buf[8];
struct prog_test_ref_kfunc __kptr *unref_ptr;
struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
struct prog_test_member __kptr_ref *ref_memb_ptr;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} array_map SEC(".maps");
extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
SEC("?tc")
int size_not_bpf_dw(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
*(u32 *)&v->unref_ptr = 0;
return 0;
}
SEC("?tc")
int non_const_var_off(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
*(u64 *)((void *)v + id) = 0;
return 0;
}
SEC("?tc")
int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
bpf_kptr_xchg((void *)v + id, NULL);
return 0;
}
SEC("?tc")
int misaligned_access_write(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
*(void **)((void *)v + 7) = NULL;
return 0;
}
SEC("?tc")
int misaligned_access_read(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
return *(u64 *)((void *)v + 1);
}
SEC("?tc")
int reject_var_off_store(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0, id;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
id = ctx->protocol;
if (id < 4 || id > 12)
return 0;
unref_ptr += id;
v->unref_ptr = unref_ptr;
return 0;
}
SEC("?tc")
int reject_bad_type_match(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
unref_ptr = (void *)unref_ptr + 4;
v->unref_ptr = unref_ptr;
return 0;
}
SEC("?tc")
int marked_as_untrusted_or_null(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_this_cpu_ptr(v->unref_ptr);
return 0;
}
SEC("?tc")
int correct_btf_id_check_size(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->unref_ptr;
if (!p)
return 0;
return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
}
SEC("?tc")
int inherit_untrusted_on_walk(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *unref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
unref_ptr = v->unref_ptr;
if (!unref_ptr)
return 0;
unref_ptr = unref_ptr->next;
bpf_this_cpu_ptr(unref_ptr);
return 0;
}
SEC("?tc")
int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kptr_xchg(&v->unref_ptr, NULL);
return 0;
}
SEC("?tc")
int reject_kptr_get_no_map_val(struct __sk_buff *ctx)
{
bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0);
return 0;
}
SEC("?tc")
int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx)
{
bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0);
return 0;
}
SEC("?tc")
int reject_kptr_get_no_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kfunc_call_test_kptr_get((void *)v, 0, 0);
return 0;
}
SEC("?tc")
int reject_kptr_get_on_unref(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0);
return 0;
}
SEC("?tc")
int reject_kptr_get_bad_type_match(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0);
return 0;
}
SEC("?tc")
int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_this_cpu_ptr(v->ref_ptr);
return 0;
}
SEC("?tc")
int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->ref_ptr;
if (!p)
return 0;
/* Checkmate, clang */
*(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
return 0;
}
SEC("?tc")
int reject_untrusted_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = v->ref_ptr;
if (!p)
return 0;
bpf_kptr_xchg(&v->ref_ptr, p);
return 0;
}
SEC("?tc")
int reject_bad_type_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *ref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!ref_ptr)
return 0;
bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
return 0;
}
SEC("?tc")
int reject_member_of_ref_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *ref_ptr;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!ref_ptr)
return 0;
bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
return 0;
}
SEC("?syscall")
int reject_indirect_helper_access(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_get_current_comm(v, sizeof(v->buf) + 1);
return 0;
}
__noinline
int write_func(int *p)
{
return p ? *p = 42 : 0;
}
SEC("?tc")
int reject_indirect_global_func_access(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
return write_func((void *)v + 5);
}
SEC("?tc")
int kptr_xchg_ref_state(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!p)
return 0;
bpf_kptr_xchg(&v->ref_ptr, p);
return 0;
}
SEC("?tc")
int kptr_get_ref_state(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
v = bpf_map_lookup_elem(&array_map, &key);
if (!v)
return 0;
bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
return 0;
}
char _license[] SEC("license") = "GPL";