Commit 53e80a39 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'bpf-core changes for preparation of HID-bpf'

Benjamin Tissoires says:

====================

Hi,

well, given that the HID changes haven't moved a lot in the past
revisions and that I am cc-ing a bunch of people, I have dropped them
while we focus on the last 2 requirements in bpf-core changes.

I'll submit a HID targeted series when we get these in tree, which
would make things a lot more independent.

For reference, the whole reasons for these 2 main changes are at
https://lore.kernel.org/bpf/20220902132938.2409206-1-benjamin.tissoires@redhat.com/

Compared to v10 (in addition of dropping the HID changes), I have
changed the selftests so we can test both light skeletons and libbpf
calls.

Cheers,
Benjamin
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 028a9642 22ed8d5a
......@@ -1944,13 +1944,22 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
struct bpf_kfunc_arg_meta {
u64 r0_size;
bool r0_rdonly;
int ref_obj_id;
u32 flags;
};
struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
u32 kfunc_flags);
struct bpf_kfunc_arg_meta *meta);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
......
......@@ -598,6 +598,8 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
int mark_chain_precision(struct bpf_verifier_env *env, int regno);
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */
......
......@@ -441,4 +441,14 @@ static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dt
}
#endif
/* Return true when @t is a pointer type whose pointee, after skipping
 * modifiers (typedef/const/volatile), is a struct.
 */
static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
{
	const struct btf_type *pointee;

	if (!btf_type_is_ptr(t))
		return false;

	pointee = btf_type_skip_modifiers(btf, t->type, NULL);
	return btf_type_is_struct(pointee);
}
#endif
......@@ -208,7 +208,7 @@ enum btf_kfunc_hook {
};
enum {
BTF_KFUNC_SET_MAX_CNT = 32,
BTF_KFUNC_SET_MAX_CNT = 256,
BTF_DTOR_KFUNC_MAX_CNT = 256,
};
......@@ -6199,11 +6199,37 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf,
return true;
}
/* Return true when kfunc BTF parameter @arg is a scalar named exactly @name
 * and the register passed for it currently holds a scalar value.
 */
static bool btf_is_kfunc_arg_mem_size(const struct btf *btf,
				      const struct btf_param *arg,
				      const struct bpf_reg_state *reg,
				      const char *name)
{
	const struct btf_type *arg_type;
	const char *arg_name;

	/* Both the register and the BTF declaration must be scalars. */
	if (reg->type != SCALAR_VALUE)
		return false;
	arg_type = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(arg_type))
		return false;

	/* An anonymous parameter can never match a name. */
	arg_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(arg_name))
		return false;

	/* strcmp() == 0 already implies equal lengths. */
	return strcmp(arg_name, name) == 0;
}
static int btf_check_func_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
bool ptr_to_mem_ok,
u32 kfunc_flags)
struct bpf_kfunc_arg_meta *kfunc_meta,
bool processing_call)
{
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
bool rel = false, kptr_get = false, trusted_arg = false;
......@@ -6240,12 +6266,12 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
return -EINVAL;
}
if (is_kfunc) {
if (is_kfunc && kfunc_meta) {
/* Only kfunc can be release func */
rel = kfunc_flags & KF_RELEASE;
kptr_get = kfunc_flags & KF_KPTR_GET;
trusted_arg = kfunc_flags & KF_TRUSTED_ARGS;
sleepable = kfunc_flags & KF_SLEEPABLE;
rel = kfunc_meta->flags & KF_RELEASE;
kptr_get = kfunc_meta->flags & KF_KPTR_GET;
trusted_arg = kfunc_meta->flags & KF_TRUSTED_ARGS;
sleepable = kfunc_meta->flags & KF_SLEEPABLE;
}
/* check that BTF function arguments match actual types that the
......@@ -6258,6 +6284,38 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
t = btf_type_skip_modifiers(btf, args[i].type, NULL);
if (btf_type_is_scalar(t)) {
if (is_kfunc && kfunc_meta) {
bool is_buf_size = false;
/* check for any const scalar parameter of name "rdonly_buf_size"
* or "rdwr_buf_size"
*/
if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
"rdonly_buf_size")) {
kfunc_meta->r0_rdonly = true;
is_buf_size = true;
} else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
"rdwr_buf_size"))
is_buf_size = true;
if (is_buf_size) {
if (kfunc_meta->r0_size) {
bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
return -EINVAL;
}
if (!tnum_is_const(reg->var_off)) {
bpf_log(log, "R%d is not a const\n", regno);
return -EINVAL;
}
kfunc_meta->r0_size = reg->var_off.value;
ret = mark_chain_precision(env, regno);
if (ret)
return ret;
}
}
if (reg->type == SCALAR_VALUE)
continue;
bpf_log(log, "R%d is not a scalar\n", regno);
......@@ -6288,6 +6346,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
if (is_kfunc && reg->ref_obj_id) {
/* Ensure only one argument is referenced PTR_TO_BTF_ID */
if (ref_obj_id) {
bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id, ref_obj_id);
return -EFAULT;
}
ref_regno = regno;
ref_obj_id = reg->ref_obj_id;
}
/* kptr_get is only true for kfunc */
if (i == 0 && kptr_get) {
struct bpf_map_value_off_desc *off_desc;
......@@ -6360,16 +6429,6 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
if (reg->type == PTR_TO_BTF_ID) {
reg_btf = reg->btf;
reg_ref_id = reg->btf_id;
/* Ensure only one argument is referenced PTR_TO_BTF_ID */
if (reg->ref_obj_id) {
if (ref_obj_id) {
bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id, ref_obj_id);
return -EFAULT;
}
ref_regno = regno;
ref_obj_id = reg->ref_obj_id;
}
} else {
reg_btf = btf_vmlinux;
reg_ref_id = *reg2btf_ids[base_type(reg->type)];
......@@ -6389,7 +6448,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
reg_ref_tname);
return -EINVAL;
}
} else if (ptr_to_mem_ok) {
} else if (ptr_to_mem_ok && processing_call) {
const struct btf_type *resolve_ret;
u32 type_size;
......@@ -6460,11 +6519,14 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
return -EINVAL;
}
if (kfunc_meta && ref_obj_id)
kfunc_meta->ref_obj_id = ref_obj_id;
/* returns argument register number > 0 in case of reference release kfunc */
return rel ? ref_regno : 0;
}
/* Compare BTF of a function with given bpf_reg_state.
/* Compare BTF of a function declaration with given bpf_reg_state.
* Returns:
* EFAULT - there is a verifier bug. Abort verification.
* EINVAL - there is a type mismatch or BTF is not available.
......@@ -6491,7 +6553,50 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0);
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false);
/* Compiler optimizations can remove arguments from static functions
* or mismatched type can be passed into a global function.
* In such cases mark the function as unreliable from BTF point of view.
*/
if (err)
prog->aux->func_info_aux[subprog].unreliable = true;
return err;
}
/* Compare BTF of a function call with given bpf_reg_state.
* Returns:
* EFAULT - there is a verifier bug. Abort verification.
* EINVAL - there is a type mismatch or BTF is not available.
* 0 - BTF matches with what bpf_reg_state expects.
* Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
*
* NOTE: the code is duplicated from btf_check_subprog_arg_match()
* because btf_check_func_arg_match() is still doing both. Once that
* function is split in 2, we can call from here btf_check_subprog_arg_match()
* first, and then treat the calling part in a new code path.
*/
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs)
{
struct bpf_prog *prog = env->prog;
struct btf *btf = prog->aux->btf;
bool is_global;
u32 btf_id;
int err;
if (!prog->aux->func_info)
return -EINVAL;
btf_id = prog->aux->func_info[subprog].type_id;
if (!btf_id)
return -EFAULT;
if (prog->aux->func_info_aux[subprog].unreliable)
return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true);
/* Compiler optimizations can remove arguments from static functions
* or mismatched type can be passed into a global function.
......@@ -6505,9 +6610,9 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
u32 kfunc_flags)
struct bpf_kfunc_arg_meta *meta)
{
return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags);
return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true);
}
/* Convert BTF of a function into bpf_reg_state if possible
......
......@@ -2908,7 +2908,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
return 0;
}
static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
return __mark_chain_precision(env, regno, -1);
}
......@@ -5233,6 +5233,25 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
env,
regno, reg->off, access_size,
zero_size_allowed, ACCESS_HELPER, meta);
case PTR_TO_CTX:
/* in case the function doesn't know how to access the context,
* (because we are in a program of type SYSCALL for example), we
* can not statically check its size.
* Dynamically check it now.
*/
if (!env->ops->convert_ctx_access) {
enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
int offset = access_size - 1;
/* Allow zero-byte read from PTR_TO_CTX */
if (access_size == 0)
return zero_size_allowed ? 0 : -EACCES;
return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
atype, -1, false);
}
fallthrough;
default: /* scalar_value or invalid ptr */
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
......@@ -6629,7 +6648,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
func_info_aux = env->prog->aux->func_info_aux;
if (func_info_aux)
is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_subprog_arg_match(env, subprog, caller->regs);
err = btf_check_subprog_call(env, subprog, caller->regs);
if (err == -EFAULT)
return err;
if (is_global) {
......@@ -7576,6 +7595,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
{
const struct btf_type *t, *func, *func_proto, *ptr_type;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_kfunc_arg_meta meta = { 0 };
const char *func_name, *ptr_type_name;
u32 i, nargs, func_id, ptr_type_id;
int err, insn_idx = *insn_idx_p;
......@@ -7610,8 +7630,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
acq = *kfunc_flags & KF_ACQUIRE;
meta.flags = *kfunc_flags;
/* Check the arguments */
err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags);
err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta);
if (err < 0)
return err;
/* In case of release function, we get register number of refcounted
......@@ -7632,7 +7654,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* Check return type */
t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
if (acq && !btf_type_is_ptr(t)) {
if (acq && !btf_type_is_struct_ptr(desc_btf, t)) {
verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
return -EINVAL;
}
......@@ -7644,17 +7666,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
&ptr_type_id);
if (!btf_type_is_struct(ptr_type)) {
ptr_type_name = btf_name_by_offset(desc_btf,
ptr_type->name_off);
verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
func_name, btf_type_str(ptr_type),
ptr_type_name);
return -EINVAL;
if (!meta.r0_size) {
ptr_type_name = btf_name_by_offset(desc_btf,
ptr_type->name_off);
verbose(env,
"kernel function %s returns pointer type %s %s is not supported\n",
func_name,
btf_type_str(ptr_type),
ptr_type_name);
return -EINVAL;
}
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_MEM;
regs[BPF_REG_0].mem_size = meta.r0_size;
if (meta.r0_rdonly)
regs[BPF_REG_0].type |= MEM_RDONLY;
/* Ensures we don't access the memory after a release_reference() */
if (meta.ref_obj_id)
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
}
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
if (*kfunc_flags & KF_RET_NULL) {
regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
......
......@@ -606,6 +606,38 @@ noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
WARN_ON_ONCE(1);
}
/* Expose @p as a raw int buffer of at most two ints; any larger request
 * yields NULL.  (A negative @size converts to a huge unsigned value in the
 * comparison and is therefore also refused.)
 */
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size <= 2 * sizeof(int))
		return (int *)p;

	return NULL;
}
/* Test kfunc returning a read-write buffer backed by @p, or NULL when the
 * requested size is too big.  The parameter name "rdwr_buf_size" is what
 * the verifier pattern-matches to learn the returned buffer's size.
 */
noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}
/* Same as the rdwr variant, but the "rdonly_buf_size" parameter name makes
 * the verifier mark the returned buffer read-only (MEM_RDONLY).
 */
noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
/* The next 2 ones can't really be used for testing, except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
/* Deliberately broken: registered with KF_ACQUIRE but returns int * rather
 * than a struct pointer, so the verifier must reject any call to it.
 */
noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
/* KF_RELEASE counterpart for the int * "acquire" above; it exists only to
 * probe verifier behavior, so there is nothing to actually release.
 */
noinline void bpf_kfunc_call_int_mem_release(int *p)
{
}
noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
......@@ -712,6 +744,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
......@@ -1634,6 +1670,7 @@ static int __init bpf_prog_test_run_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
THIS_MODULE);
......
......@@ -351,11 +351,12 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
test_subskeleton.skel.h test_subskeleton_lib.skel.h \
test_usdt.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
LSKELS := fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
map_ptr_kern.c core_kern.c core_kern_overflow.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
kfunc_call_test_subprog.c
SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.bpf.o test_static_linked2.bpf.o
......
......@@ -2,6 +2,8 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "kfunc_call_fail.skel.h"
#include "kfunc_call_test.skel.h"
#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
#include "kfunc_call_test_subprog.lskel.h"
......@@ -9,36 +11,220 @@
#include "cap_helpers.h"
static void test_main(void)
static size_t log_buf_sz = 1048576; /* 1 MB */
static char obj_log_buf[1048576];
/* How bpf_test_run_opts must be prepared for a given test program. */
enum kfunc_test_type {
	tc_test = 0,		/* run with an IPv4 packet as data */
	syscall_test,		/* run with a struct syscall_test_args ctx */
	syscall_null_ctx_test,	/* run with no ctx at all */
};
/* Description of one kfunc selftest program. */
struct kfunc_test_params {
	const char *prog_name;			/* BPF program (and subtest) name */
	unsigned long lskel_prog_desc_offset;	/* offsetof() of the prog in the lskel; 0 for fail tests */
	int retval;				/* expected retval (success) or run error (fail tests) */
	enum kfunc_test_type test_type;		/* how to set up test_run opts */
	const char *expected_err_msg;		/* NULL for success tests */
};
#define __BPF_TEST_SUCCESS(name, __retval, type) \
{ \
.prog_name = #name, \
.lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \
.retval = __retval, \
.test_type = type, \
.expected_err_msg = NULL, \
}
#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \
{ \
.prog_name = #name, \
.lskel_prog_desc_offset = 0 /* unused when test is failing */, \
.retval = __retval, \
.test_type = type, \
.expected_err_msg = error_msg, \
}
#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test)
#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test)
#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg)
#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \
__BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
/* Table of subtests: entries with expected_err_msg are expected to fail. */
static struct kfunc_test_params kfunc_tests[] = {
	/* failure cases:
	 * if retval is 0 -> the program will fail to load and the error message is an error
	 * if retval is not 0 -> the program can be loaded but running it will give the
	 *                       provided return value. The error message is thus the one
	 *                       from a successful load
	 */
	SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"),
	SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
	TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"),
	TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"),
	TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
	TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
	TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),

	/* success cases */
	TC_TEST(kfunc_call_test1, 12),
	TC_TEST(kfunc_call_test2, 3),
	TC_TEST(kfunc_call_test_ref_btf_id, 0),
	TC_TEST(kfunc_call_test_get_mem, 42),
	SYSCALL_TEST(kfunc_syscall_test, 0),
	SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
};
struct syscall_test_args {
__u8 data[16];
size_t size;
};
/* Run one expected-to-succeed program twice: once through the normal libbpf
 * skeleton and once through the light skeleton, checking retval both times.
 *
 * The scraped source interleaved old and new diff lines (two conflicting
 * `skel` declarations, duplicate LIBBPF_OPTS, dead test1/test2 code), which
 * could not compile; this is the coherent reconstruction.
 */
static void verify_success(struct kfunc_test_params *param)
{
	struct kfunc_call_test_lskel *lskel = NULL;
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_prog_desc *lskel_prog;
	struct kfunc_call_test *skel;
	struct bpf_program *prog;
	int prog_fd, err;
	struct syscall_test_args args = {
		.size = 10,
	};

	switch (param->test_type) {
	case syscall_test:
		topts.ctx_in = &args;
		topts.ctx_size_in = sizeof(args);
		/* fallthrough */
	case syscall_null_ctx_test:
		break;
	case tc_test:
		topts.data_in = &pkt_v4;
		topts.data_size_in = sizeof(pkt_v4);
		topts.repeat = 1;
		break;
	}

	/* first test with normal libbpf */
	skel = kfunc_call_test__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	prog_fd = bpf_program__fd(prog);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, param->prog_name))
		goto cleanup;

	if (!ASSERT_EQ(topts.retval, param->retval, "retval"))
		goto cleanup;

	/* second test with light skeletons */
	lskel = kfunc_call_test_lskel__open_and_load();
	if (!ASSERT_OK_PTR(lskel, "lskel"))
		goto cleanup;

	/* locate the prog descriptor inside the lskel via its recorded offset */
	lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset);

	prog_fd = lskel_prog->prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, param->prog_name))
		goto cleanup;

	ASSERT_EQ(topts.retval, param->retval, "retval");

cleanup:
	kfunc_call_test__destroy(skel);
	if (lskel)
		kfunc_call_test_lskel__destroy(lskel);
}
/* Load one expected-to-fail program from kfunc_call_fail and check that it
 * either fails verification (param->retval == 0) or loads but fails at run
 * time with param->retval.  In both cases the verifier log must contain
 * param->expected_err_msg.
 *
 * Fix: `topts.repeat = 1;` sat after the tc_test `break;` and was
 * unreachable; it now runs before the break.
 */
static void verify_fail(struct kfunc_test_params *param)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_program *prog;
	struct kfunc_call_fail *skel;
	int prog_fd, err;
	struct syscall_test_args args = {
		.size = 10,
	};

	/* capture the verifier log so we can grep for expected_err_msg */
	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 1;

	switch (param->test_type) {
	case syscall_test:
		topts.ctx_in = &args;
		topts.ctx_size_in = sizeof(args);
		/* fallthrough */
	case syscall_null_ctx_test:
		break;
	case tc_test:
		topts.data_in = &pkt_v4;
		topts.data_size_in = sizeof(pkt_v4);
		topts.repeat = 1;
		break;
	}

	skel = kfunc_call_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts"))
		goto cleanup;

	prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	/* fail programs use SEC("?...") and are not auto-loaded; enable
	 * just the one under test
	 */
	bpf_program__set_autoload(prog, true);

	err = kfunc_call_fail__load(skel);
	if (!param->retval) {
		/* the verifier is supposed to complain and refuses to load */
		if (!ASSERT_ERR(err, "unexpected load success"))
			goto out_err;
	} else {
		/* the program is loaded but must dynamically fail */
		if (!ASSERT_OK(err, "unexpected load error"))
			goto out_err;

		prog_fd = bpf_program__fd(prog);
		err = bpf_prog_test_run_opts(prog_fd, &topts);
		if (!ASSERT_EQ(err, param->retval, param->prog_name))
			goto out_err;
	}

out_err:
	if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) {
		fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg);
		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
	}

cleanup:
	kfunc_call_fail__destroy(skel);
}
/* Run every entry of kfunc_tests as its own subtest.
 *
 * Fix: a stray `kfunc_call_test_lskel__destroy(skel);` (diff residue
 * referencing an undeclared `skel`) has been removed from the loop.
 */
static void test_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) {
		if (!test__start_subtest(kfunc_tests[i].prog_name))
			continue;

		if (!kfunc_tests[i].expected_err_msg)
			verify_success(&kfunc_tests[i]);
		else
			verify_fail(&kfunc_tests[i]);
	}
}
static void test_subprog(void)
......@@ -121,8 +307,7 @@ static void test_destructive(void)
void test_kfunc_call(void)
{
if (test__start_subtest("main"))
test_main();
test_main();
if (test__start_subtest("subprog"))
test_subprog();
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
extern void bpf_kfunc_call_int_mem_release(int *p) __ksym;
struct syscall_test_args {
__u8 data[16];
size_t size;
};
/* Negative test: passes a length one byte larger than the ctx struct, so
 * the verifier must reject the mem/len pair at load time.
 */
SEC("?syscall")
int kfunc_syscall_test_fail(struct syscall_test_args *args)
{
	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1);

	return 0;
}
/* Negative test: with a possibly-NULL ctx pointer, a non-zero length passed
 * to the kfunc must be rejected.
 */
SEC("?syscall")
int kfunc_syscall_test_null_fail(struct syscall_test_args *args)
{
	/* Must be called with args as a NULL pointer
	 * we do not check for it to have the verifier consider that
	 * the pointer might not be null, and so we can load it.
	 *
	 * So the following can not be added:
	 *
	 * if (args)
	 *     return -22;
	 */

	bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args));

	return 0;
}
/* Negative test: writing through a buffer obtained from the rdonly kfunc
 * must be rejected ("R0 cannot write into rdonly_mem").
 */
SEC("?tc")
int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
		if (p)
			p[0] = 42; /* this is a read-only buffer, so -EACCES */
		else
			ret = -1;

		bpf_kfunc_call_test_release(pt);
	}
	return ret;
}
/* Negative test: the buffer's validity is tied to the acquired object, so
 * reading p after bpf_kfunc_call_test_release(pt) must be rejected
 * ("invalid mem access 'scalar'").
 */
SEC("?tc")
int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
		if (p) {
			p[0] = 42;
			ret = p[1]; /* 108 */
		} else {
			ret = -1;
		}

		bpf_kfunc_call_test_release(pt);
	}
	if (p)
		ret = p[0]; /* p is not valid anymore */

	return ret;
}
/* Negative test: index 2 * sizeof(int) is past the end of the returned
 * two-int buffer, so the access must be rejected as out of bounds.
 */
SEC("?tc")
int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
		if (p)
			ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */
		else
			ret = -1;

		bpf_kfunc_call_test_release(pt);
	}
	return ret;
}
int not_const_size = 2 * sizeof(int);
/* Negative test: the buf-size argument must be a verifier-known constant;
 * a global variable is not, so the call must be rejected ("is not a const").
 */
SEC("?tc")
int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */
		if (p)
			ret = p[0];
		else
			ret = -1;

		bpf_kfunc_call_test_release(pt);
	}
	return ret;
}
/* Negative test: bpf_kfunc_call_test_acq_rdonly_mem() is KF_ACQUIRE but
 * returns int * instead of a struct pointer, which the verifier rejects.
 */
SEC("?tc")
int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		/* we are failing on this one, because we are not acquiring a PTR_TO_BTF_ID (a struct ptr) */
		p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int));
		if (p)
			ret = p[0];
		else
			ret = -1;

		bpf_kfunc_call_int_mem_release(p);

		bpf_kfunc_call_test_release(pt);
	}
	return ret;
}
char _license[] SEC("license") = "GPL";
......@@ -14,6 +14,8 @@ extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym;
extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym;
extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
......@@ -92,4 +94,73 @@ int kfunc_call_test_pass(struct __sk_buff *skb)
return 0;
}
struct syscall_test_args {
__u8 data[16];
size_t size;
};
/* Positive test: pass PTR_TO_CTX memory to a kfunc from a syscall program
 * with two fixed sizes and one bounded dynamic size; all must be accepted.
 */
SEC("syscall")
int kfunc_syscall_test(struct syscall_test_args *args)
{
	const long size = args->size;

	if (size > sizeof(args->data))
		return -7; /* -E2BIG */

	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
	bpf_kfunc_call_test_mem_len_pass1(&args->data, size);

	return 0;
}
/* Positive test: a zero-length read from a possibly-NULL ctx pointer must
 * be allowed.
 */
SEC("syscall")
int kfunc_syscall_test_null(struct syscall_test_args *args)
{
	/* Must be called with args as a NULL pointer
	 * we do not check for it to have the verifier consider that
	 * the pointer might not be null, and so we can load it.
	 *
	 * So the following can not be added:
	 *
	 * if (args)
	 *     return -22;
	 */

	bpf_kfunc_call_test_mem_len_pass1(args, 0);

	return 0;
}
/* Positive test: obtain a rdwr buffer, write/read through it, then obtain a
 * rdonly buffer and read back; the expected final retval is 42.
 */
SEC("tc")
int kfunc_call_test_get_mem(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *pt;
	unsigned long s = 0;
	int *p = NULL;
	int ret = 0;

	pt = bpf_kfunc_call_test_acquire(&s);
	if (pt) {
		p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
		if (p) {
			p[0] = 42;
			ret = p[1]; /* 108 */
		} else {
			ret = -1;
		}

		if (ret >= 0) {
			p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
			if (p)
				ret = p[0]; /* 42 */
			else
				ret = -1;
		}

		bpf_kfunc_call_test_release(pt);
	}
	return ret;
}
char _license[] SEC("license") = "GPL";
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment