Commit eb1f7f71 authored by Benjamin Tissoires, committed by Alexei Starovoitov

bpf/verifier: allow kfunc to return an allocated mem

For drivers (outside of networking), the incoming data is not statically
defined in a struct. Most of the time the data buffer is kzalloc-ed, so we
cannot rely on eBPF and BTF to explore the data.

This commit allows a kfunc to return arbitrary memory that was previously
allocated by the driver.
As an extra benefit, the kfunc can mark the exported memory region as
read-only or read/write.

So, when a kfunc returns a pointer to a plain type rather than to a struct,
we can consider it valid allocated memory as long as:
- one of the arguments is named either rdonly_buf_size or
  rdwr_buf_size,
- and this argument is a const from the caller's point of view.

We can then use this parameter as the size of the allocated memory.
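
For illustration only (not part of this patch), a driver-side kfunc
following this convention could look like the sketch below; the function
name bpf_test_get_data, struct test_ctx and its fields are made up, and
registration of the kfunc in a BTF id set is omitted:

        /* Hypothetical kfunc exporting a driver-allocated buffer to eBPF.
         * Because the last argument is named rdwr_buf_size, the verifier
         * requires it to be a known constant at the call site, uses it as
         * the size of the returned memory and marks the region read-write.
         */
        void *bpf_test_get_data(struct test_ctx *ctx, unsigned int offset,
                                const size_t rdwr_buf_size)
        {
                if (!ctx->data || offset > ctx->allocated_size ||
                    rdwr_buf_size > ctx->allocated_size - offset)
                        return NULL;

                return ctx->data + offset;
        }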

The memory is either read-only or read-write based on the name
of the size parameter.
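
On the eBPF program side, usage could look like this sketch (again with
made-up names, and a hypothetical attach point); the size argument has to
be a build-time constant so the verifier can track it, and the returned
pointer needs a NULL check when the kfunc is flagged KF_RET_NULL:

        extern void *bpf_test_get_data(struct test_ctx *ctx, unsigned int offset,
                                       const size_t rdwr_buf_size) __ksym;

        SEC("fentry/test_device_event")
        int BPF_PROG(test_prog, struct test_ctx *ctx)
        {
                __u8 *data;

                /* 64 is a constant, so R0 becomes a 64-byte read-write
                 * PTR_TO_MEM; with a rdonly_buf_size parameter the region
                 * would be MEM_RDONLY and writes through it rejected.
                 */
                data = bpf_test_get_data(ctx, 0 /* offset */, 64 /* size */);
                if (!data)
                        return 0;

                data[0] = 0x42; /* allowed: the region is read-write */
                return 0;
        }
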
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Link: https://lore.kernel.org/r/20220906151303.2780789-7-benjamin.tissoires@redhat.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent f9b34818
@@ -1944,6 +1944,13 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
struct bpf_kfunc_arg_meta {
u64 r0_size;
bool r0_rdonly;
int ref_obj_id;
u32 flags;
};
struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
@@ -1952,7 +1959,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
u32 kfunc_flags);
struct bpf_kfunc_arg_meta *meta);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
@@ -598,6 +598,8 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
int mark_chain_precision(struct bpf_verifier_env *env, int regno);
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */
@@ -441,4 +441,14 @@ static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dt
}
#endif
static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
{
if (!btf_type_is_ptr(t))
return false;
t = btf_type_skip_modifiers(btf, t->type, NULL);
return btf_type_is_struct(t);
}
#endif
@@ -6199,11 +6199,36 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf,
return true;
}
static bool btf_is_kfunc_arg_mem_size(const struct btf *btf,
const struct btf_param *arg,
const struct bpf_reg_state *reg,
const char *name)
{
int len, target_len = strlen(name);
const struct btf_type *t;
const char *param_name;
t = btf_type_skip_modifiers(btf, arg->type, NULL);
if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
return false;
param_name = btf_name_by_offset(btf, arg->name_off);
if (str_is_empty(param_name))
return false;
len = strlen(param_name);
if (len != target_len)
return false;
if (strcmp(param_name, name))
return false;
return true;
}
static int btf_check_func_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
bool ptr_to_mem_ok,
u32 kfunc_flags,
struct bpf_kfunc_arg_meta *kfunc_meta,
bool processing_call)
{
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
@@ -6241,12 +6266,12 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
return -EINVAL;
}
if (is_kfunc) {
if (is_kfunc && kfunc_meta) {
/* Only kfunc can be release func */
rel = kfunc_flags & KF_RELEASE;
kptr_get = kfunc_flags & KF_KPTR_GET;
trusted_arg = kfunc_flags & KF_TRUSTED_ARGS;
sleepable = kfunc_flags & KF_SLEEPABLE;
rel = kfunc_meta->flags & KF_RELEASE;
kptr_get = kfunc_meta->flags & KF_KPTR_GET;
trusted_arg = kfunc_meta->flags & KF_TRUSTED_ARGS;
sleepable = kfunc_meta->flags & KF_SLEEPABLE;
}
/* check that BTF function arguments match actual types that the
@@ -6259,6 +6284,38 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
t = btf_type_skip_modifiers(btf, args[i].type, NULL);
if (btf_type_is_scalar(t)) {
if (is_kfunc && kfunc_meta) {
bool is_buf_size = false;
/* check for any const scalar parameter of name "rdonly_buf_size"
* or "rdwr_buf_size"
*/
if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
"rdonly_buf_size")) {
kfunc_meta->r0_rdonly = true;
is_buf_size = true;
} else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
"rdwr_buf_size"))
is_buf_size = true;
if (is_buf_size) {
if (kfunc_meta->r0_size) {
bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
return -EINVAL;
}
if (!tnum_is_const(reg->var_off)) {
bpf_log(log, "R%d is not a const\n", regno);
return -EINVAL;
}
kfunc_meta->r0_size = reg->var_off.value;
ret = mark_chain_precision(env, regno);
if (ret)
return ret;
}
}
if (reg->type == SCALAR_VALUE)
continue;
bpf_log(log, "R%d is not a scalar\n", regno);
@@ -6289,6 +6346,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
if (is_kfunc && reg->ref_obj_id) {
/* Ensure only one argument is referenced PTR_TO_BTF_ID */
if (ref_obj_id) {
bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id, ref_obj_id);
return -EFAULT;
}
ref_regno = regno;
ref_obj_id = reg->ref_obj_id;
}
/* kptr_get is only true for kfunc */
if (i == 0 && kptr_get) {
struct bpf_map_value_off_desc *off_desc;
@@ -6361,16 +6429,6 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
if (reg->type == PTR_TO_BTF_ID) {
reg_btf = reg->btf;
reg_ref_id = reg->btf_id;
/* Ensure only one argument is referenced PTR_TO_BTF_ID */
if (reg->ref_obj_id) {
if (ref_obj_id) {
bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id, ref_obj_id);
return -EFAULT;
}
ref_regno = regno;
ref_obj_id = reg->ref_obj_id;
}
} else {
reg_btf = btf_vmlinux;
reg_ref_id = *reg2btf_ids[base_type(reg->type)];
@@ -6461,6 +6519,9 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
return -EINVAL;
}
if (kfunc_meta && ref_obj_id)
kfunc_meta->ref_obj_id = ref_obj_id;
/* returns argument register number > 0 in case of reference release kfunc */
return rel ? ref_regno : 0;
}
@@ -6492,7 +6553,7 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, false);
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false);
/* Compiler optimizations can remove arguments from static functions
* or mismatched type can be passed into a global function.
@@ -6535,7 +6596,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
return -EINVAL;
is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0, true);
err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true);
/* Compiler optimizations can remove arguments from static functions
* or mismatched type can be passed into a global function.
@@ -6549,9 +6610,9 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
u32 kfunc_flags)
struct bpf_kfunc_arg_meta *meta)
{
return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags, true);
return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true);
}
/* Convert BTF of a function into bpf_reg_state if possible
@@ -2908,7 +2908,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
return 0;
}
static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
return __mark_chain_precision(env, regno, -1);
}
@@ -7595,6 +7595,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
{
const struct btf_type *t, *func, *func_proto, *ptr_type;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_kfunc_arg_meta meta = { 0 };
const char *func_name, *ptr_type_name;
u32 i, nargs, func_id, ptr_type_id;
int err, insn_idx = *insn_idx_p;
@@ -7629,8 +7630,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
acq = *kfunc_flags & KF_ACQUIRE;
meta.flags = *kfunc_flags;
/* Check the arguments */
err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags);
err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta);
if (err < 0)
return err;
/* In case of release function, we get register number of refcounted
@@ -7651,7 +7654,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* Check return type */
t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
if (acq && !btf_type_is_ptr(t)) {
if (acq && !btf_type_is_struct_ptr(desc_btf, t)) {
verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
return -EINVAL;
}
@@ -7663,17 +7666,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
&ptr_type_id);
if (!btf_type_is_struct(ptr_type)) {
ptr_type_name = btf_name_by_offset(desc_btf,
ptr_type->name_off);
verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
func_name, btf_type_str(ptr_type),
ptr_type_name);
return -EINVAL;
if (!meta.r0_size) {
ptr_type_name = btf_name_by_offset(desc_btf,
ptr_type->name_off);
verbose(env,
"kernel function %s returns pointer type %s %s is not supported\n",
func_name,
btf_type_str(ptr_type),
ptr_type_name);
return -EINVAL;
}
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_MEM;
regs[BPF_REG_0].mem_size = meta.r0_size;
if (meta.r0_rdonly)
regs[BPF_REG_0].type |= MEM_RDONLY;
/* Ensures we don't access the memory after a release_reference() */
if (meta.ref_obj_id)
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
}
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
if (*kfunc_flags & KF_RET_NULL) {
regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */