Commit b0966c72 authored by Dave Marchevsky, committed by Alexei Starovoitov

bpf: Support bpf_kptr_xchg into local kptr

Currently, users can only stash kptrs into map values with bpf_kptr_xchg().
This patch additionally supports stashing kptrs into local kptrs by adding
local kptr as a valid destination type.

When stashing into a local kptr, the btf_record in the program BTF is used
instead of the btf_record in the map to search for the btf_field of the
local kptr.

The local-kptr-specific checks in check_reg_type() only apply when the
source argument of bpf_kptr_xchg() is a local kptr. Therefore, we make the
scope of those checks explicit, since the destination can now also be a
local kptr.

Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Link: https://lore.kernel.org/r/20240813212424.2871455-5-amery.hung@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d59232af
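
For context, stashing into a local kptr from a BPF program would look roughly
like the sketch below. This is only an illustration in the style of the BPF
selftests: the type and map names are made up, and bpf_obj_new()/bpf_obj_drop()
are assumed to be available via the selftests' bpf_experimental.h.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node_data {
	long key;
	long data;
};

/* A local kptr type that itself carries a kptr field. With this patch,
 * &stash->node is a valid destination for bpf_kptr_xchg().
 */
struct stash {
	struct node_data __kptr *node;
};

struct map_value {
	struct stash __kptr *s;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} stash_map SEC(".maps");

SEC("tc")
int stash_into_local_kptr(void *ctx)
{
	struct map_value *v;
	struct node_data *node;
	struct stash *s;
	int idx = 0;

	node = bpf_obj_new(typeof(*node));
	if (!node)
		return 0;

	s = bpf_obj_new(typeof(*s));
	if (!s) {
		bpf_obj_drop(node);
		return 0;
	}

	/* New with this patch: the destination is a field of a local kptr,
	 * not a map value.
	 */
	node = bpf_kptr_xchg(&s->node, node);
	if (node)
		bpf_obj_drop(node);

	/* Stash the outer object into a map value, as was already supported */
	v = bpf_map_lookup_elem(&stash_map, &idx);
	if (!v) {
		bpf_obj_drop(s);
		return 0;
	}
	s = bpf_kptr_xchg(&v->s, s);
	if (s)
		bpf_obj_drop(s);

	return 0;
}

char _license[] SEC("license") = "GPL";
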
@@ -5519,11 +5519,12 @@ union bpf_attr {
  *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
  *		invalid arguments are passed.
  *
- * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * void *bpf_kptr_xchg(void *dst, void *ptr)
  *	Description
- *		Exchange kptr at pointer *map_value* with *ptr*, and return the
- *		old value. *ptr* can be NULL, otherwise it must be a referenced
- *		pointer which will be released when this helper is called.
+ *		Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+ *		*dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+ *		it must be a referenced pointer which will be released when this helper
+ *		is called.
  *	Return
  *		The old value of kptr (which can be NULL). The returned pointer
  *		if not NULL, is a reference which must be released using its
@@ -1619,9 +1619,9 @@ void bpf_wq_cancel_and_free(void *val)
 	schedule_work(&work->delete_work);
 }
 
-BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
 {
-	unsigned long *kptr = map_value;
+	unsigned long *kptr = dst;
 
 	/* This helper may be inlined by verifier. */
 	return xchg(kptr, (unsigned long)ptr);
@@ -7803,29 +7803,38 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
 			     struct bpf_call_arg_meta *meta)
 {
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
-	struct bpf_map *map_ptr = reg->map_ptr;
 	struct btf_field *kptr_field;
+	struct bpf_map *map_ptr;
+	struct btf_record *rec;
 	u32 kptr_off;
 
+	if (type_is_ptr_alloc_obj(reg->type)) {
+		rec = reg_btf_record(reg);
+	} else { /* PTR_TO_MAP_VALUE */
+		map_ptr = reg->map_ptr;
+		if (!map_ptr->btf) {
+			verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
+				map_ptr->name);
+			return -EINVAL;
+		}
+		rec = map_ptr->record;
+		meta->map_ptr = map_ptr;
+	}
+
 	if (!tnum_is_const(reg->var_off)) {
 		verbose(env,
 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
 			regno);
 		return -EINVAL;
 	}
-	if (!map_ptr->btf) {
-		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
-			map_ptr->name);
-		return -EINVAL;
-	}
-	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
-		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
+
+	if (!btf_record_has_field(rec, BPF_KPTR)) {
+		verbose(env, "R%d has no valid kptr\n", regno);
 		return -EINVAL;
 	}
 
-	meta->map_ptr = map_ptr;
 	kptr_off = reg->off + reg->var_off.value;
-	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
+	kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR);
 	if (!kptr_field) {
 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
 		return -EACCES;
@@ -8412,7 +8421,12 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
-static const struct bpf_reg_types kptr_xchg_dest_types = { .types = { PTR_TO_MAP_VALUE } };
+static const struct bpf_reg_types kptr_xchg_dest_types = {
+	.types = {
+		PTR_TO_MAP_VALUE,
+		PTR_TO_BTF_ID | MEM_ALLOC
+	}
+};
 static const struct bpf_reg_types dynptr_types = {
 	.types = {
 		PTR_TO_STACK,
@@ -8483,7 +8497,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 	if (base_type(arg_type) == ARG_PTR_TO_MEM)
 		type &= ~DYNPTR_TYPE_FLAG_MASK;
 
-	if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
+	/* Local kptr types are allowed as the source argument of bpf_kptr_xchg */
+	if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) {
 		type &= ~MEM_ALLOC;
 		type &= ~MEM_PERCPU;
 	}
@@ -8576,7 +8591,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
 			return -EFAULT;
 		}
-		if (meta->func_id == BPF_FUNC_kptr_xchg) {
+		/* Check if local kptr in src arg matches kptr in dst arg */
+		if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) {
 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
 				return -EACCES;
 		}
@@ -8887,7 +8903,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		meta->release_regno = regno;
 	}
 
-	if (reg->ref_obj_id) {
+	if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) {
 		if (meta->ref_obj_id) {
 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
 				regno, reg->ref_obj_id,