Commit bd0b4836 authored by Amery Hung's avatar Amery Hung Committed by Alexei Starovoitov

selftests/bpf: Make sure stashed kptr in local kptr is freed recursively

When dropping a local kptr, any kptr stashed into it is supposed to be
freed through bpf_obj_free_fields->__bpf_obj_drop_impl recursively. Add a
test to make sure it happens.

The test first stashes a referenced kptr to "struct task" into a local
kptr and gets the reference count of the task. Then, it drops the local
kptr and reads the reference count of the task again. Since
bpf_obj_free_fields and __bpf_obj_drop_impl will go through the local kptr
recursively during bpf_obj_drop, the dtor of the stashed task kptr should
eventually be called. The second reference count should be one less than
the first one.
Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240827011301.608620-1-amery.hung@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent c634d6f4
...@@ -143,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone ...@@ -143,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone
SEC("tp_btf/task_newtask") SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{ {
struct task_struct *kptr; struct task_struct *kptr, *acquired;
struct __tasks_kfunc_map_value *v, *local; struct __tasks_kfunc_map_value *v, *local;
int refcnt, refcnt_after_drop;
long status; long status;
if (!is_test_kfunc_task()) if (!is_test_kfunc_task())
...@@ -190,7 +191,34 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) ...@@ -190,7 +191,34 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
return 0; return 0;
} }
/* Stash a copy into local kptr and check if it is released recursively */
acquired = bpf_task_acquire(kptr);
if (!acquired) {
err = 7;
bpf_obj_drop(local);
bpf_task_release(kptr);
return 0;
}
bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
acquired = bpf_kptr_xchg(&local->task, acquired);
if (acquired) {
err = 8;
bpf_obj_drop(local); bpf_obj_drop(local);
bpf_task_release(kptr);
bpf_task_release(acquired);
return 0;
}
bpf_obj_drop(local);
bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
if (refcnt != refcnt_after_drop + 1) {
err = 9;
bpf_task_release(kptr);
return 0;
}
bpf_task_release(kptr); bpf_task_release(kptr);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment