Commit 1b403ce7 authored by David Vernet, committed by Alexei Starovoitov

bpf: Remove bpf_cpumask_kptr_get() kfunc

Now that struct bpf_cpumask is RCU safe, there's no need for this kfunc.
Rather than doing the following:

private(MASK) static struct bpf_cpumask __kptr *global;

int BPF_PROG(prog, s32 cpu, ...)
{
	struct bpf_cpumask *cpumask;

	bpf_rcu_read_lock();
	cpumask = bpf_cpumask_kptr_get(&global);
	if (!cpumask) {
		bpf_rcu_read_unlock();
		return -1;
	}
	bpf_cpumask_setall(cpumask);
	...
	bpf_cpumask_release(cpumask);
	bpf_rcu_read_unlock();
}

Programs can instead simply do the following (assuming the same global cpumask):

int BPF_PROG(prog, ...)
{
	struct bpf_cpumask *cpumask;

	bpf_rcu_read_lock();
	cpumask = global;
	if (!cpumask) {
		bpf_rcu_read_unlock();
		return -1;
	}
	bpf_cpumask_setall(cpumask);
	...
	bpf_rcu_read_unlock();
}

In other words, no extra atomic acquire/release, and less boilerplate
code.
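
For completeness, here is a minimal sketch of how such a global kptr gets
populated in the first place (modeled on the test_global_mask_rcu selftest
below; the prog name "populate" is illustrative). private() is the BPF
selftests' convention for placing a hidden global in its own data section;
bpf_cpumask_create() allocates a new mask and bpf_kptr_xchg() atomically
publishes it to the global kptr:

private(MASK) static struct bpf_cpumask __kptr *global;

SEC("tp_btf/task_newtask")
int BPF_PROG(populate, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *fresh, *prev;

	fresh = bpf_cpumask_create();
	if (!fresh)
		return -1;

	/* Atomically swap the new mask into the global kptr. */
	prev = bpf_kptr_xchg(&global, fresh);
	if (prev)
		/* Release the reference on whatever mask we displaced. */
		bpf_cpumask_release(prev);

	return 0;
}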

This patch removes the kfunc, along with its selftests and
documentation.
Signed-off-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/r/20230316054028.88924-5-void@manifault.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent a5a197df
@@ -82,34 +82,6 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 	return cpumask;
 }
 
-/**
- * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
- *			    stored in a map.
- * @cpumaskp: A pointer to a BPF cpumask map value.
- *
- * Attempts to acquire a reference to a BPF cpumask stored in a map value. The
- * cpumask returned by this function must either be embedded in a map as a
- * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
- * no BPF cpumask was found in the specified map value.
- */
-__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
-{
-	struct bpf_cpumask *cpumask;
-
-	/* The BPF memory allocator frees memory backing its caches in an RCU
-	 * callback. Thus, we can safely use RCU to ensure that the cpumask is
-	 * safe to read.
-	 */
-	rcu_read_lock();
-	cpumask = READ_ONCE(*cpumaskp);
-	if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
-		cpumask = NULL;
-	rcu_read_unlock();
-
-	return cpumask;
-}
-
 static void cpumask_free_cb(struct rcu_head *head)
 {
 	struct bpf_cpumask *cpumask;

@@ -435,7 +407,6 @@ BTF_SET8_START(cpumask_kfunc_btf_ids)
 BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU)

@@ -16,7 +16,6 @@ static const char * const cpumask_success_testcases[] = {
 	"test_copy_any_anyand",
 	"test_insert_leave",
 	"test_insert_remove_release",
-	"test_insert_kptr_get_release",
 	"test_global_mask_rcu",
 };

@@ -26,7 +26,6 @@ struct array_map {
 struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
 void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
-struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym;
 u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
 u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
 void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;

@@ -94,30 +94,6 @@ int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_
 	return 0;
 }
 
-SEC("tp_btf/task_newtask")
-__failure __msg("Unreleased reference")
-int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags)
-{
-	struct bpf_cpumask *cpumask;
-	struct __cpumask_map_value *v;
-
-	cpumask = create_cpumask();
-	if (!cpumask)
-		return 0;
-
-	if (cpumask_map_insert(cpumask))
-		return 0;
-
-	v = cpumask_map_value_lookup();
-	if (!v)
-		return 0;
-
-	cpumask = bpf_cpumask_kptr_get(&v->cpumask);
-
-	/* cpumask is never released. */
-	return 0;
-}
-
 SEC("tp_btf/task_newtask")
 __failure __msg("NULL pointer passed to trusted arg0")
 int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)

@@ -394,36 +394,6 @@ int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_fla
 	return 0;
 }
 
-SEC("tp_btf/task_newtask")
-int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags)
-{
-	struct bpf_cpumask *cpumask;
-	struct __cpumask_map_value *v;
-
-	cpumask = create_cpumask();
-	if (!cpumask)
-		return 0;
-
-	if (cpumask_map_insert(cpumask)) {
-		err = 3;
-		return 0;
-	}
-
-	v = cpumask_map_value_lookup();
-	if (!v) {
-		err = 4;
-		return 0;
-	}
-
-	cpumask = bpf_cpumask_kptr_get(&v->cpumask);
-	if (cpumask)
-		bpf_cpumask_release(cpumask);
-	else
-		err = 5;
-
-	return 0;
-}
-
 SEC("tp_btf/task_newtask")
 int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
 {