Commit 6adf82a4 authored by Yonghong Song, committed by Alexei Starovoitov

selftests/bpf: Add tests for array map with local percpu kptr

Add non-sleepable and sleepable tests with percpu kptr. For the
non-sleepable test, four programs are executed in the following order:
  1. allocate percpu data.
  2. assign values to percpu data.
  3. retrieve percpu data.
  4. de-allocate percpu data.

The sleepable prog exercises all four steps above in a single
prog. Also, for the sleepable prog, rcu_read_lock is needed to
protect direct percpu ptr access (from the map value) and the
subsequent bpf_this_cpu_ptr() and bpf_per_cpu_ptr() helpers. The
combined lifecycle is sketched below.
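
For orientation only, here are the four steps condensed into one
sequence, assuming e is the looked-up array element (the real
programs below split this across separate fentry progs):

	struct val_t __percpu_kptr *p, *old;
	struct val_t *v;

	/* 1. allocate and publish in the map value */
	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&e->pc, p);
	if (old)
		bpf_percpu_obj_drop(old);	/* slot was already populated */

	/* 2./3. assign and retrieve through a per-CPU pointer */
	p = e->pc;
	if (p) {
		v = bpf_per_cpu_ptr(p, 0);
		if (v)
			v->c = 1;
	}

	/* 4. de-allocate */
	p = bpf_kptr_xchg(&e->pc, NULL);
	if (p)
		bpf_percpu_obj_drop(p);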
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230827152811.2000125-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 968c76cb
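
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c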
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "percpu_alloc_array.skel.h"

static void test_array(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_1, true);
	bpf_program__set_autoload(skel->progs.test_array_map_2, true);
	bpf_program__set_autoload(skel->progs.test_array_map_3, true);
	bpf_program__set_autoload(skel->progs.test_array_map_4, true);

	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	/* A test_run on a tracing prog invokes the bpf_fentry_test*
	 * battery, so test_array_map_1 through test_array_map_4
	 * (attached to bpf_fentry_test1..4) all fire in order from
	 * this single run.
	 */
	prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map 1-4");
	ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}
static void test_array_sleepable(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_10, true);

	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map_10");
	ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}
void test_percpu_alloc(void)
{
	if (test__start_subtest("array"))
		test_array();
	if (test__start_subtest("array_sleepable"))
		test_array_sleepable();
}
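
tools/testing/selftests/bpf/progs/percpu_alloc_array.c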
#include "bpf_experimental.h"
struct val_t {
long b, c, d;
};
struct elem {
long sum;
struct val_t __percpu_kptr *pc;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct elem);
} array SEC(".maps");
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
const volatile int nr_cpus;
/* Initialize the percpu object */
SEC("?fentry/bpf_fentry_test1")
int BPF_PROG(test_array_map_1)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;

	/* bpf_kptr_xchg() installs the new object and returns the old
	 * one; free the old object if the slot was already populated.
	 */
	p = bpf_kptr_xchg(&e->pc, p);
	if (p)
		bpf_percpu_obj_drop(p);
	return 0;
}
/* Update percpu data */
SEC("?fentry/bpf_fentry_test2")
int BPF_PROG(test_array_map_2)
{
	struct val_t __percpu_kptr *p;
	struct val_t *v;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	p = e->pc;
	if (!p)
		return 0;

	/* Write through CPU 0's copy of the percpu object */
	v = bpf_per_cpu_ptr(p, 0);
	if (!v)
		return 0;
	v->c = 1;
	v->d = 2;
	return 0;
}
int cpu0_field_d, sum_field_c;

/* Summarize percpu data */
SEC("?fentry/bpf_fentry_test3")
int BPF_PROG(test_array_map_3)
{
	struct val_t __percpu_kptr *p;
	int i, index = 0;
	struct val_t *v;
	struct elem *e;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	p = e->pc;
	if (!p)
		return 0;

	/* Aggregate across all possible CPUs. Only CPU 0's copy was
	 * written, so the test expects cpu0_field_d == 2 and
	 * sum_field_c == 1.
	 */
	bpf_for(i, 0, nr_cpus) {
		v = bpf_per_cpu_ptr(p, i);
		if (v) {
			if (i == 0)
				cpu0_field_d = v->d;
			sum_field_c += v->c;
		}
	}
	return 0;
}
/* Explicitly free allocated percpu data */
SEC("?fentry/bpf_fentry_test4")
int BPF_PROG(test_array_map_4)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* delete */
	p = bpf_kptr_xchg(&e->pc, NULL);
	if (p)
		bpf_percpu_obj_drop(p);
	return 0;
}
SEC("?fentry.s/bpf_fentry_test1")
int BPF_PROG(test_array_map_10)
{
struct val_t __percpu_kptr *p, *p1;
int i, index = 0;
struct val_t *v;
struct elem *e;
e = bpf_map_lookup_elem(&array, &index);
if (!e)
return 0;
bpf_rcu_read_lock();
p = e->pc;
if (!p) {
p = bpf_percpu_obj_new(struct val_t);
if (!p)
goto out;
p1 = bpf_kptr_xchg(&e->pc, p);
if (p1) {
/* race condition */
bpf_percpu_obj_drop(p1);
}
p = e->pc;
if (!p)
goto out;
}
v = bpf_this_cpu_ptr(p);
v->c = 3;
v = bpf_this_cpu_ptr(p);
v->c = 0;
v = bpf_per_cpu_ptr(p, 0);
if (!v)
goto out;
v->c = 1;
v->d = 2;
/* delete */
p1 = bpf_kptr_xchg(&e->pc, NULL);
if (!p1)
goto out;
bpf_for(i, 0, nr_cpus) {
v = bpf_per_cpu_ptr(p, i);
if (v) {
if (i == 0)
cpu0_field_d = v->d;
sum_field_c += v->c;
}
}
/* finally release p */
bpf_percpu_obj_drop(p1);
out:
bpf_rcu_read_unlock();
return 0;
}
char _license[] SEC("license") = "GPL";
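
Once built, these subtests can be run through the selftests harness,
e.g.:

	cd tools/testing/selftests/bpf
	./test_progs -t percpu_alloc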