Commit b2531d4b authored by Andrii Nakryiko, committed by Daniel Borkmann

selftests/bpf: Convert some selftests to high-level BPF map APIs

Convert a bunch of selftests to use the newly added high-level BPF map
APIs.

This change exposed that the map_kptr selftests allocated a buffer that
was too big, which is fixed in this patch as well.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220512220713.2617964-2-andrii@kernel.org
parent 737d0646
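
The high-level bpf_map__*() APIs used throughout the diff below operate on the skeleton's struct bpf_map * and take explicit key/value sizes, which libbpf validates against the map definition, instead of a raw map FD obtained via bpf_map__fd(). A minimal before/after sketch of the conversion pattern (illustrative only; the skeleton and map names are placeholders borrowed from one of the tests):

	/* before: low-level API on a raw map FD, no key/value size checking */
	int zero = 0, res = 0;
	int err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);

	/* after: high-level API on the struct bpf_map *, with explicit sizes
	 * that libbpf checks against the map's key_size/value_size
	 */
	err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero),
				   &res, sizeof(res), 0);

The explicit size arguments are what exposed the oversized buffer in the map_kptr test fixed below.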
@@ -167,7 +167,7 @@ void test_core_autosize(void)
 	if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
 		goto cleanup;
 
-	err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
+	err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
 	if (!ASSERT_OK(err, "bss_lookup"))
 		goto cleanup;
@@ -6,31 +6,32 @@
 void test_core_retro(void)
 {
-	int err, zero = 0, res, duration = 0, my_pid = getpid();
+	int err, zero = 0, res, my_pid = getpid();
 	struct test_core_retro *skel;
 
 	/* load program */
 	skel = test_core_retro__open_and_load();
-	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
 		goto out_close;
 
-	err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
-	if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
+	err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
+				   &my_pid, sizeof(my_pid), 0);
+	if (!ASSERT_OK(err, "map_update"))
 		goto out_close;
 
 	/* attach probe */
 	err = test_core_retro__attach(skel);
-	if (CHECK(err, "attach_kprobe", "err %d\n", err))
+	if (!ASSERT_OK(err, "attach_kprobe"))
 		goto out_close;
 
 	/* trigger */
 	usleep(1);
 
-	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
-	if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
+	err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
+	if (!ASSERT_OK(err, "map_lookup"))
 		goto out_close;
 
-	CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
+	ASSERT_EQ(res, my_pid, "pid_check");
 
 out_close:
 	test_core_retro__destroy(skel);
@@ -10,9 +10,10 @@ static unsigned int duration;
 static void test_hash_map(void)
 {
-	int i, err, hashmap_fd, max_entries, percpu_map_fd;
+	int i, err, max_entries;
 	struct for_each_hash_map_elem *skel;
 	__u64 *percpu_valbuf = NULL;
+	size_t percpu_val_sz;
 	__u32 key, num_cpus;
 	__u64 val;
 	LIBBPF_OPTS(bpf_test_run_opts, topts,
@@ -25,26 +26,27 @@ static void test_hash_map(void)
 	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
 		return;
 
-	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
 	max_entries = bpf_map__max_entries(skel->maps.hashmap);
 	for (i = 0; i < max_entries; i++) {
 		key = i;
 		val = i + 1;
-		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
+		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+					   &val, sizeof(val), BPF_ANY);
 		if (!ASSERT_OK(err, "map_update"))
 			goto out;
 	}
 
 	num_cpus = bpf_num_possible_cpus();
-	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
-	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+	percpu_val_sz = sizeof(__u64) * num_cpus;
+	percpu_valbuf = malloc(percpu_val_sz);
 	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
 		goto out;
 
 	key = 1;
 	for (i = 0; i < num_cpus; i++)
 		percpu_valbuf[i] = i + 1;
-	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+				   percpu_valbuf, percpu_val_sz, BPF_ANY);
 	if (!ASSERT_OK(err, "percpu_map_update"))
 		goto out;
@@ -58,7 +60,7 @@ static void test_hash_map(void)
 	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
 
 	key = 1;
-	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
+	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
 	ASSERT_ERR(err, "hashmap_lookup");
 
 	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
@@ -75,9 +77,10 @@ static void test_hash_map(void)
 static void test_array_map(void)
 {
 	__u32 key, num_cpus, max_entries;
-	int i, arraymap_fd, percpu_map_fd, err;
+	int i, err;
 	struct for_each_array_map_elem *skel;
 	__u64 *percpu_valbuf = NULL;
+	size_t percpu_val_sz;
 	__u64 val, expected_total;
 	LIBBPF_OPTS(bpf_test_run_opts, topts,
 		.data_in = &pkt_v4,
@@ -89,7 +92,6 @@ static void test_array_map(void)
 	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
 		return;
 
-	arraymap_fd = bpf_map__fd(skel->maps.arraymap);
 	expected_total = 0;
 	max_entries = bpf_map__max_entries(skel->maps.arraymap);
 	for (i = 0; i < max_entries; i++) {
@@ -98,21 +100,23 @@ static void test_array_map(void)
 		/* skip the last iteration for expected total */
 		if (i != max_entries - 1)
 			expected_total += val;
-		err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
+		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
+					   &val, sizeof(val), BPF_ANY);
 		if (!ASSERT_OK(err, "map_update"))
 			goto out;
 	}
 
 	num_cpus = bpf_num_possible_cpus();
-	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
-	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+	percpu_val_sz = sizeof(__u64) * num_cpus;
+	percpu_valbuf = malloc(percpu_val_sz);
 	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
 		goto out;
 
 	key = 0;
 	for (i = 0; i < num_cpus; i++)
 		percpu_valbuf[i] = i + 1;
-	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+				   percpu_valbuf, percpu_val_sz, BPF_ANY);
 	if (!ASSERT_OK(err, "percpu_map_update"))
 		goto out;
@@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void)
 	/* Lookup and delete element. */
 	key = 1;
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), &value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void)
 	/* Lookup and delete element. */
 	key = 1;
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void)
 		goto cleanup;
 
 	/* Lookup and delete element 3. */
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), &value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void)
 		value[i] = 0;
 
 	/* Lookup and delete element 3. */
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
-	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) {
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), value, sizeof(value), 0);
+	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
-	}
 
 	/* Check if only one CPU has set the value. */
 	for (i = 0; i < nr_cpus; i++) {
@@ -91,7 +91,7 @@ static void test_map_kptr_success(bool test_run)
 	);
 	struct map_kptr *skel;
 	int key = 0, ret;
-	char buf[24];
+	char buf[16];
 
 	skel = map_kptr__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
@@ -107,24 +107,29 @@ static void test_map_kptr_success(bool test_run)
 	if (test_run)
 		return;
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "array_map update");
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "array_map update2");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "hash_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "hash_map delete");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "hash_malloc_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "hash_malloc_map delete");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.lru_hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "lru_hash_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "lru_hash_map delete");
 
 	map_kptr__destroy(skel);
@@ -8,7 +8,7 @@ void test_stacktrace_build_id(void)
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
 	struct test_stacktrace_build_id *skel;
 	int err, stack_trace_len;
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -58,7 +58,7 @@ void test_stacktrace_build_id(void)
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -79,8 +79,8 @@ void test_stacktrace_build_id(void)
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
@@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void)
 		.type = PERF_TYPE_HARDWARE,
 		.config = PERF_COUNT_HW_CPU_CYCLES,
 	};
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -100,7 +100,7 @@ void test_stacktrace_build_id_nmi(void)
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -108,7 +108,8 @@ void test_stacktrace_build_id_nmi(void)
 	do {
 		char build_id[64];
 
-		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
+					   id_offs, sizeof(id_offs), 0);
 		if (CHECK(err, "lookup_elem from stackmap",
 			  "err %d, errno %d\n", err, errno))
 			goto cleanup;
@@ -121,8 +122,8 @@ void test_stacktrace_build_id_nmi(void)
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
@@ -35,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel)
 	ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
 
 	close(bpf_map__fd(timer_skel->maps.inner_htab));
-	err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1);
+	err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
 	ASSERT_EQ(err, 0, "delete inner map");
 
 	/* check that timer_cb[12] are no longer running */