Commit 39316183 authored by Delyan Kratunov, committed by Andrii Nakryiko

selftests/bpf: Migrate from bpf_prog_test_run_xattr

bpf_prog_test_run_xattr is being deprecated in favor of the OPTS-based
bpf_prog_test_run_opts.
We end up unable to use CHECK_ATTR so replace usages with ASSERT_* calls.
Also, prog_run_xattr is now prog_run_opts.
Signed-off-by: Delyan Kratunov <delyank@fb.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220202235423.1097270-3-delyank@fb.com
parent 04fcb5f9
...@@ -79,28 +79,21 @@ static void test_check_mtu_run_xdp(struct test_check_mtu *skel, ...@@ -79,28 +79,21 @@ static void test_check_mtu_run_xdp(struct test_check_mtu *skel,
struct bpf_program *prog, struct bpf_program *prog,
__u32 mtu_expect) __u32 mtu_expect)
{ {
const char *prog_name = bpf_program__name(prog);
int retval_expect = XDP_PASS; int retval_expect = XDP_PASS;
__u32 mtu_result = 0; __u32 mtu_result = 0;
char buf[256] = {}; char buf[256] = {};
int err; int err, prog_fd = bpf_program__fd(prog);
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.repeat = 1, .repeat = 1,
.data_in = &pkt_v4, .data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4), .data_size_in = sizeof(pkt_v4),
.data_out = buf, .data_out = buf,
.data_size_out = sizeof(buf), .data_size_out = sizeof(buf),
.prog_fd = bpf_program__fd(prog), );
};
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != 0, "bpf_prog_test_run",
"prog_name:%s (err %d errno %d retval %d)\n",
prog_name, err, errno, tattr.retval);
CHECK(tattr.retval != retval_expect, "retval", err = bpf_prog_test_run_opts(prog_fd, &topts);
"progname:%s unexpected retval=%d expected=%d\n", ASSERT_OK(err, "test_run");
prog_name, tattr.retval, retval_expect); ASSERT_EQ(topts.retval, retval_expect, "retval");
/* Extract MTU that BPF-prog got */ /* Extract MTU that BPF-prog got */
mtu_result = skel->bss->global_bpf_mtu_xdp; mtu_result = skel->bss->global_bpf_mtu_xdp;
...@@ -139,28 +132,21 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel, ...@@ -139,28 +132,21 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel,
struct bpf_program *prog, struct bpf_program *prog,
__u32 mtu_expect) __u32 mtu_expect)
{ {
const char *prog_name = bpf_program__name(prog);
int retval_expect = BPF_OK; int retval_expect = BPF_OK;
__u32 mtu_result = 0; __u32 mtu_result = 0;
char buf[256] = {}; char buf[256] = {};
int err; int err, prog_fd = bpf_program__fd(prog);
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.repeat = 1,
.data_in = &pkt_v4, .data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4), .data_size_in = sizeof(pkt_v4),
.data_out = buf, .data_out = buf,
.data_size_out = sizeof(buf), .data_size_out = sizeof(buf),
.prog_fd = bpf_program__fd(prog), .repeat = 1,
}; );
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != 0, "bpf_prog_test_run",
"prog_name:%s (err %d errno %d retval %d)\n",
prog_name, err, errno, tattr.retval);
CHECK(tattr.retval != retval_expect, "retval", err = bpf_prog_test_run_opts(prog_fd, &topts);
"progname:%s unexpected retval=%d expected=%d\n", ASSERT_OK(err, "test_run");
prog_name, tattr.retval, retval_expect); ASSERT_EQ(topts.retval, retval_expect, "retval");
/* Extract MTU that BPF-prog got */ /* Extract MTU that BPF-prog got */
mtu_result = skel->bss->global_bpf_mtu_tc; mtu_result = skel->bss->global_bpf_mtu_tc;
......
...@@ -161,7 +161,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family) ...@@ -161,7 +161,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
} }
} }
static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr) static bool was_decapsulated(struct bpf_test_run_opts *tattr)
{ {
return tattr->data_size_out < tattr->data_size_in; return tattr->data_size_out < tattr->data_size_in;
} }
...@@ -367,12 +367,12 @@ static void close_fds(int *fds, int n) ...@@ -367,12 +367,12 @@ static void close_fds(int *fds, int n)
static void test_cls_redirect_common(struct bpf_program *prog) static void test_cls_redirect_common(struct bpf_program *prog)
{ {
struct bpf_prog_test_run_attr tattr = {}; LIBBPF_OPTS(bpf_test_run_opts, tattr);
int families[] = { AF_INET, AF_INET6 }; int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss; struct sockaddr_storage ss;
struct sockaddr *addr; struct sockaddr *addr;
socklen_t slen; socklen_t slen;
int i, j, err; int i, j, err, prog_fd;
int servers[__NR_KIND][ARRAY_SIZE(families)] = {}; int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {}; int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)]; struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
...@@ -394,7 +394,7 @@ static void test_cls_redirect_common(struct bpf_program *prog) ...@@ -394,7 +394,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)
goto cleanup; goto cleanup;
} }
tattr.prog_fd = bpf_program__fd(prog); prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) { for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i]; struct test_cfg *test = &tests[i];
...@@ -415,7 +415,7 @@ static void test_cls_redirect_common(struct bpf_program *prog) ...@@ -415,7 +415,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)
if (CHECK_FAIL(!tattr.data_size_in)) if (CHECK_FAIL(!tattr.data_size_in))
continue; continue;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
if (CHECK_FAIL(err)) if (CHECK_FAIL(err))
continue; continue;
......
...@@ -26,10 +26,10 @@ static void test_dummy_st_ops_attach(void) ...@@ -26,10 +26,10 @@ static void test_dummy_st_ops_attach(void)
static void test_dummy_init_ret_value(void) static void test_dummy_init_ret_value(void)
{ {
__u64 args[1] = {0}; __u64 args[1] = {0};
struct bpf_prog_test_run_attr attr = { LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_size_in = sizeof(args),
.ctx_in = args, .ctx_in = args,
}; .ctx_size_in = sizeof(args),
);
struct dummy_st_ops *skel; struct dummy_st_ops *skel;
int fd, err; int fd, err;
...@@ -38,8 +38,7 @@ static void test_dummy_init_ret_value(void) ...@@ -38,8 +38,7 @@ static void test_dummy_init_ret_value(void)
return; return;
fd = bpf_program__fd(skel->progs.test_1); fd = bpf_program__fd(skel->progs.test_1);
attr.prog_fd = fd; err = bpf_prog_test_run_opts(fd, &attr);
err = bpf_prog_test_run_xattr(&attr);
ASSERT_OK(err, "test_run"); ASSERT_OK(err, "test_run");
ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");
...@@ -53,10 +52,10 @@ static void test_dummy_init_ptr_arg(void) ...@@ -53,10 +52,10 @@ static void test_dummy_init_ptr_arg(void)
.val = exp_retval, .val = exp_retval,
}; };
__u64 args[1] = {(unsigned long)&in_state}; __u64 args[1] = {(unsigned long)&in_state};
struct bpf_prog_test_run_attr attr = { LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_size_in = sizeof(args),
.ctx_in = args, .ctx_in = args,
}; .ctx_size_in = sizeof(args),
);
struct dummy_st_ops *skel; struct dummy_st_ops *skel;
int fd, err; int fd, err;
...@@ -65,8 +64,7 @@ static void test_dummy_init_ptr_arg(void) ...@@ -65,8 +64,7 @@ static void test_dummy_init_ptr_arg(void)
return; return;
fd = bpf_program__fd(skel->progs.test_1); fd = bpf_program__fd(skel->progs.test_1);
attr.prog_fd = fd; err = bpf_prog_test_run_opts(fd, &attr);
err = bpf_prog_test_run_xattr(&attr);
ASSERT_OK(err, "test_run"); ASSERT_OK(err, "test_run");
ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
ASSERT_EQ(attr.retval, exp_retval, "test_ret"); ASSERT_EQ(attr.retval, exp_retval, "test_ret");
...@@ -77,10 +75,10 @@ static void test_dummy_init_ptr_arg(void) ...@@ -77,10 +75,10 @@ static void test_dummy_init_ptr_arg(void)
static void test_dummy_multiple_args(void) static void test_dummy_multiple_args(void)
{ {
__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
struct bpf_prog_test_run_attr attr = { LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_size_in = sizeof(args),
.ctx_in = args, .ctx_in = args,
}; .ctx_size_in = sizeof(args),
);
struct dummy_st_ops *skel; struct dummy_st_ops *skel;
int fd, err; int fd, err;
size_t i; size_t i;
...@@ -91,8 +89,7 @@ static void test_dummy_multiple_args(void) ...@@ -91,8 +89,7 @@ static void test_dummy_multiple_args(void)
return; return;
fd = bpf_program__fd(skel->progs.test_2); fd = bpf_program__fd(skel->progs.test_2);
attr.prog_fd = fd; err = bpf_prog_test_run_opts(fd, &attr);
err = bpf_prog_test_run_xattr(&attr);
ASSERT_OK(err, "test_run"); ASSERT_OK(err, "test_run");
for (i = 0; i < ARRAY_SIZE(args); i++) { for (i = 0; i < ARRAY_SIZE(args); i++) {
snprintf(name, sizeof(name), "arg %zu", i); snprintf(name, sizeof(name), "arg %zu", i);
......
...@@ -13,8 +13,9 @@ ...@@ -13,8 +13,9 @@
#endif #endif
#define CHECK_FLOW_KEYS(desc, got, expected) \ #define CHECK_FLOW_KEYS(desc, got, expected) \
CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
desc, \ desc, \
topts.duration, \
"nhoff=%u/%u " \ "nhoff=%u/%u " \
"thoff=%u/%u " \ "thoff=%u/%u " \
"addr_proto=0x%x/0x%x " \ "addr_proto=0x%x/0x%x " \
...@@ -487,7 +488,7 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) ...@@ -487,7 +488,7 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
/* Keep in sync with 'flags' from eth_get_headlen. */ /* Keep in sync with 'flags' from eth_get_headlen. */
__u32 eth_get_headlen_flags = __u32 eth_get_headlen_flags =
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
struct bpf_prog_test_run_attr tattr = {}; LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_flow_keys flow_keys = {}; struct bpf_flow_keys flow_keys = {};
__u32 key = (__u32)(tests[i].keys.sport) << 16 | __u32 key = (__u32)(tests[i].keys.sport) << 16 |
tests[i].keys.dport; tests[i].keys.dport;
...@@ -503,13 +504,12 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) ...@@ -503,13 +504,12 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); ASSERT_OK(err, "bpf_map_lookup_elem");
CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
err = bpf_map_delete_elem(keys_fd, &key); err = bpf_map_delete_elem(keys_fd, &key);
CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); ASSERT_OK(err, "bpf_map_delete_elem");
} }
} }
...@@ -573,27 +573,24 @@ void test_flow_dissector(void) ...@@ -573,27 +573,24 @@ void test_flow_dissector(void)
for (i = 0; i < ARRAY_SIZE(tests); i++) { for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_flow_keys flow_keys; struct bpf_flow_keys flow_keys;
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.prog_fd = prog_fd,
.data_in = &tests[i].pkt, .data_in = &tests[i].pkt,
.data_size_in = sizeof(tests[i].pkt), .data_size_in = sizeof(tests[i].pkt),
.data_out = &flow_keys, .data_out = &flow_keys,
}; );
static struct bpf_flow_keys ctx = {}; static struct bpf_flow_keys ctx = {};
if (tests[i].flags) { if (tests[i].flags) {
tattr.ctx_in = &ctx; topts.ctx_in = &ctx;
tattr.ctx_size_in = sizeof(ctx); topts.ctx_size_in = sizeof(ctx);
ctx.flags = tests[i].flags; ctx.flags = tests[i].flags;
} }
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &topts);
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || ASSERT_OK(err, "test_run");
err || tattr.retval != 1, ASSERT_EQ(topts.retval, 1, "test_run retval");
tests[i].name, ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
"err %d errno %d retval %d duration %d size %u/%zu\n", "test_run data_size_out");
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
} }
......
...@@ -53,24 +53,24 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) ...@@ -53,24 +53,24 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void serial_test_kfree_skb(void) void serial_test_kfree_skb(void)
{ {
struct __sk_buff skb = {}; struct __sk_buff skb = {};
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v6, .data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6), .data_size_in = sizeof(pkt_v6),
.ctx_in = &skb, .ctx_in = &skb,
.ctx_size_in = sizeof(skb), .ctx_size_in = sizeof(skb),
}; );
struct kfree_skb *skel = NULL; struct kfree_skb *skel = NULL;
struct bpf_link *link; struct bpf_link *link;
struct bpf_object *obj; struct bpf_object *obj;
struct perf_buffer *pb = NULL; struct perf_buffer *pb = NULL;
int err; int err, prog_fd;
bool passed = false; bool passed = false;
__u32 duration = 0; __u32 duration = 0;
const int zero = 0; const int zero = 0;
bool test_ok[2]; bool test_ok[2];
err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &tattr.prog_fd); &obj, &prog_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
return; return;
...@@ -100,11 +100,9 @@ void serial_test_kfree_skb(void) ...@@ -100,11 +100,9 @@ void serial_test_kfree_skb(void)
goto close_prog; goto close_prog;
memcpy(skb.cb, &cb, sizeof(cb)); memcpy(skb.cb, &cb, sizeof(cb));
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &topts);
duration = tattr.duration; ASSERT_OK(err, "ipv6 test_run");
CHECK(err || tattr.retval, "ipv6", ASSERT_OK(topts.retval, "ipv6 test_run retval");
"err %d errno %d retval %d duration %d\n",
err, errno, tattr.retval, duration);
/* read perf buffer */ /* read perf buffer */
err = perf_buffer__poll(pb, 100); err = perf_buffer__poll(pb, 100);
......
...@@ -20,60 +20,54 @@ static void check_run_cnt(int prog_fd, __u64 run_cnt) ...@@ -20,60 +20,54 @@ static void check_run_cnt(int prog_fd, __u64 run_cnt)
"incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
} }
void test_prog_run_xattr(void) void test_prog_run_opts(void)
{ {
struct test_pkt_access *skel; struct test_pkt_access *skel;
int err, stats_fd = -1; int err, stats_fd = -1, prog_fd;
char buf[10] = {}; char buf[10] = {};
__u64 run_cnt = 0; __u64 run_cnt = 0;
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.repeat = 1, .repeat = 1,
.data_in = &pkt_v4, .data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4), .data_size_in = sizeof(pkt_v4),
.data_out = buf, .data_out = buf,
.data_size_out = 5, .data_size_out = 5,
}; );
stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno)) if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd"))
return; return;
skel = test_pkt_access__open_and_load(); skel = test_pkt_access__open_and_load();
if (CHECK_ATTR(!skel, "open_and_load", "failed\n")) if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto cleanup; goto cleanup;
tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access); prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &topts);
CHECK_ATTR(err >= 0 || errno != ENOSPC || tattr.retval, "run", ASSERT_EQ(errno, ENOSPC, "test_run errno");
"err %d errno %d retval %d\n", err, errno, tattr.retval); ASSERT_ERR(err, "test_run");
ASSERT_OK(topts.retval, "test_run retval");
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out");
"incorrect output size, want %zu have %u\n", ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint");
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow", run_cnt += topts.repeat;
"BPF_PROG_TEST_RUN ignored size hint\n"); check_run_cnt(prog_fd, run_cnt);
run_cnt += tattr.repeat; topts.data_out = NULL;
check_run_cnt(tattr.prog_fd, run_cnt); topts.data_size_out = 0;
topts.repeat = 2;
tattr.data_out = NULL;
tattr.data_size_out = 0;
tattr.repeat = 2;
errno = 0; errno = 0;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &topts);
CHECK_ATTR(err || errno || tattr.retval, "run_no_output", ASSERT_OK(errno, "run_no_output errno");
"err %d errno %d retval %d\n", err, errno, tattr.retval); ASSERT_OK(err, "run_no_output err");
ASSERT_OK(topts.retval, "run_no_output retval");
tattr.data_size_out = 1;
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
run_cnt += tattr.repeat; run_cnt += topts.repeat;
check_run_cnt(tattr.prog_fd, run_cnt); check_run_cnt(prog_fd, run_cnt);
cleanup: cleanup:
if (skel) if (skel)
......
...@@ -5,59 +5,54 @@ ...@@ -5,59 +5,54 @@
#include "bpf/libbpf_internal.h" #include "bpf/libbpf_internal.h"
#include "test_raw_tp_test_run.skel.h" #include "test_raw_tp_test_run.skel.h"
static int duration;
void test_raw_tp_test_run(void) void test_raw_tp_test_run(void)
{ {
struct bpf_prog_test_run_attr test_attr = {};
int comm_fd = -1, err, nr_online, i, prog_fd; int comm_fd = -1, err, nr_online, i, prog_fd;
__u64 args[2] = {0x1234ULL, 0x5678ULL}; __u64 args[2] = {0x1234ULL, 0x5678ULL};
int expected_retval = 0x1234 + 0x5678; int expected_retval = 0x1234 + 0x5678;
struct test_raw_tp_test_run *skel; struct test_raw_tp_test_run *skel;
char buf[] = "new_name"; char buf[] = "new_name";
bool *online = NULL; bool *online = NULL;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, LIBBPF_OPTS(bpf_test_run_opts, opts,
.ctx_in = args, .ctx_in = args,
.ctx_size_in = sizeof(args), .ctx_size_in = sizeof(args),
.flags = BPF_F_TEST_RUN_ON_CPU, .flags = BPF_F_TEST_RUN_ON_CPU,
); );
err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online, err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
&nr_online); &nr_online);
if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err)) if (!ASSERT_OK(err, "parse_cpu_mask_file"))
return; return;
skel = test_raw_tp_test_run__open_and_load(); skel = test_raw_tp_test_run__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup; goto cleanup;
err = test_raw_tp_test_run__attach(skel); err = test_raw_tp_test_run__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) if (!ASSERT_OK(err, "skel_attach"))
goto cleanup; goto cleanup;
comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno)) if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm"))
goto cleanup; goto cleanup;
err = write(comm_fd, buf, sizeof(buf)); err = write(comm_fd, buf, sizeof(buf));
CHECK(err < 0, "task rename", "err %d", errno); ASSERT_GE(err, 0, "task rename");
CHECK(skel->bss->count == 0, "check_count", "didn't increase\n"); ASSERT_NEQ(skel->bss->count, 0, "check_count");
CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n"); ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu");
prog_fd = bpf_program__fd(skel->progs.rename); prog_fd = bpf_program__fd(skel->progs.rename);
test_attr.prog_fd = prog_fd; opts.ctx_in = args;
test_attr.ctx_in = args; opts.ctx_size_in = sizeof(__u64);
test_attr.ctx_size_in = sizeof(__u64);
err = bpf_prog_test_run_xattr(&test_attr); err = bpf_prog_test_run_opts(prog_fd, &opts);
CHECK(err == 0, "test_run", "should fail for too small ctx\n"); ASSERT_NEQ(err, 0, "test_run should fail for too small ctx");
test_attr.ctx_size_in = sizeof(args); opts.ctx_size_in = sizeof(args);
err = bpf_prog_test_run_xattr(&test_attr); err = bpf_prog_test_run_opts(prog_fd, &opts);
CHECK(err < 0, "test_run", "err %d\n", errno); ASSERT_OK(err, "test_run");
CHECK(test_attr.retval != expected_retval, "check_retval", ASSERT_EQ(opts.retval, expected_retval, "check_retval");
"expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
for (i = 0; i < nr_online; i++) { for (i = 0; i < nr_online; i++) {
if (!online[i]) if (!online[i])
...@@ -66,28 +61,23 @@ void test_raw_tp_test_run(void) ...@@ -66,28 +61,23 @@ void test_raw_tp_test_run(void)
opts.cpu = i; opts.cpu = i;
opts.retval = 0; opts.retval = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts); err = bpf_prog_test_run_opts(prog_fd, &opts);
CHECK(err < 0, "test_run_opts", "err %d\n", errno); ASSERT_OK(err, "test_run_opts");
CHECK(skel->data->on_cpu != i, "check_on_cpu", ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu");
"expect %d got %d\n", i, skel->data->on_cpu); ASSERT_EQ(opts.retval, expected_retval, "check_retval");
CHECK(opts.retval != expected_retval,
"check_retval", "expect 0x%x, got 0x%x\n",
expected_retval, opts.retval);
} }
/* invalid cpu ID should fail with ENXIO */ /* invalid cpu ID should fail with ENXIO */
opts.cpu = 0xffffffff; opts.cpu = 0xffffffff;
err = bpf_prog_test_run_opts(prog_fd, &opts); err = bpf_prog_test_run_opts(prog_fd, &opts);
CHECK(err >= 0 || errno != ENXIO, ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO");
"test_run_opts_fail", ASSERT_ERR(err, "test_run_opts_fail");
"should failed with ENXIO\n");
/* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */ /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */
opts.cpu = 1; opts.cpu = 1;
opts.flags = 0; opts.flags = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts); err = bpf_prog_test_run_opts(prog_fd, &opts);
CHECK(err >= 0 || errno != EINVAL, ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL");
"test_run_opts_fail", ASSERT_ERR(err, "test_run_opts_fail");
"should failed with EINVAL\n");
cleanup: cleanup:
close(comm_fd); close(comm_fd);
......
...@@ -20,97 +20,72 @@ void test_skb_ctx(void) ...@@ -20,97 +20,72 @@ void test_skb_ctx(void)
.gso_size = 10, .gso_size = 10,
.hwtstamp = 11, .hwtstamp = 11,
}; };
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, tattr,
.data_in = &pkt_v4, .data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4), .data_size_in = sizeof(pkt_v4),
.ctx_in = &skb, .ctx_in = &skb,
.ctx_size_in = sizeof(skb), .ctx_size_in = sizeof(skb),
.ctx_out = &skb, .ctx_out = &skb,
.ctx_size_out = sizeof(skb), .ctx_size_out = sizeof(skb),
}; );
struct bpf_object *obj; struct bpf_object *obj;
int err; int err, prog_fd, i;
int i;
err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS,
&tattr.prog_fd); &obj, &prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) if (!ASSERT_OK(err, "load"))
return; return;
/* ctx_in != NULL, ctx_size_in == 0 */ /* ctx_in != NULL, ctx_size_in == 0 */
tattr.ctx_size_in = 0; tattr.ctx_size_in = 0;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "ctx_size_in");
tattr.ctx_size_in = sizeof(skb); tattr.ctx_size_in = sizeof(skb);
/* ctx_out != NULL, ctx_size_out == 0 */ /* ctx_out != NULL, ctx_size_out == 0 */
tattr.ctx_size_out = 0; tattr.ctx_size_out = 0;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "ctx_size_out");
tattr.ctx_size_out = sizeof(skb); tattr.ctx_size_out = sizeof(skb);
/* non-zero [len, tc_index] fields should be rejected*/ /* non-zero [len, tc_index] fields should be rejected*/
skb.len = 1; skb.len = 1;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "len");
skb.len = 0; skb.len = 0;
skb.tc_index = 1; skb.tc_index = 1;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "tc_index");
skb.tc_index = 0; skb.tc_index = 0;
/* non-zero [hash, sk] fields should be rejected */ /* non-zero [hash, sk] fields should be rejected */
skb.hash = 1; skb.hash = 1;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "hash");
skb.hash = 0; skb.hash = 0;
skb.sk = (struct bpf_sock *)1; skb.sk = (struct bpf_sock *)1;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno); ASSERT_NEQ(err, 0, "sk");
skb.sk = 0; skb.sk = 0;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
CHECK_ATTR(err != 0 || tattr.retval, ASSERT_OK(err, "test_run");
"run", ASSERT_OK(tattr.retval, "test_run retval");
"err %d errno %d retval %d\n", ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");
err, errno, tattr.retval);
CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
"incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
CHECK_ATTR(skb.cb[i] != i + 2, ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
"ctx_out_cb", ASSERT_EQ(skb.priority, 7, "ctx_out_priority");
"skb->cb[i] == %d, expected %d\n", ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex");
skb.cb[i], i + 2); ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex");
CHECK_ATTR(skb.priority != 7, ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp");
"ctx_out_priority", ASSERT_EQ(skb.mark, 10, "ctx_out_mark");
"skb->priority == %d, expected %d\n",
skb.priority, 7);
CHECK_ATTR(skb.ifindex != 1,
"ctx_out_ifindex",
"skb->ifindex == %d, expected %d\n",
skb.ifindex, 1);
CHECK_ATTR(skb.ingress_ifindex != 11,
"ctx_out_ingress_ifindex",
"skb->ingress_ifindex == %d, expected %d\n",
skb.ingress_ifindex, 11);
CHECK_ATTR(skb.tstamp != 8,
"ctx_out_tstamp",
"skb->tstamp == %lld, expected %d\n",
skb.tstamp, 8);
CHECK_ATTR(skb.mark != 10,
"ctx_out_mark",
"skb->mark == %u, expected %d\n",
skb.mark, 10);
bpf_object__close(obj); bpf_object__close(obj);
} }
...@@ -9,22 +9,22 @@ void test_skb_helpers(void) ...@@ -9,22 +9,22 @@ void test_skb_helpers(void)
.gso_segs = 8, .gso_segs = 8,
.gso_size = 10, .gso_size = 10,
}; };
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4, .data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4), .data_size_in = sizeof(pkt_v4),
.ctx_in = &skb, .ctx_in = &skb,
.ctx_size_in = sizeof(skb), .ctx_size_in = sizeof(skb),
.ctx_out = &skb, .ctx_out = &skb,
.ctx_size_out = sizeof(skb), .ctx_size_out = sizeof(skb),
}; );
struct bpf_object *obj; struct bpf_object *obj;
int err; int err, prog_fd;
err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj, err = bpf_prog_test_load("./test_skb_helpers.o",
&tattr.prog_fd); BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) if (!ASSERT_OK(err, "load"))
return; return;
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &topts);
CHECK_ATTR(err, "len", "err %d errno %d\n", err, errno); ASSERT_OK(err, "test_run");
bpf_object__close(obj); bpf_object__close(obj);
} }
...@@ -140,12 +140,16 @@ static void test_skmsg_helpers(enum bpf_map_type map_type) ...@@ -140,12 +140,16 @@ static void test_skmsg_helpers(enum bpf_map_type map_type)
static void test_sockmap_update(enum bpf_map_type map_type) static void test_sockmap_update(enum bpf_map_type map_type)
{ {
struct bpf_prog_test_run_attr tattr;
int err, prog, src, duration = 0; int err, prog, src, duration = 0;
struct test_sockmap_update *skel; struct test_sockmap_update *skel;
struct bpf_map *dst_map; struct bpf_map *dst_map;
const __u32 zero = 0; const __u32 zero = 0;
char dummy[14] = {0}; char dummy[14] = {0};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = dummy,
.data_size_in = sizeof(dummy),
.repeat = 1,
);
__s64 sk; __s64 sk;
sk = connected_socket_v4(); sk = connected_socket_v4();
...@@ -167,16 +171,10 @@ static void test_sockmap_update(enum bpf_map_type map_type) ...@@ -167,16 +171,10 @@ static void test_sockmap_update(enum bpf_map_type map_type)
if (CHECK(err, "update_elem(src)", "errno=%u\n", errno)) if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
goto out; goto out;
tattr = (struct bpf_prog_test_run_attr){ err = bpf_prog_test_run_opts(prog, &topts);
.prog_fd = prog, if (!ASSERT_OK(err, "test_run"))
.repeat = 1, goto out;
.data_in = dummy, if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
.data_size_in = sizeof(dummy),
};
err = bpf_prog_test_run_xattr(&tattr);
if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
"errno=%u retval=%u\n", errno, tattr.retval))
goto out; goto out;
compare_cookies(skel->maps.src, dst_map); compare_cookies(skel->maps.src, dst_map);
......
...@@ -20,20 +20,20 @@ void test_syscall(void) ...@@ -20,20 +20,20 @@ void test_syscall(void)
.log_buf = (uintptr_t) verifier_log, .log_buf = (uintptr_t) verifier_log,
.log_size = sizeof(verifier_log), .log_size = sizeof(verifier_log),
}; };
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, tattr,
.ctx_in = &ctx, .ctx_in = &ctx,
.ctx_size_in = sizeof(ctx), .ctx_size_in = sizeof(ctx),
}; );
struct syscall *skel = NULL; struct syscall *skel = NULL;
__u64 key = 12, value = 0; __u64 key = 12, value = 0;
int err; int err, prog_fd;
skel = syscall__open_and_load(); skel = syscall__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load")) if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup; goto cleanup;
tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog); prog_fd = bpf_program__fd(skel->progs.bpf_prog);
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_EQ(err, 0, "err"); ASSERT_EQ(err, 0, "err");
ASSERT_EQ(tattr.retval, 1, "retval"); ASSERT_EQ(tattr.retval, 1, "retval");
ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd"); ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
......
...@@ -8,20 +8,20 @@ ...@@ -8,20 +8,20 @@
static int sanity_run(struct bpf_program *prog) static int sanity_run(struct bpf_program *prog)
{ {
struct bpf_prog_test_run_attr test_attr = {}; LIBBPF_OPTS(bpf_test_run_opts, test_attr);
__u64 args[] = {1, 2, 3}; __u64 args[] = {1, 2, 3};
__u32 duration = 0;
int err, prog_fd; int err, prog_fd;
prog_fd = bpf_program__fd(prog); prog_fd = bpf_program__fd(prog);
test_attr.prog_fd = prog_fd;
test_attr.ctx_in = args; test_attr.ctx_in = args;
test_attr.ctx_size_in = sizeof(args); test_attr.ctx_size_in = sizeof(args);
err = bpf_prog_test_run_xattr(&test_attr); err = bpf_prog_test_run_opts(prog_fd, &test_attr);
if (CHECK(err || test_attr.retval, "test_run", if (!ASSERT_OK(err, "test_run"))
"err %d errno %d retval %d duration %d\n", return -1;
err, errno, test_attr.retval, duration))
if (!ASSERT_OK(test_attr.retval, "test_run retval"))
return -1; return -1;
return 0; return 0;
} }
......
...@@ -78,17 +78,17 @@ static void test_xdp_adjust_tail_grow2(void) ...@@ -78,17 +78,17 @@ static void test_xdp_adjust_tail_grow2(void)
int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/; int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/;
struct bpf_object *obj; struct bpf_object *obj;
int err, cnt, i; int err, cnt, i;
int max_grow; int max_grow, prog_fd;
struct bpf_prog_test_run_attr tattr = { LIBBPF_OPTS(bpf_test_run_opts, tattr,
.repeat = 1, .repeat = 1,
.data_in = &buf, .data_in = &buf,
.data_out = &buf, .data_out = &buf,
.data_size_in = 0, /* Per test */ .data_size_in = 0, /* Per test */
.data_size_out = 0, /* Per test */ .data_size_out = 0, /* Per test */
}; );
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (ASSERT_OK(err, "test_xdp_adjust_tail_grow")) if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return; return;
...@@ -97,7 +97,7 @@ static void test_xdp_adjust_tail_grow2(void) ...@@ -97,7 +97,7 @@ static void test_xdp_adjust_tail_grow2(void)
tattr.data_size_in = 64; /* Determine test case via pkt size */ tattr.data_size_in = 64; /* Determine test case via pkt size */
tattr.data_size_out = 128; /* Limit copy_size */ tattr.data_size_out = 128; /* Limit copy_size */
/* Kernel side alloc packet memory area that is zero init */ /* Kernel side alloc packet memory area that is zero init */
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */ ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */
ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval"); ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval");
...@@ -115,7 +115,7 @@ static void test_xdp_adjust_tail_grow2(void) ...@@ -115,7 +115,7 @@ static void test_xdp_adjust_tail_grow2(void)
memset(buf, 2, sizeof(buf)); memset(buf, 2, sizeof(buf));
tattr.data_size_in = 128; /* Determine test case via pkt size */ tattr.data_size_in = 128; /* Determine test case via pkt size */
tattr.data_size_out = sizeof(buf); /* Copy everything */ tattr.data_size_out = sizeof(buf); /* Copy everything */
err = bpf_prog_test_run_xattr(&tattr); err = bpf_prog_test_run_opts(prog_fd, &tattr);
max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */ max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */
ASSERT_OK(err, "case-128"); ASSERT_OK(err, "case-128");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment