Commit 46a4a970 authored by Daniel Borkmann

Merge branch 'bpf-libbpf-cleanups'

Andrii Nakryiko says:

====================
This patch set's main goal is to teach bpf_object__open() (and its variants)
to automatically derive the BPF program type and expected attach type from
section names, similar to how bpf_prog_load() already does it. This
significantly improves the user experience by eliminating yet another
obvious-only-in-hindsight surprise when using libbpf APIs.

There are also a number of auxiliary clean-ups and improvements. E.g.,
bpf_program__get_type() and bpf_program__get_expected_attach_type() are added
for completeness and symmetry with the corresponding setter APIs. Some
clean-ups and fixes in selftests/bpf are done as well.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents be18010e 1678e33c
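
For illustration only (not part of this commit), here is a minimal sketch of how
a caller might rely on the new behavior: opening an object whose programs live in
recognized sections (e.g. SEC("xdp")) is now enough to get their type and expected
attach type filled in, and the new getters can be used to inspect the result. The
object file name "prog.o" is a made-up example.

#include <bpf/libbpf.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        struct bpf_object *obj;
        struct bpf_program *prog;

        /* Illustrative sketch only; "prog.o" is assumed to contain
         * programs placed in sections such as SEC("xdp").
         */
        obj = bpf_object__open("prog.o");
        if (libbpf_get_error(obj))
                return 1;

        bpf_object__for_each_program(prog, obj) {
                /* Types are already guessed from section names at open
                 * time; no explicit bpf_program__set_type() is needed.
                 */
                printf("%s: type %d, expected attach type %d\n",
                       bpf_program__title(prog, false),
                       bpf_program__get_type(prog),
                       bpf_program__get_expected_attach_type(prog));
        }

        bpf_object__close(obj);
        return 0;
}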
......@@ -167,6 +167,8 @@ enum {
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
__IFLA_MAX
};
......
......@@ -3611,6 +3611,7 @@ static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts)
{
struct bpf_program *prog;
struct bpf_object *obj;
const char *obj_name;
char tmp_name[64];
......@@ -3650,8 +3651,24 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
bpf_object__elf_finish(obj);
bpf_object__for_each_program(prog, obj) {
enum bpf_prog_type prog_type;
enum bpf_attach_type attach_type;
err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
&attach_type);
if (err == -ESRCH)
/* couldn't guess, but user might manually specify */
continue;
if (err)
goto out;
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog, attach_type);
}
return obj;
out:
bpf_object__close(obj);
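
The -ESRCH case above is deliberately non-fatal: a program placed in a section name
that libbpf does not recognize still opens successfully, and the caller can set the
type by hand before loading. A hedged sketch of that fallback (the object file name
and the section name "my_custom_sec" are made up for illustration):

#include <bpf/libbpf.h>

static int load_with_manual_type(void)
{
        struct bpf_object *obj = bpf_object__open("raw_prog.o");
        struct bpf_program *prog;

        if (libbpf_get_error(obj))
                return -1;

        /* Auto-detection could not guess a type for "my_custom_sec",
         * so pick one explicitly before bpf_object__load().
         */
        prog = bpf_object__find_program_by_title(obj, "my_custom_sec");
        if (prog)
                bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER);

        return bpf_object__load(obj);
}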
......@@ -4463,6 +4480,11 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
return fd;
}
enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
{
return prog->type;
}
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
prog->type = type;
......@@ -4497,6 +4519,12 @@ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog)
{
return prog->expected_attach_type;
}
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
......@@ -4536,11 +4564,15 @@ static const struct {
} section_names[] = {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
......@@ -4676,7 +4708,7 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
free(type_names);
}
return -EINVAL;
return -ESRCH;
}
int libbpf_attach_type_by_name(const char *name,
......@@ -4706,15 +4738,6 @@ int libbpf_attach_type_by_name(const char *name,
return -EINVAL;
}
static int
bpf_program__identify_section(struct bpf_program *prog,
enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
return libbpf_prog_type_by_name(prog->section_name, prog_type,
expected_attach_type);
}
int bpf_map__fd(const struct bpf_map *map)
{
return map ? map->fd : -EINVAL;
......@@ -4882,8 +4905,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
{
struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
struct bpf_object *obj;
struct bpf_map *map;
int err;
......@@ -4901,26 +4922,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
return -ENOENT;
bpf_object__for_each_program(prog, obj) {
enum bpf_attach_type attach_type = attr->expected_attach_type;
/*
* If type is not specified, try to guess it based on
* section name.
* to preserve backwards compatibility, bpf_prog_load treats
* attr->prog_type, if specified, as an override to whatever
* bpf_object__open guessed
*/
if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
bpf_program__set_type(prog, attr->prog_type);
bpf_program__set_expected_attach_type(prog,
attach_type);
}
if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
/*
* we haven't guessed from section name and user
* didn't provide a fallback type, too bad...
*/
prog_type = attr->prog_type;
prog->prog_ifindex = attr->ifindex;
expected_attach_type = attr->expected_attach_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
err = bpf_program__identify_section(prog, &prog_type,
&expected_attach_type);
if (err < 0) {
bpf_object__close(obj);
return -EINVAL;
}
}
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog,
expected_attach_type);
prog->prog_ifindex = attr->ifindex;
prog->log_level = attr->log_level;
prog->prog_flags = attr->prog_flags;
if (!first_prog)
......
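
As the comment in the hunk above spells out, attr->prog_type (when not
BPF_PROG_TYPE_UNSPEC) still overrides whatever bpf_object__open() guessed, so
existing bpf_prog_load_xattr() callers keep their behavior. A hedged usage sketch
("filter.o" is a made-up object file name):

#include <bpf/libbpf.h>

static int load_it(void)
{
        struct bpf_prog_load_attr attr = {
                .file = "filter.o",
                /* Explicit override; leave as BPF_PROG_TYPE_UNSPEC (zero)
                 * to keep the type guessed from the section name.
                 */
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        };
        struct bpf_object *obj;
        int prog_fd;

        if (bpf_prog_load_xattr(&attr, &obj, &prog_fd))
                return -1;

        /* ... use prog_fd ... */
        bpf_object__close(obj);
        return 0;
}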
......@@ -302,8 +302,13 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog);
LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
......
......@@ -195,4 +195,6 @@ LIBBPF_0.0.6 {
global:
bpf_object__open_file;
bpf_object__open_mem;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
} LIBBPF_0.0.5;
......@@ -99,11 +99,6 @@ void test_attach_probe(void)
"prog '%s' not found\n", uretprobe_name))
goto cleanup;
bpf_program__set_kprobe(kprobe_prog);
bpf_program__set_kprobe(kretprobe_prog);
bpf_program__set_kprobe(uprobe_prog);
bpf_program__set_kprobe(uretprobe_prog);
/* create maps && load programs */
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
......
......@@ -391,7 +391,6 @@ void test_core_reloc(void)
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
goto cleanup;
bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
load_attr.obj = obj;
load_attr.log_level = 0;
......
......@@ -36,10 +36,6 @@ void test_rdonly_maps(void)
if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
return;
bpf_object__for_each_program(prog, obj) {
bpf_program__set_raw_tracepoint(prog);
}
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
goto cleanup;
......
......@@ -31,7 +31,8 @@ void test_reference_tracking(void)
if (strstr(title, ".text") != NULL)
continue;
bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
if (!test__start_subtest(title))
continue;
/* Expect verifier failure if test name has 'fail' */
if (strstr(title, "fail") != NULL) {
......
......@@ -53,7 +53,7 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
SEC("sk_lookup_success")
SEC("classifier/sk_lookup_success")
int bpf_sk_lookup_test0(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
......@@ -78,7 +78,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
SEC("sk_lookup_success_simple")
SEC("classifier/sk_lookup_success_simple")
int bpf_sk_lookup_test1(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -90,7 +90,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
return 0;
}
SEC("fail_use_after_free")
SEC("classifier/fail_use_after_free")
int bpf_sk_lookup_uaf(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -105,7 +105,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
return family;
}
SEC("fail_modify_sk_pointer")
SEC("classifier/fail_modify_sk_pointer")
int bpf_sk_lookup_modptr(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -120,7 +120,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
return 0;
}
SEC("fail_modify_sk_or_null_pointer")
SEC("classifier/fail_modify_sk_or_null_pointer")
int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -134,7 +134,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
return 0;
}
SEC("fail_no_release")
SEC("classifier/fail_no_release")
int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -143,7 +143,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
return 0;
}
SEC("fail_release_twice")
SEC("classifier/fail_release_twice")
int bpf_sk_lookup_test3(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -155,7 +155,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
return 0;
}
SEC("fail_release_unchecked")
SEC("classifier/fail_release_unchecked")
int bpf_sk_lookup_test4(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
......@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
SEC("fail_no_release_subcall")
SEC("classifier/fail_no_release_subcall")
int bpf_sk_lookup_test5(struct __sk_buff *skb)
{
lookup_no_release(skb);
......
......@@ -1142,7 +1142,6 @@ static void test_sockmap(unsigned int tasks, void *data)
#define MAPINMAP_PROG "./test_map_in_map.o"
static void test_map_in_map(void)
{
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_map *map;
int mim_fd, fd, err;
......@@ -1179,9 +1178,6 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
bpf_object__for_each_program(prog, obj) {
bpf_program__set_xdp(prog);
}
bpf_object__load(obj);
map = bpf_object__find_map_by_name(obj, "mim_array");
......
......@@ -20,7 +20,7 @@ struct prog_test_def {
bool tested;
bool need_cgroup_cleanup;
const char *subtest_name;
char *subtest_name;
int subtest_num;
/* store counts before subtest started */
......@@ -81,16 +81,17 @@ void test__end_subtest()
fprintf(env.stdout, "#%d/%d %s:%s\n",
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
free(test->subtest_name);
test->subtest_name = NULL;
}
bool test__start_subtest(const char *name)
{
struct prog_test_def *test = env.test;
if (test->subtest_name) {
if (test->subtest_name)
test__end_subtest();
test->subtest_name = NULL;
}
test->subtest_num++;
......@@ -104,7 +105,13 @@ bool test__start_subtest(const char *name)
if (!should_run(&env.subtest_selector, test->subtest_num, name))
return false;
test->subtest_name = name;
test->subtest_name = strdup(name);
if (!test->subtest_name) {
fprintf(env.stderr,
"Subtest #%d: failed to copy subtest name!\n",
test->subtest_num);
return false;
}
env.test->old_error_cnt = env.test->error_cnt;
return true;
......