Commit 8f4c92f0 authored by Andrii Nakryiko

Merge branch 'libbpf: allow users to set kprobe/uprobe attach mode'

Menglong Dong says:

====================

From: Menglong Dong <imagedong@tencent.com>

By default, libbpf attaches kprobe/uprobe BPF programs in the latest mode
supported by the kernel. The first patch in this series adds support for
users to manually attach kprobes/uprobes in legacy/perf/link mode.

The second patch splits the 'attach_probe' test into multiple subtests, as
Andrii suggested.

The third patch adds tests for attaching kprobes/uprobes in the different
modes.

Changes since v3:
- rename eBPF to BPF in the doc
- use OPTS_GET() to get the value of 'force_ioctl_attach'
- error out when the requested attach mode is not supported
- use test_attach_probe_manual__open_and_load() directly

Changes since v2:
- fix a typo in the second patch

Changes since v1:
- small changes in the first patch, as Andrii suggested
- split 'attach_probe' into multiple subtests
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents fd4cb29f c7aec81b
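For illustration, a minimal sketch of the new API from the caller's side (the program handle and probed function are hypothetical; error handling assumes libbpf 1.x conventions, where attach failures return NULL and set errno):

	#include <errno.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	/* Attach 'prog' as a kprobe on do_nanosleep(), forcing the legacy
	 * tracefs-based mechanism regardless of what the kernel supports.
	 */
	static struct bpf_link *attach_kprobe_legacy(struct bpf_program *prog)
	{
		LIBBPF_OPTS(bpf_kprobe_opts, opts,
			.attach_mode = PROBE_ATTACH_MODE_LEGACY,
		);
		struct bpf_link *link;

		link = bpf_program__attach_kprobe_opts(prog, "do_nanosleep", &opts);
		if (!link)
			fprintf(stderr, "attach failed: %d\n", -errno);
		return link;
	}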
@@ -9724,6 +9724,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
char errmsg[STRERR_BUFSIZE];
struct bpf_link_perf *link;
int prog_fd, link_fd = -1, err;
+bool force_ioctl_attach;
if (!OPTS_VALID(opts, bpf_perf_event_opts))
return libbpf_err_ptr(-EINVAL);
@@ -9747,7 +9748,8 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
link->link.dealloc = &bpf_link_perf_dealloc;
link->perf_event_fd = pfd;
-if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
+force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
+if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
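When force_ioctl_attach is set, libbpf skips the bpf_link_create() path above and falls back to the long-standing perf event ioctl mechanism, which amounts to roughly the following (a simplified sketch, not the exact libbpf code):

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* pfd: perf event fd from perf_event_open(); prog_fd: loaded BPF program fd */
	static int attach_via_ioctl(int pfd, int prog_fd)
	{
		if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
			return -errno;	/* bind the program to the event */
		if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0)
			return -errno;	/* start the event */
		return 0;
	}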
@@ -10106,6 +10108,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
const struct bpf_kprobe_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+enum probe_attach_mode attach_mode;
char errmsg[STRERR_BUFSIZE];
char *legacy_probe = NULL;
struct bpf_link *link;
@@ -10116,11 +10119,32 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_kprobe_opts))
return libbpf_err_ptr(-EINVAL);
+attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
retprobe = OPTS_GET(opts, retprobe, false);
offset = OPTS_GET(opts, offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
legacy = determine_kprobe_perf_type() < 0;
+switch (attach_mode) {
+case PROBE_ATTACH_MODE_LEGACY:
+legacy = true;
+pe_opts.force_ioctl_attach = true;
+break;
+case PROBE_ATTACH_MODE_PERF:
+if (legacy)
+return libbpf_err_ptr(-ENOTSUP);
+pe_opts.force_ioctl_attach = true;
+break;
+case PROBE_ATTACH_MODE_LINK:
+if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+return libbpf_err_ptr(-ENOTSUP);
+break;
+case PROBE_ATTACH_MODE_DEFAULT:
+break;
+default:
+return libbpf_err_ptr(-EINVAL);
+}
if (!legacy) {
pfd = perf_event_open_probe(false /* uprobe */, retprobe,
func_name, offset,
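The `legacy` flag that the switch above refines comes from determine_kprobe_perf_type(), which probes for the FD-based kprobe PMU. Conceptually the detection looks like this (a simplified sketch of the sysfs check, not libbpf's exact implementation):

	#include <stdio.h>

	/* Kernels with the perf_event_open()-based kprobe API expose a PMU
	 * type id here; if the file is absent, only the legacy tracefs
	 * interface exists.
	 */
	static int kprobe_perf_type(void)
	{
		int type = -1;
		FILE *f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");

		if (!f)
			return -1;	/* legacy kernel */
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
		return type;
	}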
@@ -10852,6 +10876,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *archive_path = NULL, *archive_sep = NULL;
char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+enum probe_attach_mode attach_mode;
char full_path[PATH_MAX];
struct bpf_link *link;
size_t ref_ctr_off;
@@ -10862,6 +10887,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
if (!OPTS_VALID(opts, bpf_uprobe_opts))
return libbpf_err_ptr(-EINVAL);
+attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
retprobe = OPTS_GET(opts, retprobe, false);
ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
@@ -10903,6 +10929,26 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
}
legacy = determine_uprobe_perf_type() < 0;
+switch (attach_mode) {
+case PROBE_ATTACH_MODE_LEGACY:
+legacy = true;
+pe_opts.force_ioctl_attach = true;
+break;
+case PROBE_ATTACH_MODE_PERF:
+if (legacy)
+return libbpf_err_ptr(-ENOTSUP);
+pe_opts.force_ioctl_attach = true;
+break;
+case PROBE_ATTACH_MODE_LINK:
+if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+return libbpf_err_ptr(-ENOTSUP);
+break;
+case PROBE_ATTACH_MODE_DEFAULT:
+break;
+default:
+return libbpf_err_ptr(-EINVAL);
+}
if (!legacy) {
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);
......
@@ -447,12 +447,15 @@ LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);
struct bpf_perf_event_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
+/* don't use BPF link when attaching the BPF program */
+bool force_ioctl_attach;
+size_t :0;
};
-#define bpf_perf_event_opts__last_field bpf_cookie
+#define bpf_perf_event_opts__last_field force_ioctl_attach
LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
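A caller that already holds a raw perf event fd can use the new field like this (sketch; `prog` is a loaded BPF program and `pfd` is assumed to come from perf_event_open()):

	#include <bpf/libbpf.h>

	static struct bpf_link *attach_event_no_link(struct bpf_program *prog, int pfd)
	{
		LIBBPF_OPTS(bpf_perf_event_opts, pe_opts,
			.force_ioctl_attach = true,	/* skip BPF link even if supported */
		);

		return bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	}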
@@ -461,8 +464,25 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
const struct bpf_perf_event_opts *opts);
+/**
+ * enum probe_attach_mode - the mode to attach kprobe/uprobe
+ *
+ * Force libbpf to attach a kprobe/uprobe in a specific mode; -ENOTSUP will
+ * be returned if the requested mode is not supported by the kernel.
+ */
+enum probe_attach_mode {
+/* attach probe in the latest mode supported by the kernel */
+PROBE_ATTACH_MODE_DEFAULT = 0,
+/* attach probe in legacy mode, using debugfs/tracefs */
+PROBE_ATTACH_MODE_LEGACY,
+/* create perf event with perf_event_open() syscall */
+PROBE_ATTACH_MODE_PERF,
+/* attach probe with BPF link */
+PROBE_ATTACH_MODE_LINK,
+};
struct bpf_kprobe_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
@@ -470,9 +490,11 @@ struct bpf_kprobe_opts {
size_t offset;
/* kprobe is return probe */
bool retprobe;
+/* kprobe attach mode */
+enum probe_attach_mode attach_mode;
size_t :0;
};
-#define bpf_kprobe_opts__last_field retprobe
+#define bpf_kprobe_opts__last_field attach_mode
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
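As documented on the enum, an unsupported mode surfaces as -ENOTSUP, so a caller that prefers BPF links but must also run on older kernels can fall back explicitly (sketch; libbpf 1.x error conventions assumed, where NULL is returned and errno is set):

	#include <errno.h>
	#include <bpf/libbpf.h>

	static struct bpf_link *attach_prefer_link(struct bpf_program *prog,
						   const char *func)
	{
		LIBBPF_OPTS(bpf_kprobe_opts, opts,
			.attach_mode = PROBE_ATTACH_MODE_LINK,
		);
		struct bpf_link *link;

		link = bpf_program__attach_kprobe_opts(prog, func, &opts);
		if (!link && errno == ENOTSUP) {
			/* kernel has no BPF links for perf events; let libbpf pick */
			opts.attach_mode = PROBE_ATTACH_MODE_DEFAULT;
			link = bpf_program__attach_kprobe_opts(prog, func, &opts);
		}
		return link;
	}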
@@ -506,7 +528,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
const struct bpf_kprobe_multi_opts *opts);
struct bpf_ksyscall_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
@@ -552,7 +574,7 @@ bpf_program__attach_ksyscall(const struct bpf_program *prog,
const struct bpf_ksyscall_opts *opts);
struct bpf_uprobe_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* offset of kernel reference counted USDT semaphore, added in
* a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
@@ -570,9 +592,11 @@ struct bpf_uprobe_opts {
* binary_path.
*/
const char *func_name;
+/* uprobe attach mode */
+enum probe_attach_mode attach_mode;
size_t :0;
};
-#define bpf_uprobe_opts__last_field func_name
+#define bpf_uprobe_opts__last_field attach_mode
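The uprobe variant works the same way; e.g. attaching by symbol name in an explicit mode (sketch; the library and symbol are illustrative, following the selftests' use of "libc.so.6"):

	#include <bpf/libbpf.h>

	static struct bpf_link *attach_uprobe_perf_mode(struct bpf_program *prog)
	{
		LIBBPF_OPTS(bpf_uprobe_opts, opts,
			.func_name = "malloc",	/* resolved within binary_path */
			.attach_mode = PROBE_ATTACH_MODE_PERF,	/* perf event, no BPF link */
		);

		return bpf_program__attach_uprobe_opts(prog, -1 /* any pid */,
						       "libc.so.6",
						       0 /* offset */, &opts);
	}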
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
@@ -646,7 +670,7 @@ bpf_program__attach_usdt(const struct bpf_program *prog,
const struct bpf_usdt_opts *opts);
struct bpf_tracepoint_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
@@ -1110,7 +1134,7 @@ struct user_ring_buffer;
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
struct ring_buffer_opts {
-size_t sz; /* size of this struct, for forward/backward compatiblity */
+size_t sz; /* size of this struct, for forward/backward compatibility */
};
#define ring_buffer_opts__last_field sz
@@ -1475,7 +1499,7 @@ LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
struct gen_loader_opts {
-size_t sz; /* size of this struct, for forward/backward compatiblity */
+size_t sz; /* size of this struct, for forward/backward compatibility */
const char *data;
const char *insns;
__u32 data_sz;
@@ -1493,13 +1517,13 @@ enum libbpf_tristate {
};
struct bpf_linker_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
};
#define bpf_linker_opts__last_field sz
struct bpf_linker_file_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
};
#define bpf_linker_file_opts__last_field sz
@@ -1542,7 +1566,7 @@ typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cook
struct bpf_link **link);
struct libbpf_prog_handler_opts {
-/* size of this struct, for forward/backward compatiblity */
+/* size of this struct, for forward/backward compatibility */
size_t sz;
/* User-provided value that is passed to prog_setup_fn,
* prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"
/* this is how USDT semaphore is actually defined, except volatile modifier */
@@ -23,81 +25,54 @@ static noinline void trigger_func3(void)
asm volatile ("");
}
+/* attach point for ref_ctr */
+static noinline void trigger_func4(void)
+{
+asm volatile ("");
+}
static char test_data[] = "test_data";
-void test_attach_probe(void)
+/* manual kprobe/kretprobe/uprobe/uretprobe attach tests */
+static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
-struct test_attach_probe* skel;
-ssize_t uprobe_offset, ref_ctr_offset;
-struct bpf_link *uprobe_err_link;
-FILE *devnull;
-bool legacy;
-/* Check if new-style kprobe/uprobe API is supported.
- * Kernels that support new FD-based kprobe and uprobe BPF attachment
- * through perf_event_open() syscall expose
- * /sys/bus/event_source/devices/kprobe/type and
- * /sys/bus/event_source/devices/uprobe/type files, respectively. They
- * contain magic numbers that are passed as "type" field of
- * perf_event_attr. Lack of such file in the system indicates legacy
- * kernel with old-style kprobe/uprobe attach interface through
- * creating per-probe event through tracefs. For such cases
- * ref_ctr_offset feature is not supported, so we don't test it.
- */
-legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;
+struct test_attach_probe_manual *skel;
+ssize_t uprobe_offset;
-uprobe_offset = get_uprobe_offset(&trigger_func);
-if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
+skel = test_attach_probe_manual__open_and_load();
+if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
return;
-ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
-if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
-return;
-skel = test_attach_probe__open();
-if (!ASSERT_OK_PTR(skel, "skel_open"))
-return;
-/* sleepable kprobe test case needs flags set before loading */
-if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
-BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
-goto cleanup;
-if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
-goto cleanup;
-if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
+uprobe_offset = get_uprobe_offset(&trigger_func);
+if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
goto cleanup;
/* manual-attach kprobe/kretprobe */
-kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
-false /* retprobe */,
-SYS_NANOSLEEP_KPROBE_NAME);
+kprobe_opts.attach_mode = attach_mode;
+kprobe_opts.retprobe = false;
+kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+SYS_NANOSLEEP_KPROBE_NAME,
+&kprobe_opts);
if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
goto cleanup;
skel->links.handle_kprobe = kprobe_link;
-kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
-true /* retprobe */,
-SYS_NANOSLEEP_KPROBE_NAME);
+kprobe_opts.retprobe = true;
+kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+SYS_NANOSLEEP_KPROBE_NAME,
+&kprobe_opts);
if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
-/* auto-attachable kprobe and kretprobe */
-skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
-ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
-skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
-ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
-if (!legacy)
-ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
/* manual-attach uprobe/uretprobe */
+uprobe_opts.attach_mode = attach_mode;
+uprobe_opts.ref_ctr_offset = 0;
uprobe_opts.retprobe = false;
-uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
0 /* self pid */,
"/proc/self/exe",
@@ -107,12 +82,7 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
-if (!legacy)
-ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
-/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
uprobe_opts.retprobe = true;
-uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
-1 /* any pid */,
"/proc/self/exe",
@@ -121,12 +91,7 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
-/* verify auto-attach fails for old-style uprobe definition */
-uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
-if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
-"auto-attach should fail for old-style name"))
-goto cleanup;
/* attach uprobe by function name manually */
uprobe_opts.func_name = "trigger_func2";
uprobe_opts.retprobe = false;
uprobe_opts.ref_ctr_offset = 0;
@@ -138,11 +103,63 @@ void test_attach_probe(void)
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
goto cleanup;
/* trigger & validate kprobe && kretprobe */
usleep(1);
/* trigger & validate uprobe & uretprobe */
trigger_func();
/* trigger & validate uprobe attached by name */
trigger_func2();
ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
cleanup:
test_attach_probe_manual__destroy(skel);
}
+static void test_attach_probe_auto(struct test_attach_probe *skel)
+{
+struct bpf_link *uprobe_err_link;
+/* auto-attachable kprobe and kretprobe */
+skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
+ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
+skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
+ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
+/* verify auto-attach fails for old-style uprobe definition */
+uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
+if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
+"auto-attach should fail for old-style name"))
+return;
/* verify auto-attach works */
skel->links.handle_uretprobe_byname =
bpf_program__attach(skel->progs.handle_uretprobe_byname);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
-goto cleanup;
+return;
/* trigger & validate kprobe && kretprobe */
usleep(1);
/* trigger & validate uprobe attached by name */
trigger_func2();
ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
}
static void test_uprobe_lib(struct test_attach_probe *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
FILE *devnull;
/* test attach by name for a library function, using the library
* as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
@@ -155,7 +172,7 @@ void test_attach_probe(void)
"libc.so.6",
0, &uprobe_opts);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
-goto cleanup;
+return;
uprobe_opts.func_name = "fclose";
uprobe_opts.retprobe = true;
@@ -165,62 +182,144 @@ void test_attach_probe(void)
"libc.so.6",
0, &uprobe_opts);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
return;
/* trigger & validate shared library u[ret]probes attached by name */
devnull = fopen("/dev/null", "r");
fclose(devnull);
ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
}
+static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
+{
+DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+struct bpf_link *uprobe_link, *uretprobe_link;
+ssize_t uprobe_offset, ref_ctr_offset;
+uprobe_offset = get_uprobe_offset(&trigger_func4);
+if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
+return;
+ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
+if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
+return;
+ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
+uprobe_opts.retprobe = false;
+uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
+0 /* self pid */,
+"/proc/self/exe",
+uprobe_offset,
+&uprobe_opts);
+if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
+return;
+skel->links.handle_uprobe_ref_ctr = uprobe_link;
+ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
+/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
+uprobe_opts.retprobe = true;
+uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
+-1 /* any pid */,
+"/proc/self/exe",
+uprobe_offset, &uprobe_opts);
+if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
+return;
+skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
+}
+static void test_kprobe_sleepable(void)
+{
+struct test_attach_kprobe_sleepable *skel;
+skel = test_attach_kprobe_sleepable__open();
+if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
+return;
+/* sleepable kprobe test case needs flags set before loading */
+if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
+BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
+goto cleanup;
+if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
+"skel_kprobe_sleepable_load"))
+goto cleanup;
/* sleepable kprobes should not attach successfully */
skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
-if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"))
-goto cleanup;
+ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
+cleanup:
+test_attach_kprobe_sleepable__destroy(skel);
}
+static void test_uprobe_sleepable(struct test_attach_probe *skel)
+{
/* test sleepable uprobe and uretprobe variants */
skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
-goto cleanup;
+return;
skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
-goto cleanup;
+return;
skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
-goto cleanup;
+return;
skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
-goto cleanup;
+return;
skel->bss->user_ptr = test_data;
/* trigger & validate kprobe && kretprobe */
usleep(1);
/* trigger & validate shared library u[ret]probes attached by name */
devnull = fopen("/dev/null", "r");
fclose(devnull);
/* trigger & validate uprobe & uretprobe */
trigger_func();
/* trigger & validate uprobe attached by name */
trigger_func2();
/* trigger & validate sleepable uprobe attached by name */
trigger_func3();
-ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
-ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
-ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
-ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
-ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
-ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
-ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
-ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
-ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
-ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
}
+void test_attach_probe(void)
+{
+struct test_attach_probe *skel;
+skel = test_attach_probe__open();
+if (!ASSERT_OK_PTR(skel, "skel_open"))
+return;
+if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
+goto cleanup;
+if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
+goto cleanup;
if (test__start_subtest("manual-default"))
test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
if (test__start_subtest("manual-legacy"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
if (test__start_subtest("manual-perf"))
test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
if (test__start_subtest("manual-link"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
if (test__start_subtest("auto"))
test_attach_probe_auto(skel);
if (test__start_subtest("kprobe-sleepable"))
test_kprobe_sleepable();
if (test__start_subtest("uprobe-lib"))
test_uprobe_lib(skel);
if (test__start_subtest("uprobe-sleepable"))
test_uprobe_sleepable(skel);
if (test__start_subtest("uprobe-ref_ctr"))
test_uprobe_ref_ctr(skel);
cleanup:
test_attach_probe__destroy(skel);
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
int kprobe_res = 0;
/**
* This program will be manually made sleepable on the userspace side
* and should thus be unattachable.
*/
SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
int handle_kprobe_sleepable(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
@@ -7,12 +7,8 @@
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
-int kprobe_res = 0;
int kprobe2_res = 0;
-int kretprobe_res = 0;
int kretprobe2_res = 0;
-int uprobe_res = 0;
-int uretprobe_res = 0;
int uprobe_byname_res = 0;
int uretprobe_byname_res = 0;
int uprobe_byname2_res = 0;
@@ -23,13 +19,6 @@ int uretprobe_byname3_sleepable_res = 0;
int uretprobe_byname3_res = 0;
void *user_ptr = 0;
SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
SEC("ksyscall/nanosleep")
int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
{
@@ -37,24 +26,6 @@ int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __ker
return 0;
}
-/**
- * This program will be manually made sleepable on the userspace side
- * and should thus be unattachable.
- */
-SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
-int handle_kprobe_sleepable(struct pt_regs *ctx)
-{
-kprobe_res = 2;
-return 0;
-}
-SEC("kretprobe")
-int handle_kretprobe(struct pt_regs *ctx)
-{
-kretprobe_res = 2;
-return 0;
-}
SEC("kretsyscall/nanosleep")
int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
{
@@ -63,16 +34,14 @@ int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
}
SEC("uprobe")
-int handle_uprobe(struct pt_regs *ctx)
+int handle_uprobe_ref_ctr(struct pt_regs *ctx)
{
-uprobe_res = 3;
return 0;
}
SEC("uretprobe")
-int handle_uretprobe(struct pt_regs *ctx)
+int handle_uretprobe_ref_ctr(struct pt_regs *ctx)
{
-uretprobe_res = 4;
return 0;
}
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
int kprobe_res = 0;
int kretprobe_res = 0;
int uprobe_res = 0;
int uretprobe_res = 0;
int uprobe_byname_res = 0;
void *user_ptr = 0;
SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
SEC("kretprobe")
int handle_kretprobe(struct pt_regs *ctx)
{
kretprobe_res = 2;
return 0;
}
SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
uprobe_res = 3;
return 0;
}
SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
uretprobe_res = 4;
return 0;
}
SEC("uprobe")
int handle_uprobe_byname(struct pt_regs *ctx)
{
uprobe_byname_res = 5;
return 0;
}
char _license[] SEC("license") = "GPL";