Commit 2c2f6abe authored by Hao Luo's avatar Hao Luo Committed by Alexei Starovoitov

selftests/bpf: Ksyms_btf to test typed ksyms

Selftests for typed ksyms. Tests two types of ksyms: one is a struct,
the other is a plain int. This tests two paths in the kernel. Struct
ksyms will be converted into PTR_TO_BTF_ID by the verifier while int
typed ksyms will be converted into PTR_TO_MEM.
Signed-off-by: Hao Luo <haoluo@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200929235049.2533242-4-haoluo@google.com
parent d370bbe1
...@@ -7,40 +7,28 @@ ...@@ -7,40 +7,28 @@
static int duration; static int duration;
/* Scan /proc/kallsyms for @sym and return its address. Failures to open
 * the file or to locate the symbol are reported through CHECK() and
 * yield a return value of 0.
 */
static __u64 kallsyms_find(const char *sym)
{
	char sym_type, sym_name[500];
	__u64 sym_addr, found_addr = 0;
	int found = 0;
	FILE *fp;

	fp = fopen("/proc/kallsyms", "r");
	if (CHECK(!fp, "kallsyms_fopen", "failed to open: %d\n", errno))
		return 0;

	while (fscanf(fp, "%llx %c %499s%*[^\n]\n", &sym_addr, &sym_type,
		      sym_name) > 0) {
		if (!strcmp(sym_name, sym)) {
			found_addr = sym_addr;
			found = 1;
			break;
		}
	}

	if (!found)
		CHECK(false, "not_found", "symbol %s not found\n", sym);

	fclose(fp);
	return found_addr;
}
void test_ksyms(void) void test_ksyms(void)
{ {
__u64 per_cpu_start_addr = kallsyms_find("__per_cpu_start");
__u64 link_fops_addr = kallsyms_find("bpf_link_fops");
const char *btf_path = "/sys/kernel/btf/vmlinux"; const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel; struct test_ksyms *skel;
struct test_ksyms__data *data; struct test_ksyms__data *data;
__u64 link_fops_addr, per_cpu_start_addr;
struct stat st; struct stat st;
__u64 btf_size; __u64 btf_size;
int err; int err;
err = kallsyms_find("bpf_link_fops", &link_fops_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n"))
return;
err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr);
if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol '__per_cpu_start' not found\n"))
return;
if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno)) if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
return; return;
btf_size = st.st_size; btf_size = st.st_size;
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_ksyms_btf.skel.h"
/* duration is consumed by the CHECK() macro. */
static int duration;

/* Check that typed __ksym externs resolve to the addresses reported by
 * /proc/kallsyms. 'runqueues' exercises the struct-typed path
 * (PTR_TO_BTF_ID) and 'bpf_prog_active' the int-typed path (PTR_TO_MEM).
 * Skips when kernel BTF lacks a .data..percpu DATASEC.
 */
void test_ksyms_btf(void)
{
	__u64 runqueues_addr, bpf_prog_active_addr;
	struct test_ksyms_btf *skel = NULL;
	struct test_ksyms_btf__data *skel_data;
	struct btf *btf;
	int datasec_id;
	int ret;

	/* Resolve the expected addresses up front. */
	ret = kallsyms_find("runqueues", &runqueues_addr);
	if (CHECK(ret == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(ret == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n"))
		return;

	ret = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr);
	if (CHECK(ret == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(ret == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n"))
		return;

	btf = libbpf_find_kernel_btf();
	if (CHECK(IS_ERR(btf), "btf_exists", "failed to load kernel BTF: %ld\n",
		  PTR_ERR(btf)))
		return;

	/* Per-cpu ksyms require a .data..percpu DATASEC in kernel BTF. */
	datasec_id = btf__find_by_name_kind(btf, ".data..percpu",
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n",
		       __func__);
		test__skip();
		goto cleanup;
	}

	skel = test_ksyms_btf__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
		goto cleanup;

	ret = test_ksyms_btf__attach(skel);
	if (CHECK(ret, "skel_attach", "skeleton attach failed: %d\n", ret))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	skel_data = skel->data;
	CHECK(skel_data->out__runqueues_addr != runqueues_addr, "runqueues_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)skel_data->out__runqueues_addr,
	      (unsigned long long)runqueues_addr);
	CHECK(skel_data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)skel_data->out__bpf_prog_active_addr,
	      (unsigned long long)bpf_prog_active_addr);

cleanup:
	btf__free(btf);
	test_ksyms_btf__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
/* Outputs read back by the userspace test; -1 means "not yet written
 * by the BPF program".
 */
__u64 out__runqueues_addr = -1;
__u64 out__bpf_prog_active_addr = -1;

/* Typed kernel symbols, resolved against kernel BTF by libbpf. */
extern const struct rq runqueues __ksym; /* struct type global var. */
extern const int bpf_prog_active __ksym; /* int type global var. */
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
	/* Record the addresses the verifier resolved for the __ksym
	 * externs; userspace compares them against /proc/kallsyms.
	 */
	out__bpf_prog_active_addr = (__u64)&bpf_prog_active;
	out__runqueues_addr = (__u64)&runqueues;

	return 0;
}
char _license[] SEC("license") = "GPL";
...@@ -90,6 +90,33 @@ long ksym_get_addr(const char *name) ...@@ -90,6 +90,33 @@ long ksym_get_addr(const char *name)
return 0; return 0;
} }
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
 * this is faster than load + find.
 *
 * On success stores the symbol's address through @addr and returns 0.
 * Returns -EINVAL when /proc/kallsyms cannot be opened and -ENOENT when
 * the symbol is not present. @addr is untouched on failure.
 */
int kallsyms_find(const char *sym, unsigned long long *addr)
{
	char type, name[500];
	unsigned long long value;
	int err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -EINVAL;

	/* Require all three conversions: with a partial match, 'name'
	 * would be read uninitialized and fscanf() could stall on the
	 * unconsumed input.
	 */
	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) == 3) {
		if (strcmp(name, sym) == 0) {
			*addr = value;
			goto out;
		}
	}
	err = -ENOENT;

out:
	fclose(f);
	return err;
}
void read_trace_pipe(void) void read_trace_pipe(void)
{ {
int trace_fd; int trace_fd;
......
...@@ -12,6 +12,10 @@ struct ksym { ...@@ -12,6 +12,10 @@ struct ksym {
int load_kallsyms(void); int load_kallsyms(void);
struct ksym *ksym_search(long key); struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name); long ksym_get_addr(const char *name);
/* open kallsyms and find addresses on the fly, faster than load + search. */
int kallsyms_find(const char *sym, unsigned long long *addr);
void read_trace_pipe(void); void read_trace_pipe(void);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment