Commit 52b07e56 authored by Alexei Starovoitov

Merge branch 'samples: bpf: Refactor XDP programs with libbpf'

"Daniel T. Lee" says:

====================
To avoid the confusion caused by the increasing fragmentation of BPF
loader programs, this patchset replaces the previous bpf_load loader
with the libbpf loader.

Thanks to libbpf's bpf_link interface, managing tracepoint BPF
programs is much easier. bpf_program__attach_tracepoint() handles both
enabling the tracepoint event and attaching the BPF program to it
through a single bpf_link, so there is no need to manage event_fd and
prog_fd separately.
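
A minimal sketch of this flow (the helper name is illustrative; the
section and tracepoint names are taken from xdp_monitor_kern.c):

    #include <bpf/libbpf.h>

    /* attach one tracepoint program, then detach it again */
    static int monitor_once(struct bpf_object *obj)
    {
        struct bpf_program *prog;
        struct bpf_link *link;

        prog = bpf_object__find_program_by_title(obj,
                        "tracepoint/xdp/xdp_exception");
        if (!prog)
            return -1;

        /* one call enables the tracepoint event and attaches the
         * program; the returned bpf_link owns both resources */
        link = bpf_program__attach_tracepoint(prog, "xdp",
                                              "xdp_exception");
        if (libbpf_get_error(link))
            return -1;

        /* detach the program and disable the event in one call */
        return bpf_link__destroy(link);
    }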

And with the addition of the generic bpf_program__attach() to libbpf,
it is now possible to attach BPF programs with __attach() instead of
explicitly calling __attach_<type>().
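
A sketch of the resulting attach loop (mirroring init_tracepoints()
in the xdp_redirect_cpu hunk below; assumes obj is a loaded
bpf_object and <errno.h> is included):

    static int attach_all(struct bpf_object *obj)
    {
        struct bpf_program *prog;
        struct bpf_link *link;

        bpf_object__for_each_program(prog, obj) {
            /* libbpf derives the attach type from the program's
             * SEC() name, e.g. "tracepoint/xdp/xdp_exception" */
            link = bpf_program__attach(prog);
            if (libbpf_get_error(link))
                return -EINVAL;
        }
        return 0;
    }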

This patchset refactors xdp_monitor to use this libbpf API; bpf_load
is removed and the sample is migrated to libbpf. Also,
attach_tracepoint() is replaced with the generic __attach() method in
xdp_redirect_cpu. Moreover, the maps in the _kern programs have been
converted to BTF-defined maps.
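
For reference, the shape of that map conversion, taken from the
xdp_monitor hunks below:

    /* legacy map definition */
    struct bpf_map_def SEC("maps") redirect_err_cnt = {
        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size = sizeof(u32),
        .value_size = sizeof(u64),
        .max_entries = 2,
    };

    /* equivalent BTF-defined map */
    struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, u64);
        __uint(max_entries, 2);
    } redirect_err_cnt SEC(".maps");
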
---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object in xdp_monitor
 - match program sections with bpf_program__is_<type>() instead of strncmp()
 - revert BTF key/val type to default of BPF_MAP_TYPE_PERF_EVENT_ARRAY
 - split increment into a separate statement
 - refactor pointer array initialization
 - error code cleanup
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 673e3752 321f6324
samples/bpf/Makefile
@@ -98,8 +98,8 @@ test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
+xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
 cpustat-objs := cpustat_user.o
samples/bpf/xdp_monitor_kern.c
@@ -6,21 +6,21 @@
 #include <uapi/linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
-    .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 2,
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, u32);
+    __type(value, u64);
+    __uint(max_entries, 2);
     /* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
 
 #define XDP_UNKNOWN XDP_REDIRECT + 1
 
-struct bpf_map_def SEC("maps") exception_cnt = {
-    .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = XDP_UNKNOWN + 1,
-};
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, u32);
+    __type(value, u64);
+    __uint(max_entries, XDP_UNKNOWN + 1);
+} exception_cnt SEC(".maps");
 
 /* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
 };
 #define MAX_CPUS 64
 
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
-    .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(struct datarec),
-    .max_entries = MAX_CPUS,
-};
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, u32);
+    __type(value, struct datarec);
+    __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
 
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
-    .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(struct datarec),
-    .max_entries = 1,
-};
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, u32);
+    __type(value, struct datarec);
+    __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
     return 0;
 }
 
-struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
-    .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(struct datarec),
-    .max_entries = 1,
-};
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, u32);
+    __type(value, struct datarec);
+    __uint(max_entries, 1);
+} devmap_xmit_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
  * Code in: kernel/include/trace/events/xdp.h
samples/bpf/xdp_monitor_user.c
@@ -26,12 +26,37 @@ static const char *__doc_err_only__=
 #include <net/if.h>
 #include <time.h>
 
+#include <signal.h>
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
 #include "bpf_util.h"
 
+enum map_type {
+    REDIRECT_ERR_CNT,
+    EXCEPTION_CNT,
+    CPUMAP_ENQUEUE_CNT,
+    CPUMAP_KTHREAD_CNT,
+    DEVMAP_XMIT_CNT,
+};
+
+static const char *const map_type_strings[] = {
+    [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+    [EXCEPTION_CNT] = "exception_cnt",
+    [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+    [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+    [DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
+};
+
+#define NUM_MAP 5
+#define NUM_TP 8
+
+static int tp_cnt;
+static int map_cnt;
 static int verbose = 1;
 static bool debug = false;
+struct bpf_map *map_data[NUM_MAP] = {};
+struct bpf_link *tp_links[NUM_TP] = {};
+struct bpf_object *obj;
 
 static const struct option long_options[] = {
     {"help",    no_argument,        NULL, 'h' },
@@ -41,6 +66,16 @@ static const struct option long_options[] = {
     {0, 0, NULL,  0 }
 };
 
+static void int_exit(int sig)
+{
+    /* Detach tracepoints */
+    while (tp_cnt)
+        bpf_link__destroy(tp_links[--tp_cnt]);
+
+    bpf_object__close(obj);
+    exit(0);
+}
+
 /* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
 #define EXIT_FAIL_MEM   5
@@ -483,23 +518,23 @@ static bool stats_collect(struct stats_record *rec)
      * this can happen by someone running perf-record -e
      */
 
-    fd = map_data[0].fd; /* map0: redirect_err_cnt */
+    fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
     for (i = 0; i < REDIR_RES_MAX; i++)
         map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
 
-    fd = map_data[1].fd; /* map1: exception_cnt */
+    fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
     for (i = 0; i < XDP_ACTION_MAX; i++) {
         map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
     }
 
-    fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+    fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
     for (i = 0; i < MAX_CPUS; i++)
         map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
 
-    fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+    fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
     map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
 
-    fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+    fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
     map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
 
     return true;
@@ -598,8 +633,8 @@ static void stats_poll(int interval, bool err_only)
 
     /* TODO Need more advanced stats on error types */
     if (verbose) {
-        printf(" - Stats map0: %s\n", map_data[0].name);
-        printf(" - Stats map1: %s\n", map_data[1].name);
+        printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
+        printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
         printf("\n");
     }
     fflush(stdout);
@@ -618,44 +653,51 @@ static void stats_poll(int interval, bool err_only)
 
 static void print_bpf_prog_info(void)
 {
-    int i;
+    struct bpf_program *prog;
+    struct bpf_map *map;
+    int i = 0;
 
     /* Prog info */
-    printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
-    for (i = 0; i < prog_cnt; i++) {
-        printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
+    printf("Loaded BPF prog have %d bpf program(s)\n", tp_cnt);
+    bpf_object__for_each_program(prog, obj) {
+        printf(" - prog_fd[%d] = fd(%d)\n", i, bpf_program__fd(prog));
+        i++;
     }
 
+    i = 0;
+
     /* Maps info */
-    printf("Loaded BPF prog have %d map(s)\n", map_data_count);
-    for (i = 0; i < map_data_count; i++) {
-        char *name = map_data[i].name;
-        int fd = map_data[i].fd;
+    printf("Loaded BPF prog have %d map(s)\n", map_cnt);
+    bpf_object__for_each_map(map, obj) {
+        const char *name = bpf_map__name(map);
+        int fd = bpf_map__fd(map);
 
         printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
+        i++;
     }
 
     /* Event info */
-    printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
-    for (i = 0; i < prog_cnt; i++) {
-        if (event_fd[i] != -1)
-            printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
+    printf("Searching for (max:%d) event file descriptor(s)\n", tp_cnt);
+    for (i = 0; i < tp_cnt; i++) {
+        int fd = bpf_link__fd(tp_links[i]);
+
+        if (fd != -1)
+            printf(" - event_fd[%d] = fd(%d)\n", i, fd);
     }
 }
 
 int main(int argc, char **argv)
 {
     struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+    struct bpf_program *prog;
     int longindex = 0, opt;
-    int ret = EXIT_SUCCESS;
-    char bpf_obj_file[256];
+    int ret = EXIT_FAILURE;
+    enum map_type type;
+    char filename[256];
 
     /* Default settings: */
     bool errors_only = true;
     int interval = 2;
 
-    snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);
-
     /* Parse commands line args */
     while ((opt = getopt_long(argc, argv, "hDSs:",
                   long_options, &longindex)) != -1) {
@@ -672,40 +714,79 @@ int main(int argc, char **argv)
         case 'h':
         default:
             usage(argv);
-            return EXIT_FAILURE;
+            return ret;
         }
     }
 
+    snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
     if (setrlimit(RLIMIT_MEMLOCK, &r)) {
         perror("setrlimit(RLIMIT_MEMLOCK)");
-        return EXIT_FAILURE;
+        return ret;
     }
 
-    if (load_bpf_file(bpf_obj_file)) {
-        printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
-        return EXIT_FAILURE;
+    /* Remove tracepoint program when program is interrupted or killed */
+    signal(SIGINT, int_exit);
+    signal(SIGTERM, int_exit);
+
+    obj = bpf_object__open_file(filename, NULL);
+    if (libbpf_get_error(obj)) {
+        printf("ERROR: opening BPF object file failed\n");
+        obj = NULL;
+        goto cleanup;
+    }
+
+    /* load BPF program */
+    if (bpf_object__load(obj)) {
+        printf("ERROR: loading BPF object file failed\n");
+        goto cleanup;
+    }
+
+    for (type = 0; type < NUM_MAP; type++) {
+        map_data[type] =
+            bpf_object__find_map_by_name(obj, map_type_strings[type]);
+
+        if (libbpf_get_error(map_data[type])) {
+            printf("ERROR: finding a map in obj file failed\n");
+            goto cleanup;
+        }
+        map_cnt++;
     }
-    if (!prog_fd[0]) {
-        printf("ERROR - load_bpf_file: %s\n", strerror(errno));
-        return EXIT_FAILURE;
+
+    bpf_object__for_each_program(prog, obj) {
+        tp_links[tp_cnt] = bpf_program__attach(prog);
+        if (libbpf_get_error(tp_links[tp_cnt])) {
+            printf("ERROR: bpf_program__attach failed\n");
+            tp_links[tp_cnt] = NULL;
+            goto cleanup;
+        }
+        tp_cnt++;
     }
 
     if (debug) {
         print_bpf_prog_info();
     }
 
-    /* Unload/stop tracepoint event by closing fd's */
+    /* Unload/stop tracepoint event by closing bpf_link's */
     if (errors_only) {
-        /* The prog_fd[i] and event_fd[i] depend on the
-         * order the functions was defined in _kern.c
+        /* The bpf_link[i] depend on the order of
+         * the functions was defined in _kern.c
          */
-        close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
-        close(prog_fd[2]);  /* func: trace_xdp_redirect */
-        close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
-        close(prog_fd[3]);  /* func: trace_xdp_redirect_map */
+        bpf_link__destroy(tp_links[2]); /* tracepoint/xdp/xdp_redirect */
+        tp_links[2] = NULL;
+
+        bpf_link__destroy(tp_links[3]); /* tracepoint/xdp/xdp_redirect_map */
+        tp_links[3] = NULL;
     }
 
     stats_poll(interval, errors_only);
 
+    ret = EXIT_SUCCESS;
+
+cleanup:
+    /* Detach tracepoints */
+    while (tp_cnt)
+        bpf_link__destroy(tp_links[--tp_cnt]);
+
+    bpf_object__close(obj);
     return ret;
 }
samples/bpf/xdp_redirect_cpu_user.c
@@ -37,18 +37,35 @@ static __u32 prog_id;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
 static int n_cpus;
 
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+enum map_type {
+    CPU_MAP,
+    RX_CNT,
+    REDIRECT_ERR_CNT,
+    CPUMAP_ENQUEUE_CNT,
+    CPUMAP_KTHREAD_CNT,
+    CPUS_AVAILABLE,
+    CPUS_COUNT,
+    CPUS_ITERATOR,
+    EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+    [CPU_MAP] = "cpu_map",
+    [RX_CNT] = "rx_cnt",
+    [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+    [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+    [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+    [CPUS_AVAILABLE] = "cpus_available",
+    [CPUS_COUNT] = "cpus_count",
+    [CPUS_ITERATOR] = "cpus_iterator",
+    [EXCEPTION_CNT] = "exception_cnt",
+};
 
 #define NUM_TP 5
-struct bpf_link *tp_links[NUM_TP] = { 0 };
+#define NUM_MAP 9
+
+struct bpf_link *tp_links[NUM_TP] = {};
+static int map_fds[NUM_MAP];
 static int tp_cnt = 0;
 
 /* Exit return codes */
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
 {
     int fd, i;
 
-    fd = rx_cnt_map_fd;
+    fd = map_fds[RX_CNT];
     map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-    fd = redirect_err_cnt_map_fd;
+    fd = map_fds[REDIRECT_ERR_CNT];
     map_collect_percpu(fd, 1, &rec->redir_err);
 
-    fd = cpumap_enqueue_cnt_map_fd;
+    fd = map_fds[CPUMAP_ENQUEUE_CNT];
     for (i = 0; i < n_cpus; i++)
         map_collect_percpu(fd, i, &rec->enq[i]);
 
-    fd = cpumap_kthread_cnt_map_fd;
+    fd = map_fds[CPUMAP_KTHREAD_CNT];
     map_collect_percpu(fd, 0, &rec->kthread);
 
-    fd = exception_cnt_map_fd;
+    fd = map_fds[EXCEPTION_CNT];
     map_collect_percpu(fd, 0, &rec->exception);
 }
@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
     /* Add a CPU entry to cpumap, as this allocate a cpu entry in
      * the kernel for the cpu.
      */
-    ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+    ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
     if (ret) {
         fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
         exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
     /* Inform bpf_prog's that a new CPU is available to select
      * from via some control maps.
      */
-    ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+    ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
     if (ret) {
         fprintf(stderr, "Add to avail CPUs failed\n");
         exit(EXIT_FAIL_BPF);
     }
 
     /* When not replacing/updating existing entry, bump the count */
-    ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
+    ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
     if (ret) {
         fprintf(stderr, "Failed reading curr cpus_count\n");
         exit(EXIT_FAIL_BPF);
     }
     if (new) {
         curr_cpus_count++;
-        ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+        ret = bpf_map_update_elem(map_fds[CPUS_COUNT], &key,
                       &curr_cpus_count, 0);
         if (ret) {
             fprintf(stderr, "Failed write curr cpus_count\n");
@@ -612,7 +629,7 @@ static void mark_cpus_unavailable(void)
     int ret, i;
 
     for (i = 0; i < n_cpus; i++) {
-        ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+        ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &i,
                       &invalid_cpu, 0);
         if (ret) {
             fprintf(stderr, "Failed marking CPU unavailable\n");
@@ -665,68 +682,37 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
     free_stats_record(prev);
 }
 
-static struct bpf_link * attach_tp(struct bpf_object *obj,
-                   const char *tp_category,
-                   const char* tp_name)
+static int init_tracepoints(struct bpf_object *obj)
 {
     struct bpf_program *prog;
-    struct bpf_link *link;
-    char sec_name[PATH_MAX];
-    int len;
 
-    len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
-               tp_category, tp_name);
-    if (len < 0)
-        exit(EXIT_FAIL);
+    bpf_object__for_each_program(prog, obj) {
+        if (bpf_program__is_tracepoint(prog) != true)
+            continue;
 
-    prog = bpf_object__find_program_by_title(obj, sec_name);
-    if (!prog) {
-        fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
-        exit(EXIT_FAIL_BPF);
+        tp_links[tp_cnt] = bpf_program__attach(prog);
+        if (libbpf_get_error(tp_links[tp_cnt])) {
+            tp_links[tp_cnt] = NULL;
+            return -EINVAL;
+        }
+        tp_cnt++;
     }
 
-    link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
-    if (libbpf_get_error(link))
-        exit(EXIT_FAIL_BPF);
-
-    return link;
-}
-
-static void init_tracepoints(struct bpf_object *obj) {
-    tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err");
-    tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err");
-    tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception");
-    tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue");
-    tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread");
+    return 0;
 }
 
 static int init_map_fds(struct bpf_object *obj)
 {
-    /* Maps updated by tracepoints */
-    redirect_err_cnt_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
-    exception_cnt_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "exception_cnt");
-    cpumap_enqueue_cnt_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
-    cpumap_kthread_cnt_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
-
-    /* Maps used by XDP */
-    rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
-    cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
-    cpus_available_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "cpus_available");
-    cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
-    cpus_iterator_map_fd =
-        bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
-
-    if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
-        redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
-        cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
-        cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
-        exception_cnt_map_fd < 0)
-        return -ENOENT;
+    enum map_type type;
+
+    for (type = 0; type < NUM_MAP; type++) {
+        map_fds[type] =
+            bpf_object__find_map_fd_by_name(obj,
+                            map_type_strings[type]);
+
+        if (map_fds[type] < 0)
+            return -ENOENT;
+    }
 
     return 0;
 }
@@ -795,13 +781,13 @@ int main(int argc, char **argv)
     bool stress_mode = false;
     struct bpf_program *prog;
     struct bpf_object *obj;
+    int err = EXIT_FAIL;
     char filename[256];
     int added_cpus = 0;
     int longindex = 0;
     int interval = 2;
     int add_cpu = -1;
-    int opt, err;
-    int prog_fd;
+    int opt, prog_fd;
     int *cpu, i;
     __u32 qsize;
@@ -824,24 +810,29 @@ int main(int argc, char **argv)
     }
 
     if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
-        return EXIT_FAIL;
+        return err;
 
     if (prog_fd < 0) {
         fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
             strerror(errno));
-        return EXIT_FAIL;
+        return err;
     }
-    init_tracepoints(obj);
+
+    if (init_tracepoints(obj) < 0) {
+        fprintf(stderr, "ERR: bpf_program__attach failed\n");
+        return err;
+    }
+
     if (init_map_fds(obj) < 0) {
         fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
-        return EXIT_FAIL;
+        return err;
     }
     mark_cpus_unavailable();
 
     cpu = malloc(n_cpus * sizeof(int));
     if (!cpu) {
         fprintf(stderr, "failed to allocate cpu array\n");
-        return EXIT_FAIL;
+        return err;
     }
     memset(cpu, 0, n_cpus * sizeof(int));
@@ -960,14 +951,12 @@ int main(int argc, char **argv)
     prog = bpf_object__find_program_by_title(obj, prog_name);
     if (!prog) {
         fprintf(stderr, "bpf_object__find_program_by_title failed\n");
-        err = EXIT_FAIL;
         goto out;
     }
 
     prog_fd = bpf_program__fd(prog);
     if (prog_fd < 0) {
         fprintf(stderr, "bpf_program__fd failed\n");
-        err = EXIT_FAIL;
         goto out;
     }
@@ -986,6 +975,8 @@ int main(int argc, char **argv)
     stats_poll(interval, use_separators, prog_name, mprog_name,
            &value, stress_mode);
 
+    err = EXIT_OK;
+
 out:
     free(cpu);
     return err;
samples/bpf/xdp_sample_pkts_kern.c
@@ -5,14 +5,12 @@
 #include <bpf/bpf_helpers.h>
 
 #define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128
 
-struct bpf_map_def SEC("maps") my_map = {
-    .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-    .key_size = sizeof(int),
-    .value_size = sizeof(u32),
-    .max_entries = MAX_CPUS,
-};
+struct {
+    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+    __uint(key_size, sizeof(int));
+    __uint(value_size, sizeof(u32));
+} my_map SEC(".maps");
 
 SEC("xdp_sample")
 int xdp_sample_prog(struct xdp_md *ctx)
samples/bpf/xdp_sample_pkts_user.c
@@ -18,7 +18,6 @@
 #include "perf-sys.h"
 
-#define MAX_CPUS 128
 
 static int if_idx;
 static char *if_name;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;