Commit 1ec6fd34 authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf bpf-filter: Support separate lost counts for each filter

As the BPF filter is shared between other processes, it should have its
own counter for each invocation.  Add a new array map (lost_count) to
save the count using the same index as the filter.  It should clear the
count before running the filter.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20240703223035.2024586-6-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 0715f65e
......@@ -260,11 +260,23 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
}
if (needs_pid_hash && geteuid() != 0) {
int zero = 0;
/* The filters map is shared among other processes */
ret = update_pid_hash(evsel, entry);
if (ret < 0)
goto err;
fd = get_pinned_fd("dropped");
if (fd < 0) {
ret = fd;
goto err;
}
/* Reset the lost count */
bpf_map_update_elem(fd, &pinned_filter_idx, &zero, BPF_ANY);
close(fd);
fd = get_pinned_fd("perf_sample_filter");
if (fd < 0) {
ret = fd;
......@@ -347,9 +359,25 @@ int perf_bpf_filter__destroy(struct evsel *evsel)
u64 perf_bpf_filter__lost_count(struct evsel *evsel)
{
struct sample_filter_bpf *skel = evsel->bpf_skel;
int count = 0;
if (list_empty(&evsel->bpf_filters))
return 0;
if (pinned_filter_idx >= 0) {
int fd = get_pinned_fd("dropped");
bpf_map_lookup_elem(fd, &pinned_filter_idx, &count);
close(fd);
} else if (evsel->bpf_skel) {
struct sample_filter_bpf *skel = evsel->bpf_skel;
int fd = bpf_map__fd(skel->maps.dropped);
int idx = 0;
return skel ? skel->bss->dropped : 0;
bpf_map_lookup_elem(fd, &idx, &count);
}
return count;
}
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
......@@ -402,6 +430,7 @@ int perf_bpf_filter__pin(void)
/* pinned program will use pid-hash */
bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
bpf_map__set_max_entries(skel->maps.dropped, MAX_FILTERS);
skel->rodata->use_pid_hash = 1;
if (sample_filter_bpf__load(skel) < 0) {
......@@ -459,6 +488,10 @@ int perf_bpf_filter__pin(void)
pr_debug("chmod for pid_hash failed\n");
ret = -errno;
}
if (fchmodat(dir_fd, "dropped", 0666, 0) < 0) {
pr_debug("chmod for dropped failed\n");
ret = -errno;
}
err_close:
close(dir_fd);
......
......@@ -23,7 +23,14 @@ struct pid_hash {
__uint(max_entries, 1);
} pid_hash SEC(".maps");
int dropped;
/* tgid to filter index */
struct lost_count {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 1);
} dropped SEC(".maps");
volatile const int use_pid_hash;
void *bpf_cast_to_kern_ctx(void *) __ksym;
......@@ -189,6 +196,7 @@ int perf_sample_filter(void *ctx)
int in_group = 0;
int group_result = 0;
int i, k;
int *losts;
kctx = bpf_cast_to_kern_ctx(ctx);
......@@ -252,7 +260,10 @@ int perf_sample_filter(void *ctx)
return 1;
drop:
__sync_fetch_and_add(&dropped, 1);
losts = bpf_map_lookup_elem(&dropped, &k);
if (losts != NULL)
__sync_fetch_and_add(losts, 1);
return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.