Commit 28667a52 authored by Riccardo Mancini, committed by Arnaldo Carvalho de Melo

perf evsel: Handle precise_ip fallback in evsel__open_cpu()

This is another patch in the effort to separate the fallback mechanisms
from the open itself.

In case of precise_ip fallback, the original precise_ip value will be stored
in the evsel (previously it was kept in a local variable) and the open will be
retried. Since the precise_ip fallback is the first in the chain of
fallbacks, there should be no functional change with this patch.
Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/74208c433d2024a6c4af9c0b140b54ed6b5ea810.1629490974.git.rickyman7@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 91233d00
......@@ -1709,42 +1709,29 @@ static void display_attr(struct perf_event_attr *attr)
}
}
static int perf_event_open(struct evsel *evsel,
pid_t pid, int cpu, int group_fd)
bool evsel__precise_ip_fallback(struct evsel *evsel)
{
int precise_ip = evsel->core.attr.precise_ip;
int fd;
while (1) {
pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
pid, cpu, group_fd, evsel->open_flags);
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, evsel->open_flags);
if (fd >= 0)
break;
/* Do not try less precise if not requested. */
if (!evsel->precise_max)
break;
/*
* We tried all the precise_ip values, and it's
* still failing, so leave it to standard fallback.
*/
if (!evsel->core.attr.precise_ip) {
evsel->core.attr.precise_ip = precise_ip;
break;
}
/* Do not try less precise if not requested. */
if (!evsel->precise_max)
return false;
pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
evsel->core.attr.precise_ip--;
pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
display_attr(&evsel->core.attr);
/*
* We tried all the precise_ip values, and it's
* still failing, so leave it to standard fallback.
*/
if (!evsel->core.attr.precise_ip) {
evsel->core.attr.precise_ip = evsel->precise_ip_original;
return false;
}
return fd;
}
if (!evsel->precise_ip_original)
evsel->precise_ip_original = evsel->core.attr.precise_ip;
evsel->core.attr.precise_ip--;
pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
display_attr(&evsel->core.attr);
return true;
}
static struct perf_cpu_map *empty_cpu_map;
static struct perf_thread_map *empty_thread_map;
......@@ -2004,8 +1991,11 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
test_attr__ready();
fd = perf_event_open(evsel, pid, cpus->map[cpu],
group_fd);
pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
pid, cpus->map[cpu], group_fd, evsel->open_flags);
fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu],
group_fd, evsel->open_flags);
FD(evsel, cpu, thread) = fd;
......@@ -2058,6 +2048,9 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
return 0;
try_fallback:
if (evsel__precise_ip_fallback(evsel))
goto retry_open;
if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
/* We just removed 1 thread, so lower the upper nthreads limit. */
nthreads--;
......
......@@ -151,6 +151,7 @@ struct evsel {
struct bperf_follower_bpf *follower_skel;
};
unsigned long open_flags;
int precise_ip_original;
};
struct perf_missing_features {
......@@ -298,6 +299,7 @@ bool evsel__ignore_missing_thread(struct evsel *evsel,
int nr_cpus, int cpu,
struct perf_thread_map *threads,
int thread, int err);
bool evsel__precise_ip_fallback(struct evsel *evsel);
struct perf_sample;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment