Commit 71efc48a authored by Riccardo Mancini, committed by Arnaldo Carvalho de Melo

perf evsel: Separate rlimit increase from evsel__open_cpu()

This is a preparatory patch for the workqueue patches. Its goal is to
separate the actual opening in evsel__open_cpu() (which could be
performed in parallel) from the existing fallback mechanisms, which
should be handled sequentially.

This patch separates the rlimit increase from evsel__open_cpu().
Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/2f256de8ec37b9809a5cef73c2fa7bce416af5d3.1629490974.git.rickyman7@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent d21fc5f0
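
For background on the fallback being factored out: on EMFILE, perf bumps RLIMIT_NOFILE, first raising the soft limit to the hard limit, then raising both past the hard limit (which requires CAP_SYS_RESOURCE), preserving errno and retrying the failing open. The standalone sketch below illustrates that pattern under those assumptions; bump_nofile_limit(), rlimit_step and the open("/dev/null") retry loop are illustrative stand-ins, not perf code.

/*
 * Standalone sketch of the EMFILE fallback that this patch moves out of
 * evsel__open_cpu(). All names here (bump_nofile_limit, rlimit_step) are
 * hypothetical; only the getrlimit()/setrlimit() pattern mirrors the patch.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

enum rlimit_step { NOT_RAISED, RAISED_TO_MAX, RAISED_PAST_MAX };

/* Raise RLIMIT_NOFILE one step, preserving errno; return true if raised. */
static bool bump_nofile_limit(enum rlimit_step *step)
{
	struct rlimit l;
	int saved_errno = errno;

	if (*step >= RAISED_PAST_MAX)
		return false;

	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (*step == NOT_RAISED) {
			l.rlim_cur = l.rlim_max;	/* soft limit -> hard limit */
		} else {
			l.rlim_cur = l.rlim_max + 1000;	/* past the hard limit: needs CAP_SYS_RESOURCE */
			l.rlim_max = l.rlim_cur;
		}
		if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
			*step += 1;
			errno = saved_errno;
			return true;
		}
	}
	errno = saved_errno;
	return false;
}

int main(void)
{
	enum rlimit_step step = NOT_RAISED;
	int fd;

retry:
	fd = open("/dev/null", O_RDONLY);	/* stand-in for the real open loop */
	if (fd < 0 && errno == EMFILE && bump_nofile_limit(&step))
		goto retry;			/* fallback handled sequentially, then retry */

	if (fd >= 0) {
		printf("opened fd %d\n", fd);
		close(fd);
	}
	return fd < 0;
}
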
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1931,13 +1931,40 @@ bool evsel__detect_missing_features(struct evsel *evsel)
 	}
 }
 
+bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
+{
+	int old_errno;
+	struct rlimit l;
+
+	if (*set_rlimit < INCREASED_MAX) {
+		old_errno = errno;
+
+		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+			if (*set_rlimit == NO_CHANGE) {
+				l.rlim_cur = l.rlim_max;
+			} else {
+				l.rlim_cur = l.rlim_max + 1000;
+				l.rlim_max = l.rlim_cur;
+			}
+			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+				(*set_rlimit) += 1;
+				errno = old_errno;
+				return true;
+			}
+		}
+		errno = old_errno;
+	}
+
+	return false;
+}
+
 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads,
 		int start_cpu, int end_cpu)
 {
 	int cpu, thread, nthreads;
 	int pid = -1, err, old_errno;
-	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+	enum rlimit_action set_rlimit = NO_CHANGE;
 
 	err = __evsel__prepare_open(evsel, cpus, threads);
 	if (err)
@@ -2046,25 +2073,8 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	 * perf stat needs between 5 and 22 fds per CPU. When we run out
 	 * of them try to increase the limits.
 	 */
-	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
-		struct rlimit l;
-
-		old_errno = errno;
-		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
-			if (set_rlimit == NO_CHANGE)
-				l.rlim_cur = l.rlim_max;
-			else {
-				l.rlim_cur = l.rlim_max + 1000;
-				l.rlim_max = l.rlim_cur;
-			}
-			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
-				set_rlimit++;
-				errno = old_errno;
-				goto retry_open;
-			}
-		}
-		errno = old_errno;
-	}
+	if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
+		goto retry_open;
 
 	if (err != -EINVAL || cpu > 0 || thread > 0)
 		goto out_close;
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -291,6 +291,9 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads);
 bool evsel__detect_missing_features(struct evsel *evsel);
 
+enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
+bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);
+
 struct perf_sample;
 
 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);