Commit 9521b5f2 authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

perf tools: Rename perf_evlist__mmap() to evlist__mmap()

Rename perf_evlist__mmap() to evlist__mmap(), so we don't have a name
clash when we add perf_evlist__mmap() in libperf.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lore.kernel.org/lkml/20190913132355.21634-5-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent a5830532
...@@ -90,7 +90,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe ...@@ -90,7 +90,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
CHECK__(evlist__open(evlist)); CHECK__(evlist__open(evlist));
CHECK__(perf_evlist__mmap(evlist, UINT_MAX)); CHECK__(evlist__mmap(evlist, UINT_MAX));
pc = evlist->mmap[0].base; pc = evlist->mmap[0].base;
ret = perf_read_tsc_conversion(pc, &tc); ret = perf_read_tsc_conversion(pc, &tc);
......
...@@ -1060,7 +1060,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm) ...@@ -1060,7 +1060,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
goto out; goto out;
} }
if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) { if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n", ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
evlist__close(evlist); evlist__close(evlist);
......
...@@ -439,7 +439,7 @@ static int record__mmap_flush_parse(const struct option *opt, ...@@ -439,7 +439,7 @@ static int record__mmap_flush_parse(const struct option *opt,
if (!opts->mmap_flush) if (!opts->mmap_flush)
opts->mmap_flush = MMAP_FLUSH_DEFAULT; opts->mmap_flush = MMAP_FLUSH_DEFAULT;
flush_max = perf_evlist__mmap_size(opts->mmap_pages); flush_max = evlist__mmap_size(opts->mmap_pages);
flush_max /= 4; flush_max /= 4;
if (opts->mmap_flush > flush_max) if (opts->mmap_flush > flush_max)
opts->mmap_flush = flush_max; opts->mmap_flush = flush_max;
...@@ -707,7 +707,7 @@ static int record__mmap_evlist(struct record *rec, ...@@ -707,7 +707,7 @@ static int record__mmap_evlist(struct record *rec,
if (opts->affinity != PERF_AFFINITY_SYS) if (opts->affinity != PERF_AFFINITY_SYS)
cpu__setup_cpunode_map(); cpu__setup_cpunode_map();
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages, opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode, opts->auxtrace_snapshot_mode,
opts->nr_cblocks, opts->affinity, opts->nr_cblocks, opts->affinity,
...@@ -1980,7 +1980,7 @@ static int record__parse_mmap_pages(const struct option *opt, ...@@ -1980,7 +1980,7 @@ static int record__parse_mmap_pages(const struct option *opt,
static void switch_output_size_warn(struct record *rec) static void switch_output_size_warn(struct record *rec)
{ {
u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages); u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output; struct switch_output *s = &rec->switch_output;
wakeup_size /= 2; wakeup_size /= 2;
......
...@@ -1042,7 +1042,7 @@ static int perf_top__start_counters(struct perf_top *top) ...@@ -1042,7 +1042,7 @@ static int perf_top__start_counters(struct perf_top *top)
} }
} }
if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) { if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n", ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg))); errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err; goto out_err;
......
...@@ -3409,7 +3409,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) ...@@ -3409,7 +3409,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->dump.map) if (trace->dump.map)
bpf_map__fprintf(trace->dump.map, trace->output); bpf_map__fprintf(trace->dump.map, trace->output);
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages); err = evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0) if (err < 0)
goto out_error_mmap; goto out_error_mmap;
......
...@@ -63,9 +63,9 @@ static int do_test(struct evlist *evlist, int mmap_pages, ...@@ -63,9 +63,9 @@ static int do_test(struct evlist *evlist, int mmap_pages,
int err; int err;
char sbuf[STRERR_BUFSIZE]; char sbuf[STRERR_BUFSIZE];
err = perf_evlist__mmap(evlist, mmap_pages); err = evlist__mmap(evlist, mmap_pages);
if (err < 0) { if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
return TEST_FAIL; return TEST_FAIL;
} }
......
...@@ -167,9 +167,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void), ...@@ -167,9 +167,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist; goto out_delete_evlist;
} }
err = perf_evlist__mmap(evlist, opts.mmap_pages); err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) { if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist; goto out_delete_evlist;
} }
......
...@@ -685,9 +685,9 @@ static int do_test_code_reading(bool try_kcore) ...@@ -685,9 +685,9 @@ static int do_test_code_reading(bool try_kcore)
break; break;
} }
ret = perf_evlist__mmap(evlist, UINT_MAX); ret = evlist__mmap(evlist, UINT_MAX);
if (ret < 0) { if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n"); pr_debug("evlist__mmap failed\n");
goto out_put; goto out_put;
} }
......
...@@ -104,7 +104,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un ...@@ -104,7 +104,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err; goto out_err;
} }
CHECK__(perf_evlist__mmap(evlist, UINT_MAX)); CHECK__(evlist__mmap(evlist, UINT_MAX));
/* /*
* First, test that a 'comm' event can be found when the event is * First, test that a 'comm' event can be found when the event is
......
...@@ -99,7 +99,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse ...@@ -99,7 +99,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127; expected_nr_events[i] = 1 + rand() % 127;
} }
if (perf_evlist__mmap(evlist, 128) < 0) { if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno, pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist; goto out_delete_evlist;
......
...@@ -69,9 +69,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest ...@@ -69,9 +69,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist; goto out_delete_evlist;
} }
err = perf_evlist__mmap(evlist, UINT_MAX); err = evlist__mmap(evlist, UINT_MAX);
if (err < 0) { if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist; goto out_delete_evlist;
} }
......
...@@ -143,9 +143,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus ...@@ -143,9 +143,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* fds in the same CPU to be injected in the same mmap ring buffer * fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/ */
err = perf_evlist__mmap(evlist, opts.mmap_pages); err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) { if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist; goto out_delete_evlist;
} }
......
...@@ -82,7 +82,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -82,7 +82,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_evlist; goto out_delete_evlist;
} }
err = perf_evlist__mmap(evlist, 128); err = evlist__mmap(evlist, 128);
if (err < 0) { if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno, pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
......
...@@ -460,9 +460,9 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_ ...@@ -460,9 +460,9 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out; goto out;
} }
err = perf_evlist__mmap(evlist, UINT_MAX); err = evlist__mmap(evlist, UINT_MAX);
if (err) { if (err) {
pr_debug("perf_evlist__mmap failed!\n"); pr_debug("evlist__mmap failed!\n");
goto out_err; goto out_err;
} }
......
...@@ -106,7 +106,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused ...@@ -106,7 +106,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist; goto out_delete_evlist;
} }
if (perf_evlist__mmap(evlist, 128) < 0) { if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno, pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf))); str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist; goto out_delete_evlist;
......
...@@ -732,7 +732,7 @@ perf_evlist__should_poll(struct evlist *evlist __maybe_unused, ...@@ -732,7 +732,7 @@ perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
return true; return true;
} }
static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx, static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx, struct mmap_params *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite) int thread, int *_output, int *_output_overwrite)
{ {
...@@ -810,7 +810,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx, ...@@ -810,7 +810,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
return 0; return 0;
} }
static int perf_evlist__mmap_per_cpu(struct evlist *evlist, static int evlist__mmap_per_cpu(struct evlist *evlist,
struct mmap_params *mp) struct mmap_params *mp)
{ {
int cpu, thread; int cpu, thread;
...@@ -826,7 +826,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist, ...@@ -826,7 +826,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
true); true);
for (thread = 0; thread < nr_threads; thread++) { for (thread = 0; thread < nr_threads; thread++) {
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
thread, &output, &output_overwrite)) thread, &output, &output_overwrite))
goto out_unmap; goto out_unmap;
} }
...@@ -839,7 +839,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist, ...@@ -839,7 +839,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
return -1; return -1;
} }
static int perf_evlist__mmap_per_thread(struct evlist *evlist, static int evlist__mmap_per_thread(struct evlist *evlist,
struct mmap_params *mp) struct mmap_params *mp)
{ {
int thread; int thread;
...@@ -853,7 +853,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist, ...@@ -853,7 +853,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false); false);
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
&output, &output_overwrite)) &output, &output_overwrite))
goto out_unmap; goto out_unmap;
} }
...@@ -888,7 +888,7 @@ unsigned long perf_event_mlock_kb_in_pages(void) ...@@ -888,7 +888,7 @@ unsigned long perf_event_mlock_kb_in_pages(void)
return pages; return pages;
} }
size_t perf_evlist__mmap_size(unsigned long pages) size_t evlist__mmap_size(unsigned long pages)
{ {
if (pages == UINT_MAX) if (pages == UINT_MAX)
pages = perf_event_mlock_kb_in_pages(); pages = perf_event_mlock_kb_in_pages();
...@@ -971,7 +971,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, ...@@ -971,7 +971,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
} }
/** /**
* perf_evlist__mmap_ex - Create mmaps to receive events. * evlist__mmap_ex - Create mmaps to receive events.
* @evlist: list of events * @evlist: list of events
* @pages: map length in pages * @pages: map length in pages
* @overwrite: overwrite older events? * @overwrite: overwrite older events?
...@@ -979,7 +979,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, ...@@ -979,7 +979,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* @auxtrace_overwrite - overwrite older auxtrace data? * @auxtrace_overwrite - overwrite older auxtrace data?
* *
* If @overwrite is %false the user needs to signal event consumption using * If @overwrite is %false the user needs to signal event consumption using
* perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this * perf_mmap__write_tail(). Using evlist__mmap_read() does this
* automatically. * automatically.
* *
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
...@@ -987,7 +987,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, ...@@ -987,7 +987,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* *
* Return: %0 on success, negative error code otherwise. * Return: %0 on success, negative error code otherwise.
*/ */
int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages, unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush, bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
int comp_level) int comp_level)
...@@ -1011,7 +1011,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, ...@@ -1011,7 +1011,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM; return -ENOMEM;
evlist->mmap_len = perf_evlist__mmap_size(pages); evlist->mmap_len = evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->mmap_len); pr_debug("mmap size %zuB\n", evlist->mmap_len);
mp.mask = evlist->mmap_len - page_size - 1; mp.mask = evlist->mmap_len - page_size - 1;
...@@ -1026,14 +1026,14 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, ...@@ -1026,14 +1026,14 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
} }
if (perf_cpu_map__empty(cpus)) if (perf_cpu_map__empty(cpus))
return perf_evlist__mmap_per_thread(evlist, &mp); return evlist__mmap_per_thread(evlist, &mp);
return perf_evlist__mmap_per_cpu(evlist, &mp); return evlist__mmap_per_cpu(evlist, &mp);
} }
int perf_evlist__mmap(struct evlist *evlist, unsigned int pages) int evlist__mmap(struct evlist *evlist, unsigned int pages)
{ {
return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
} }
int perf_evlist__create_maps(struct evlist *evlist, struct target *target) int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
...@@ -1889,7 +1889,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist, ...@@ -1889,7 +1889,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
goto out_delete_evlist; goto out_delete_evlist;
} }
if (perf_evlist__mmap(evlist, UINT_MAX)) if (evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist; goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) { evlist__for_each_entry(evlist, counter) {
......
...@@ -139,7 +139,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id); ...@@ -139,7 +139,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state); void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);
void perf_evlist__mmap_consume(struct evlist *evlist, int idx); void evlist__mmap_consume(struct evlist *evlist, int idx);
int evlist__open(struct evlist *evlist); int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist); void evlist__close(struct evlist *evlist);
...@@ -170,14 +170,14 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, ...@@ -170,14 +170,14 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void); unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages, unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks, bool auxtrace_overwrite, int nr_cblocks,
int affinity, int flush, int comp_level); int affinity, int flush, int comp_level);
int perf_evlist__mmap(struct evlist *evlist, unsigned int pages); int evlist__mmap(struct evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct evlist *evlist); void perf_evlist__munmap(struct evlist *evlist);
size_t perf_evlist__mmap_size(unsigned long pages); size_t evlist__mmap_size(unsigned long pages);
void evlist__disable(struct evlist *evlist); void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist); void evlist__enable(struct evlist *evlist);
......
...@@ -899,7 +899,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, ...@@ -899,7 +899,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite)) &pages, &overwrite))
return NULL; return NULL;
if (perf_evlist__mmap(evlist, pages) < 0) { if (evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError); PyErr_SetFromErrno(PyExc_OSError);
return NULL; return NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment