Commit 3805e4f3 authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Move mmap allocation to perf_evlist__mmap_ops::get

Move allocation of the mmap array into perf_evlist__mmap_ops::get, to
centralize the mmap allocation.

Also move nr_mmap setup to perf_evlist__mmap_ops so it's centralized and
shared by both perf and libperf mmap code.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20191017105918.20873-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 6eb65f7a
...@@ -338,10 +338,6 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo ...@@ -338,10 +338,6 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
int i; int i;
struct perf_mmap *map; struct perf_mmap *map;
evlist->nr_mmaps = perf_cpu_map__nr(evlist->cpus);
if (perf_cpu_map__empty(evlist->cpus))
evlist->nr_mmaps = perf_thread_map__nr(evlist->threads);
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
if (!map) if (!map)
return NULL; return NULL;
...@@ -384,18 +380,22 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, ...@@ -384,18 +380,22 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
static struct perf_mmap* static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx) perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{ {
struct perf_mmap *map = &evlist->mmap[idx]; struct perf_mmap *maps;
if (overwrite) { maps = overwrite ? evlist->mmap_ovw : evlist->mmap;
if (!evlist->mmap_ovw) {
evlist->mmap_ovw = perf_evlist__alloc_mmap(evlist, true); if (!maps) {
if (!evlist->mmap_ovw) maps = perf_evlist__alloc_mmap(evlist, overwrite);
return NULL; if (!maps)
} return NULL;
map = &evlist->mmap_ovw[idx];
if (overwrite)
evlist->mmap_ovw = maps;
else
evlist->mmap = maps;
} }
return map; return &maps[idx];
} }
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y)) #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
...@@ -556,6 +556,17 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, ...@@ -556,6 +556,17 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
return -1; return -1;
} }
/*
 * Compute how many mmap slots the evlist needs: one per CPU in the
 * evlist's cpu map, or — when the cpu map is empty (per-thread
 * monitoring) — one per thread instead.
 *
 * NOTE(review): assumes perf_cpu_map__nr()/perf_cpu_map__empty() are
 * pure accessors with no side effects — confirm against libperf.
 */
static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	/* Per-thread mode: no CPUs to map, size by thread count. */
	if (perf_cpu_map__empty(evlist->cpus))
		return perf_thread_map__nr(evlist->threads);

	return perf_cpu_map__nr(evlist->cpus);
}
int perf_evlist__mmap_ops(struct perf_evlist *evlist, int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp) struct perf_mmap_param *mp)
...@@ -567,10 +578,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, ...@@ -567,10 +578,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (!ops || !ops->get || !ops->mmap) if (!ops || !ops->get || !ops->mmap)
return -EINVAL; return -EINVAL;
if (!evlist->mmap) evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
if (!evlist->mmap)
return -ENOMEM;
perf_evlist__for_each_entry(evlist, evsel) { perf_evlist__for_each_entry(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) && if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
......
...@@ -599,9 +599,6 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist, ...@@ -599,9 +599,6 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
int i; int i;
struct mmap *map; struct mmap *map;
evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
if (perf_cpu_map__empty(evlist->core.cpus))
evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map) if (!map)
return NULL; return NULL;
...@@ -639,19 +636,21 @@ static struct perf_mmap* ...@@ -639,19 +636,21 @@ static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx) perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{ {
struct evlist *evlist = container_of(_evlist, struct evlist, core); struct evlist *evlist = container_of(_evlist, struct evlist, core);
struct mmap *maps = evlist->mmap; struct mmap *maps;
if (overwrite) { maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
maps = evlist->overwrite_mmap;
if (!maps) { if (!maps) {
maps = evlist__alloc_mmap(evlist, true); maps = evlist__alloc_mmap(evlist, overwrite);
if (!maps) if (!maps)
return NULL; return NULL;
if (overwrite) {
evlist->overwrite_mmap = maps; evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
} else {
evlist->mmap = maps;
} }
} }
...@@ -812,11 +811,6 @@ int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, ...@@ -812,11 +811,6 @@ int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
.mmap = perf_evlist__mmap_cb_mmap, .mmap = perf_evlist__mmap_cb_mmap,
}; };
if (!evlist->mmap)
evlist->mmap = evlist__alloc_mmap(evlist, false);
if (!evlist->mmap)
return -ENOMEM;
evlist->core.mmap_len = evlist__mmap_size(pages); evlist->core.mmap_len = evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->core.mmap_len); pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
mp.core.mask = evlist->core.mmap_len - page_size - 1; mp.core.mask = evlist->core.mmap_len - page_size - 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment