Commit 9d2ed645 authored by Alexey Budankov, committed by Arnaldo Carvalho de Melo

perf record: Allocate affinity masks

Allocate affinity option and masks for mmap data buffers and record
thread as well as initialize allocated objects.
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/526fa2b0-07de-6dbd-a7e9-26ba875593c9@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 159b0da5
...@@ -81,12 +81,17 @@ struct record { ...@@ -81,12 +81,17 @@ struct record {
bool timestamp_boundary; bool timestamp_boundary;
struct switch_output switch_output; struct switch_output switch_output;
unsigned long long samples; unsigned long long samples;
cpu_set_t affinity_mask;
}; };
static volatile int auxtrace_record__snapshot_started; static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger); static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger); static DEFINE_TRIGGER(switch_output_trigger);
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
"SYS", "NODE", "CPU"
};
static bool switch_output_signal(struct record *rec) static bool switch_output_signal(struct record *rec)
{ {
return rec->switch_output.signal && return rec->switch_output.signal &&
...@@ -533,7 +538,8 @@ static int record__mmap_evlist(struct record *rec, ...@@ -533,7 +538,8 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages, opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) { opts->auxtrace_snapshot_mode,
opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) { if (errno == EPERM) {
pr_err("Permission error mapping pages.\n" pr_err("Permission error mapping pages.\n"
"Consider increasing " "Consider increasing "
...@@ -1977,6 +1983,9 @@ int cmd_record(int argc, const char **argv) ...@@ -1977,6 +1983,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON # undef REASON
#endif #endif
CPU_ZERO(&rec->affinity_mask);
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = perf_evlist__new(); rec->evlist = perf_evlist__new();
if (rec->evlist == NULL) if (rec->evlist == NULL)
return -ENOMEM; return -ENOMEM;
...@@ -2140,6 +2149,8 @@ int cmd_record(int argc, const char **argv) ...@@ -2140,6 +2149,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0) if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks); pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
err = __cmd_record(&record, argc, argv); err = __cmd_record(&record, argc, argv);
out: out:
perf_evlist__delete(rec->evlist); perf_evlist__delete(rec->evlist);
......
...@@ -84,6 +84,14 @@ struct record_opts { ...@@ -84,6 +84,14 @@ struct record_opts {
clockid_t clockid; clockid_t clockid;
u64 clockid_res_ns; u64 clockid_res_ns;
int nr_cblocks; int nr_cblocks;
int affinity;
};
enum perf_affinity {
PERF_AFFINITY_SYS = 0,
PERF_AFFINITY_NODE,
PERF_AFFINITY_CPU,
PERF_AFFINITY_MAX
}; };
struct option; struct option;
......
...@@ -1022,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, ...@@ -1022,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/ */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages, unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks) bool auxtrace_overwrite, int nr_cblocks, int affinity)
{ {
struct perf_evsel *evsel; struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus; const struct cpu_map *cpus = evlist->cpus;
...@@ -1032,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, ...@@ -1032,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward. * Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer. * So &mp should not be passed through const pointer.
*/ */
struct mmap_params mp = { .nr_cblocks = nr_cblocks }; struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap) if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false); evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
...@@ -1064,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, ...@@ -1064,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{ {
return perf_evlist__mmap_ex(evlist, pages, 0, false, 0); return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
} }
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
......
...@@ -165,7 +165,7 @@ unsigned long perf_event_mlock_kb_in_pages(void); ...@@ -165,7 +165,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages, unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks); bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages); int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist); void perf_evlist__munmap(struct perf_evlist *evlist);
......
...@@ -343,6 +343,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c ...@@ -343,6 +343,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd; map->fd = fd;
map->cpu = cpu; map->cpu = cpu;
CPU_ZERO(&map->affinity_mask);
if (auxtrace_mmap__mmap(&map->auxtrace_mmap, if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd)) &mp->auxtrace_mp, map->base, fd))
return -1; return -1;
......
...@@ -38,6 +38,7 @@ struct perf_mmap { ...@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks; int nr_cblocks;
} aio; } aio;
#endif #endif
cpu_set_t affinity_mask;
}; };
/* /*
...@@ -69,7 +70,7 @@ enum bkw_mmap_state { ...@@ -69,7 +70,7 @@ enum bkw_mmap_state {
}; };
struct mmap_params { struct mmap_params {
int prot, mask, nr_cblocks; int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp; struct auxtrace_mmap_params auxtrace_mp;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.