Commit 315c0a1f authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Move perf's cpu_map__empty() to perf_cpu_map__empty()

So it's part of the libperf library as one of the basic functions operating
on the perf_cpu_map class.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190822111141.25823-4-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 6549cd8f
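To make the commit message concrete, below is a minimal, hypothetical sketch of how an out-of-tree libperf consumer could call the newly exported helper. It only uses functions that appear in the hunks of this commit (perf_cpu_map__new(), perf_cpu_map__nr(), perf_cpu_map__empty(), perf_cpu_map__put()); the program itself is illustrative and not part of the change, and would be linked against the libperf built from tools/perf/lib.

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	/* NULL asks libperf for a map of all online CPUs */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

	if (perf_cpu_map__empty(cpus))
		printf("dummy/empty map: per-thread monitoring, no specific CPUs\n");
	else
		printf("map holds %d CPUs\n", perf_cpu_map__nr(cpus));

	perf_cpu_map__put(cpus);
	return 0;
}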
@@ -396,7 +396,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 		 * AUX event. We also need the contextID in order to be notified
 		 * when a context switch happened.
 		 */
-		if (!cpu_map__empty(cpus)) {
+		if (!perf_cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

 			err = cs_etm_set_option(itr, cs_etm_evsel,
@@ -420,7 +420,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->core.attr.sample_period = 1;

 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
 	}
@@ -493,7 +493,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

 	/* cpu map is not empty, we have specific CPUs to work with */
-	if (!cpu_map__empty(event_cpus)) {
+	if (!perf_cpu_map__empty(event_cpus)) {
 		for (i = 0; i < cpu__max_cpu(); i++) {
 			if (!cpu_map__has(event_cpus, i) ||
 			    !cpu_map__has(online_cpus, i))
@@ -649,7 +649,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 		return -EINVAL;

 	/* If the cpu_map is empty all online CPUs are involved */
-	if (cpu_map__empty(event_cpus)) {
+	if (perf_cpu_map__empty(event_cpus)) {
 		cpu_map = online_cpus;
 	} else {
 		/* Make sure all specified CPUs are online */
...
@@ -133,7 +133,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 	if (!opts->full_auxtrace)
 		return 0;

-	if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
+	if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
 		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
 		return -EINVAL;
 	}
@@ -214,7 +214,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
 	}
...
@@ -365,7 +365,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}

-	per_cpu_mmaps = !cpu_map__empty(session->evlist->core.cpus);
+	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);

 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -702,7 +702,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Per-cpu recording needs sched_switch events to distinguish different
 	 * threads.
 	 */
-	if (have_timing_info && !cpu_map__empty(cpus)) {
+	if (have_timing_info && !perf_cpu_map__empty(cpus)) {
 		if (perf_can_record_switch_events()) {
 			bool cpu_wide = !target__none(&opts->target) &&
 					!target__has_task(&opts->target);
@@ -760,7 +760,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
 	}
@@ -784,7 +784,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->immediate = true;

 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus)) {
+		if (!perf_cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
 			/* And the CPU for switch events */
 			perf_evsel__set_sample_bit(tracking_evsel, CPU);
@@ -796,7 +796,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Warn the user when we do not have enough information to decode i.e.
 	 * per-cpu with no sched_switch (except workload-only).
 	 */
-	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
+	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
 	    !target__none(&opts->target))
 		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
...
@@ -2059,7 +2059,7 @@ static int setup_nodes(struct perf_session *session)
 		nodes[node] = set;

 		/* empty node, skip */
-		if (cpu_map__empty(map))
+		if (perf_cpu_map__empty(map))
 			continue;

 		for (cpu = 0; cpu < map->nr; cpu++) {
...
@@ -928,7 +928,7 @@ static int perf_stat_init_aggr_mode(void)
 	 * the aggregation translate cpumap.
 	 */
 	nr = cpu_map__get_max(evsel_list->core.cpus);
-	stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
+	stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
 	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
@@ -1493,7 +1493,7 @@ int process_stat_config_event(struct perf_session *session,
 	perf_event__read_stat_config(&stat_config, &event->stat_config);

-	if (cpu_map__empty(st->cpus)) {
+	if (perf_cpu_map__empty(st->cpus)) {
 		if (st->aggr_mode != AGGR_UNSET)
 			pr_warning("warning: processing task data, aggregation mode not set\n");
 		return 0;
...
@@ -237,3 +237,8 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
 	return cpus ? cpus->nr : 1;
 }
+
+bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+{
+	return map ? map->map[0] == -1 : true;
+}
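Worth noting: the moved implementation keeps the original semantics from util/cpumap.h, i.e. a map counts as "empty" when it is NULL or when its first entry is -1 (the dummy, per-thread map), not when it has zero elements. A small illustration of that convention, assuming libperf's perf_cpu_map__dummy_new() constructor behaves as elsewhere in this tree (one entry set to -1); this snippet is a sketch, not part of the commit.

#include <perf/cpumap.h>
#include <assert.h>

static void empty_semantics(void)
{
	/* dummy map: a single -1 entry meaning "any CPU", the per-thread case */
	struct perf_cpu_map *dummy  = perf_cpu_map__dummy_new();
	/* real map: concrete CPU numbers for all online CPUs */
	struct perf_cpu_map *online = perf_cpu_map__new(NULL);

	assert(perf_cpu_map__empty(dummy));    /* map[0] == -1   -> true  */
	assert(!perf_cpu_map__empty(online));  /* map[0] is a CPU -> false */
	assert(perf_cpu_map__empty(NULL));     /* NULL map is treated as empty */

	perf_cpu_map__put(dummy);
	perf_cpu_map__put(online);
}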
@@ -4,6 +4,7 @@
 #include <perf/core.h>
 #include <stdio.h>
+#include <stdbool.h>

 struct perf_cpu_map;
@@ -14,6 +15,7 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
 LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
 LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);

 #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)	\
 	for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx);	\
...
@@ -8,6 +8,7 @@ LIBPERF_0.0.1 {
 		perf_cpu_map__read;
 		perf_cpu_map__nr;
 		perf_cpu_map__cpu;
+		perf_cpu_map__empty;
 		perf_thread_map__new_dummy;
 		perf_thread_map__set_pid;
 		perf_thread_map__comm;
...
@@ -21,7 +21,7 @@ static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
 {
 	struct perf_cpu_map *map;

-	map = cpu_map__empty_new(cpus->nr);
+	map = perf_cpu_map__empty_new(cpus->nr);
 	if (map) {
 		unsigned i;
@@ -48,7 +48,7 @@ static struct perf_cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
 	nr = bitmap_weight(mask->mask, nbits);

-	map = cpu_map__empty_new(nr);
+	map = perf_cpu_map__empty_new(nr);
 	if (map) {
 		int cpu, i = 0;
@@ -77,7 +77,7 @@ size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
 #undef BUFSIZE
 }

-struct perf_cpu_map *cpu_map__empty_new(int nr)
+struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
 {
 	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
...
@@ -11,7 +11,7 @@
 #include "perf.h"
 #include "util/debug.h"

-struct perf_cpu_map *cpu_map__empty_new(int nr);
+struct perf_cpu_map *perf_cpu_map__empty_new(int nr);
 struct perf_cpu_map *cpu_map__new_data(struct cpu_map_data *data);
 size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
 size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
@@ -49,11 +49,6 @@ static inline int cpu_map__id_to_cpu(int id)
 	return id & 0xffff;
 }

-static inline bool cpu_map__empty(const struct perf_cpu_map *map)
-{
-	return map ? map->map[0] == -1 : true;
-}
-
 int cpu__setup_cpunode_map(void);

 int cpu__max_node(void);
...
@@ -1055,7 +1055,7 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
 void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
 {
 	size_t size_cpus, size_mask;
-	bool is_dummy = cpu_map__empty(map);
+	bool is_dummy = perf_cpu_map__empty(map);

 	/*
 	 * Both array and mask data have variable size based
...
@@ -386,7 +386,7 @@ static int perf_evlist__enable_event_thread(struct evlist *evlist,
 int perf_evlist__enable_event_idx(struct evlist *evlist,
 				  struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !cpu_map__empty(evlist->core.cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

 	if (per_cpu_mmaps)
 		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
@@ -693,7 +693,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
 	struct perf_mmap *map;

 	evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
-	if (cpu_map__empty(evlist->core.cpus))
+	if (perf_cpu_map__empty(evlist->core.cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
 	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	if (!map)
@@ -1018,7 +1018,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 			return -ENOMEM;
 	}

-	if (cpu_map__empty(cpus))
+	if (perf_cpu_map__empty(cpus))
 		return perf_evlist__mmap_per_thread(evlist, &mp);

 	return perf_evlist__mmap_per_cpu(evlist, &mp);
...
@@ -275,7 +275,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
 	evsel = perf_evlist__last(temp_evlist);

-	if (!evlist || cpu_map__empty(evlist->core.cpus)) {
+	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

 		cpu = cpus ? cpus->map[0] : 0;
...
@@ -223,7 +223,7 @@ static int check_per_pkg(struct evsel *counter,
 	if (!counter->per_pkg)
 		return 0;

-	if (cpu_map__empty(cpus))
+	if (perf_cpu_map__empty(cpus))
 		return 0;

 	if (!mask) {
...