Commit 3e5deb70 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf cpumap: Clean up use of perf_cpu_map__has_any_cpu_or_is_empty

Most uses of what was perf_cpu_map__empty but is now
perf_cpu_map__has_any_cpu_or_is_empty want to do something with the
CPU map if it contains CPUs. Replace uses of
perf_cpu_map__has_any_cpu_or_is_empty with other helpers so that CPUs
within the map can be handled.
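
Purely as an illustration of the pattern being cleaned up (the count_cpus_*() and first_cpu_or_fallback_*() wrappers below are hypothetical; only the perf_cpu_map__*() calls are taken from the hunks in this commit), the before/after shapes look roughly like this:

	#include <perf/cpumap.h>	/* libperf: struct perf_cpu_map, struct perf_cpu */

	/* Old shape: guard against an empty/"any CPU" map, then iterate. */
	static int count_cpus_old(struct perf_cpu_map *map)
	{
		struct perf_cpu cpu;
		int idx, n = 0;

		if (perf_cpu_map__has_any_cpu_or_is_empty(map))
			return 0;
		perf_cpu_map__for_each_cpu(cpu, idx, map)
			n++;
		return n;
	}

	/* New shape: the _skip_any iterator visits only real CPUs, so an empty
	 * map simply yields zero iterations and no pre-check is needed. */
	static int count_cpus_new(struct perf_cpu_map *map)
	{
		struct perf_cpu cpu;
		int idx, n = 0;

		perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map)
			n++;
		return n;
	}

	/* Old shape: emptiness check, then take the CPU at index 0. */
	static int first_cpu_or_fallback_old(struct perf_cpu_map *map, int fallback)
	{
		if (perf_cpu_map__has_any_cpu_or_is_empty(map))
			return fallback;
		return perf_cpu_map__cpu(map, 0).cpu;
	}

	/* New shape: rely on perf_cpu_map__min() yielding a .cpu of -1 for an
	 * empty map, mirroring the cpu__get_cache_id_from_map() hunk below. */
	static int first_cpu_or_fallback_new(struct perf_cpu_map *map, int fallback)
	{
		int cpu = perf_cpu_map__min(map).cpu;

		return cpu == -1 ? fallback : cpu;
	}

In each case the explicit emptiness test disappears because the replacement helper already behaves correctly for an empty map or for a map containing only the "any CPU" (-1) entry.
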
Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: André Almeida <andrealmeid@igalia.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Atish Patra <atishp@rivosinc.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Link: https://lore.kernel.org/r/20240202234057.2085863-6-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 291dcd77
@@ -2319,11 +2319,7 @@ static int setup_nodes(struct perf_session *session)
 
 		nodes[node] = set;
 
-		/* empty node, skip */
-		if (perf_cpu_map__has_any_cpu_or_is_empty(map))
-			continue;
-
-		perf_cpu_map__for_each_cpu(cpu, idx, map) {
+		perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
 			__set_bit(cpu.cpu, set);
 
 			if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
@@ -1319,10 +1319,9 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
 	 * be the first online CPU in the cache domain else use the
 	 * first online CPU of the cache domain as the ID.
 	 */
-	if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
+	id = perf_cpu_map__min(cpu_map).cpu;
+	if (id == -1)
 		id = cpu.cpu;
-	else
-		id = perf_cpu_map__cpu(cpu_map, 0).cpu;
 
 	/* Free the perf_cpu_map used to find the cache ID */
 	perf_cpu_map__put(cpu_map);
@@ -1642,7 +1641,7 @@ static int perf_stat_init_aggr_mode(void)
 	 * taking the highest cpu number to be the size of
 	 * the aggregation translate cpumap.
 	 */
-	if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
+	if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
 		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
 	else
 		nr = 0;
@@ -2334,7 +2333,7 @@ int process_stat_config_event(struct perf_session *session,
 
 	perf_event__read_stat_config(&stat_config, &event->stat_config);
 
-	if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) {
+	if (perf_cpu_map__is_empty(st->cpus)) {
 		if (st->aggr_mode != AGGR_UNSET)
 			pr_warning("warning: processing task data, aggregation mode not set\n");
 	} else if (st->aggr_mode != AGGR_UNSET) {
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 				   struct evlist *evlist,
 				   struct evsel *evsel, int idx)
 {
-	bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
+	bool per_cpu = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);
 
 	mp->mmap_needed = evsel->needs_auxtrace_mmap;
@@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 
 static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);
 
 	if (per_cpu_mmaps) {
 		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
@@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
 	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
+	if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
 
 		if (cpus)
@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
 	if (!counter->per_pkg)
 		return 0;
 
-	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+	if (perf_cpu_map__is_any_cpu_or_is_empty(cpus))
 		return 0;
 
 	if (!mask) {