Commit 796bfadd authored by Mathieu Poirier, committed by Arnaldo Carvalho de Melo

perf cs-etm: Properly deal with cpu maps

This patch allows the CoreSight AUX info section to fit topologies where
only a subset of all available CPUs are present, avoiding at the same
time accessing the ETM configuration areas of CPUs that have been
offlined.
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1518478737-24649-1-git-send-email-mathieu.poirier@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent d2785de1
...@@ -298,12 +298,17 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, ...@@ -298,12 +298,17 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
{ {
int i; int i;
int etmv3 = 0, etmv4 = 0; int etmv3 = 0, etmv4 = 0;
const struct cpu_map *cpus = evlist->cpus; struct cpu_map *event_cpus = evlist->cpus;
struct cpu_map *online_cpus = cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */ /* cpu map is not empty, we have specific CPUs to work with */
if (!cpu_map__empty(cpus)) { if (!cpu_map__empty(event_cpus)) {
for (i = 0; i < cpu_map__nr(cpus); i++) { for (i = 0; i < cpu__max_cpu(); i++) {
if (cs_etm_is_etmv4(itr, cpus->map[i])) if (!cpu_map__has(event_cpus, i) ||
!cpu_map__has(online_cpus, i))
continue;
if (cs_etm_is_etmv4(itr, i))
etmv4++; etmv4++;
else else
etmv3++; etmv3++;
...@@ -311,6 +316,9 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, ...@@ -311,6 +316,9 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
} else { } else {
/* get configuration for all CPUs in the system */ /* get configuration for all CPUs in the system */
for (i = 0; i < cpu__max_cpu(); i++) { for (i = 0; i < cpu__max_cpu(); i++) {
if (!cpu_map__has(online_cpus, i))
continue;
if (cs_etm_is_etmv4(itr, i)) if (cs_etm_is_etmv4(itr, i))
etmv4++; etmv4++;
else else
...@@ -318,6 +326,8 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, ...@@ -318,6 +326,8 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
} }
} }
cpu_map__put(online_cpus);
return (CS_ETM_HEADER_SIZE + return (CS_ETM_HEADER_SIZE +
(etmv4 * CS_ETMV4_PRIV_SIZE) + (etmv4 * CS_ETMV4_PRIV_SIZE) +
(etmv3 * CS_ETMV3_PRIV_SIZE)); (etmv3 * CS_ETMV3_PRIV_SIZE));
...@@ -447,7 +457,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, ...@@ -447,7 +457,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
int i; int i;
u32 offset; u32 offset;
u64 nr_cpu, type; u64 nr_cpu, type;
const struct cpu_map *cpus = session->evlist->cpus; struct cpu_map *cpu_map;
struct cpu_map *event_cpus = session->evlist->cpus;
struct cpu_map *online_cpus = cpu_map__new(NULL);
struct cs_etm_recording *ptr = struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr); container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
...@@ -458,8 +470,21 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, ...@@ -458,8 +470,21 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
if (!session->evlist->nr_mmaps) if (!session->evlist->nr_mmaps)
return -EINVAL; return -EINVAL;
/* If the cpu_map is empty all CPUs are involved */ /* If the cpu_map is empty all online CPUs are involved */
nr_cpu = cpu_map__empty(cpus) ? cpu__max_cpu() : cpu_map__nr(cpus); if (cpu_map__empty(event_cpus)) {
cpu_map = online_cpus;
} else {
/* Make sure all specified CPUs are online */
for (i = 0; i < cpu_map__nr(event_cpus); i++) {
if (cpu_map__has(event_cpus, i) &&
!cpu_map__has(online_cpus, i))
return -EINVAL;
}
cpu_map = event_cpus;
}
nr_cpu = cpu_map__nr(cpu_map);
/* Get PMU type as dynamically assigned by the core */ /* Get PMU type as dynamically assigned by the core */
type = cs_etm_pmu->type; type = cs_etm_pmu->type;
...@@ -472,15 +497,11 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, ...@@ -472,15 +497,11 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
offset = CS_ETM_SNAPSHOT + 1; offset = CS_ETM_SNAPSHOT + 1;
/* cpu map is not empty, we have specific CPUs to work with */ for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
if (!cpu_map__empty(cpus)) { if (cpu_map__has(cpu_map, i))
for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
} else {
/* get configuration for all CPUs in the system */
for (i = 0; i < cpu__max_cpu(); i++)
cs_etm_get_metadata(i, &offset, itr, info); cs_etm_get_metadata(i, &offset, itr, info);
}
cpu_map__put(online_cpus);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment