Commit 6c833bb9 authored by Will Deacon

arm64: perf: Allow standard PMUv3 events to be extended by the CPU type

Rather than continue adding CPU-specific event maps, instead look up by
default in the PMUv3 event map and only fallback to the CPU-specific maps
if either the event isn't described by PMUv3, or it is described but
the PMCEID registers say that it is unsupported by the current CPU.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent c1be2ddb
...@@ -921,7 +921,13 @@ static void armv8pmu_reset(void *info) ...@@ -921,7 +921,13 @@ static void armv8pmu_reset(void *info)
ARMV8_PMU_PMCR_LC); ARMV8_PMU_PMCR_LC);
} }
static int armv8_pmuv3_map_event(struct perf_event *event) static int __armv8_pmuv3_map_event(struct perf_event *event,
const unsigned (*extra_event_map)
[PERF_COUNT_HW_MAX],
const unsigned (*extra_cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX])
{ {
int hw_event_id; int hw_event_id;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
...@@ -929,44 +935,44 @@ static int armv8_pmuv3_map_event(struct perf_event *event) ...@@ -929,44 +935,44 @@ static int armv8_pmuv3_map_event(struct perf_event *event)
hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map, hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map, &armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT); ARMV8_PMU_EVTYPE_EVENT);
if (hw_event_id < 0)
return hw_event_id;
/* disable micro/arch events not supported by this PMU */ /* Onl expose micro/arch events supported by this PMU */
if ((hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) && if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
!test_bit(hw_event_id, armpmu->pmceid_bitmap)) { && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
return -EOPNOTSUPP; return hw_event_id;
} }
return hw_event_id; return armpmu_map_event(event, extra_event_map, extra_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, NULL);
} }
static int armv8_a53_map_event(struct perf_event *event) static int armv8_a53_map_event(struct perf_event *event)
{ {
return armpmu_map_event(event, &armv8_a53_perf_map, return __armv8_pmuv3_map_event(event, &armv8_a53_perf_map,
&armv8_a53_perf_cache_map, &armv8_a53_perf_cache_map);
ARMV8_PMU_EVTYPE_EVENT);
} }
static int armv8_a57_map_event(struct perf_event *event) static int armv8_a57_map_event(struct perf_event *event)
{ {
return armpmu_map_event(event, &armv8_a57_perf_map, return __armv8_pmuv3_map_event(event, &armv8_a57_perf_map,
&armv8_a57_perf_cache_map, &armv8_a57_perf_cache_map);
ARMV8_PMU_EVTYPE_EVENT);
} }
static int armv8_thunder_map_event(struct perf_event *event) static int armv8_thunder_map_event(struct perf_event *event)
{ {
return armpmu_map_event(event, &armv8_thunder_perf_map, return __armv8_pmuv3_map_event(event, &armv8_thunder_perf_map,
&armv8_thunder_perf_cache_map, &armv8_thunder_perf_cache_map);
ARMV8_PMU_EVTYPE_EVENT);
} }
static int armv8_vulcan_map_event(struct perf_event *event) static int armv8_vulcan_map_event(struct perf_event *event)
{ {
return armpmu_map_event(event, &armv8_vulcan_perf_map, return __armv8_pmuv3_map_event(event, &armv8_vulcan_perf_map,
&armv8_vulcan_perf_cache_map, &armv8_vulcan_perf_cache_map);
ARMV8_PMU_EVTYPE_EVENT);
} }
struct armv8pmu_probe_info { struct armv8pmu_probe_info {
......
...@@ -47,6 +47,9 @@ armpmu_map_cache_event(const unsigned (*cache_map) ...@@ -47,6 +47,9 @@ armpmu_map_cache_event(const unsigned (*cache_map)
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL; return -EINVAL;
if (!cache_map)
return -ENOENT;
ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED) if (ret == CACHE_OP_UNSUPPORTED)
...@@ -63,6 +66,9 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) ...@@ -63,6 +66,9 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
if (config >= PERF_COUNT_HW_MAX) if (config >= PERF_COUNT_HW_MAX)
return -EINVAL; return -EINVAL;
if (!event_map)
return -ENOENT;
mapping = (*event_map)[config]; mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment