Commit 0c2822b1 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 regression fix from Will Deacon:
 "Apologies for the _extremely_ late pull request here, but we had a
  'perf' (i.e. CPU PMU) regression on the Apple M1 reported on Wednesday
  [1] which was introduced by bd275681 ("perf: Rewrite core context
  handling") during the merge window.

  Mark and I looked into this and noticed an additional problem caused
  by the same patch, where the 'CHAIN' event (used to combine two
  adjacent 32-bit counters into a single 64-bit counter) was not being
  filtered correctly. Mark posted a series on Thursday [2] which
  addresses both of these regressions and I queued it the same day.

  The changes are small, self-contained and have been confirmed to fix
  the original regression.

  Summary:

   - Fix 'perf' regression for non-standard CPU PMU hardware (i.e. Apple
     M1)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: perf: reject CHAIN events at creation time
  arm_pmu: fix event CPU filtering
parents 0e9fd589 853e2dac
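
Background on the 'CHAIN' event described in the pull message above, as a minimal sketch rather than kernel code: ARMv8 PMUv3 counters are 32 bits wide by default, and programming an odd-numbered counter with the architectural CHAIN event makes it increment on overflow of the even-numbered counter below it, so software can read the pair back as one 64-bit value. The helper name read_chained_pair is hypothetical, for illustration only:

#include <stdint.h>

/* Illustrative only: reading a chained counter pair as one 64-bit count.
 * "even" counts the event proper; "odd" was programmed with CHAIN, so it
 * advanced once per overflow of "even" and forms the upper 32 bits. */
static uint64_t read_chained_pair(uint32_t even, uint32_t odd)
{
	return ((uint64_t)odd << 32) | even;
}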
@@ -1023,12 +1023,6 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 	return 0;
 }
 
-static bool armv8pmu_filter(struct pmu *pmu, int cpu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	return !cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
-}
-
 static void armv8pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1069,6 +1063,14 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
 				       &armv8_pmuv3_perf_cache_map,
 				       ARMV8_PMU_EVTYPE_EVENT);
 
+	/*
+	 * CHAIN events only work when paired with an adjacent counter, and it
+	 * never makes sense for a user to open one in isolation, as they'll be
+	 * rotated arbitrarily.
+	 */
+	if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
+		return -EINVAL;
+
 	if (armv8pmu_event_is_64bit(event))
 		event->hw.flags |= ARMPMU_EVT_64BIT;
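
To see what the new check rejects, here is a hedged userspace sketch, not from the patch: the architectural CHAIN event is raw event number 0x1E, and opening it in isolation now fails at event-creation time with EINVAL instead of being scheduled alone and rotated arbitrarily. perf_event_open() and the attribute fields are the real Linux API; the expected errno is an inference from the hunk above:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1E;	/* ARMV8_PMUV3_PERFCTR_CHAIN */

	/* pid 0 = this thread, cpu -1 = any CPU, no group fd, no flags */
	long fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");	/* EINVAL expected after this fix */
	return 0;
}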
@@ -1258,7 +1260,6 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
 	cpu_pmu->stop = armv8pmu_stop;
 	cpu_pmu->reset = armv8pmu_reset;
 	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
-	cpu_pmu->filter = armv8pmu_filter;
 
 	cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
@@ -550,13 +550,7 @@ static void armpmu_disable(struct pmu *pmu)
 static bool armpmu_filter(struct pmu *pmu, int cpu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	bool ret;
-
-	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-	if (ret && armpmu->filter)
-		return armpmu->filter(pmu, cpu);
-
-	return ret;
+	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
 }
 
 static ssize_t cpus_show(struct device *dev,
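
The hunk above is the crux of the regression: with the core-context rewrite, pmu::filter answers "should events for this PMU be skipped on this CPU?", so it must return true for unsupported CPUs. The old generic code returned false (don't skip) for unsupported CPUs, and for supported CPUs deferred to the arm64 override (removed in the first hunk), which tested the calling CPU via smp_processor_id() rather than the CPU it was asked about. A minimal sketch of the two polarities; cpu_supported(), filter_fixed(), and filter_broken() are hypothetical stand-ins, with cpu_supported() playing the role of cpumask_test_cpu(cpu, &armpmu->supported_cpus):

#include <stdbool.h>

/* Hypothetical stand-in for the supported_cpus test: pretend only
 * CPUs 4-7 (say, the big cores) have this PMU. */
static bool cpu_supported(int cpu)
{
	return cpu >= 4 && cpu <= 7;
}

/* Fixed sense: filter out (skip) events on CPUs the PMU cannot count on. */
static bool filter_fixed(int cpu)
{
	return !cpu_supported(cpu);
}

/* Broken sense: events were skipped on exactly the CPUs that *do*
 * support the PMU, which is how counting broke on the Apple M1's
 * heterogeneous PMUs. */
static bool filter_broken(int cpu)
{
	return cpu_supported(cpu);
}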
@@ -100,7 +100,6 @@ struct arm_pmu {
 	void (*stop)(struct arm_pmu *);
 	void (*reset)(void *);
 	int (*map_event)(struct perf_event *event);
-	bool (*filter)(struct pmu *pmu, int cpu);
 	int num_events;
 	bool secure_access; /* 32-bit ARM only */
 #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40