Commit 61e76d53 authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Track pmu in per-CPU cpu_hw_events

Some platforms, e.g. Alder Lake, have hybrid architecture. In the same
package, there may be more than one type of CPU. The PMU capabilities
are different among different types of CPU. Perf will register a
dedicated PMU for each type of CPU.

Add a 'pmu' variable in the struct cpu_hw_events to track the dedicated
PMU of the current CPU.
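
For illustration, the lookup this enables is just a read of the new field; a
minimal sketch, not part of this patch (the helper name is hypothetical, the
structures are the ones touched by the diff below):

  /* Hypothetical helper, for illustration only (assumes kernel context). */
  static struct pmu *example_pmu_of_cpu(unsigned int cpu)
  {
  	/* Each CPU's cpu_hw_events now records the PMU it was registered with. */
  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

  	return cpuc->pmu;
  }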

The current x86_get_pmu() uses the global 'pmu', which will be broken on a
hybrid platform. Modify it to return the 'pmu' of the specific CPU.
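
An illustrative call-site sketch, not part of this patch (the function name
below is hypothetical; the smp_processor_id() pattern mirrors the call-site
updates in the diff below):

  /* Hypothetical caller, for illustration only (assumes kernel context). */
  static void example_update_pmu_caps(void)
  {
  	/* Resolve the PMU that owns the CPU we are currently running on. */
  	struct pmu *pmu = x86_get_pmu(smp_processor_id());

  	/* On a hybrid platform this may be a CPU-type-specific PMU. */
  	pmu->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
  }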

Initialize the per-CPU 'pmu' variable with the global 'pmu'. Nothing changes
for non-hybrid platforms.

is_x86_event() will be updated for hybrid platforms in the later patch
("perf/x86: Register hybrid PMUs"). For non-hybrid platforms, nothing is
changed here.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-4-git-send-email-kan.liang@linux.intel.com
parent 250b3c0d
arch/x86/events/core.c
@@ -45,9 +45,11 @@
 #include "perf_event.h"
 
 struct x86_pmu x86_pmu __read_mostly;
+static struct pmu pmu;
 
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
+	.pmu = &pmu,
 };
 
 DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
@@ -724,16 +726,23 @@ void x86_pmu_enable_all(int added)
 	}
 }
 
-static struct pmu pmu;
-
 static inline int is_x86_event(struct perf_event *event)
 {
 	return event->pmu == &pmu;
 }
 
-struct pmu *x86_get_pmu(void)
+struct pmu *x86_get_pmu(unsigned int cpu)
 {
-	return &pmu;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	/*
+	 * All CPUs of the hybrid type have been offline.
+	 * The x86_get_pmu() should not be invoked.
+	 */
+	if (WARN_ON_ONCE(!cpuc->pmu))
+		return &pmu;
+
+	return cpuc->pmu;
 }
 /*
  * Event scheduler state:
arch/x86/events/intel/core.c
@@ -4876,7 +4876,7 @@ static void update_tfa_sched(void *ignored)
 	 * and if so force schedule out for all event types all contexts
 	 */
 	if (test_bit(3, cpuc->active_mask))
-		perf_pmu_resched(x86_get_pmu());
+		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
 }
 
 static ssize_t show_sysctl_tfa(struct device *cdev,
arch/x86/events/intel/ds.c
@@ -2192,7 +2192,7 @@ void __init intel_ds_init(void)
 					PERF_SAMPLE_TIME;
 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
 				pebs_qual = "-baseline";
-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
+				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
 			} else {
 				/* Only basic record supported */
 				x86_pmu.large_pebs_flags &=
@@ -2207,7 +2207,7 @@ void __init intel_ds_init(void)
 
 			if (x86_pmu.intel_cap.pebs_output_pt_available) {
 				pr_cont("PEBS-via-PT, ");
-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
 			}
 
 			break;
arch/x86/events/intel/lbr.c
@@ -705,7 +705,7 @@ void intel_pmu_lbr_add(struct perf_event *event)
 
 void release_lbr_buffers(void)
 {
-	struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
+	struct kmem_cache *kmem_cache;
 	struct cpu_hw_events *cpuc;
 	int cpu;
 
@@ -714,6 +714,7 @@ void release_lbr_buffers(void)
 
 	for_each_possible_cpu(cpu) {
 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
 		if (kmem_cache && cpuc->lbr_xsave) {
 			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
 			cpuc->lbr_xsave = NULL;
@@ -1609,7 +1610,7 @@ void intel_pmu_lbr_init_hsw(void)
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
 
-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
 	if (lbr_from_signext_quirk_needed())
 		static_branch_enable(&lbr_from_quirk_key);
@@ -1629,7 +1630,7 @@ __init void intel_pmu_lbr_init_skl(void)
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
 
-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
 	/*
 	 * SW branch filter usage:
@@ -1726,7 +1727,7 @@ static bool is_arch_lbr_xsave_available(void)
 
 void __init intel_pmu_arch_lbr_init(void)
 {
-	struct pmu *pmu = x86_get_pmu();
+	struct pmu *pmu = x86_get_pmu(smp_processor_id());
 	union cpuid28_eax eax;
 	union cpuid28_ebx ebx;
 	union cpuid28_ecx ecx;
arch/x86/events/perf_event.h
@@ -326,6 +326,8 @@ struct cpu_hw_events {
 	int				n_pair; /* Large increment events */
 
 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
+
+	struct pmu			*pmu;
 };
 
 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
@@ -904,7 +906,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {		\
 	.event_str_ht	= ht,						\
 }
 
-struct pmu *x86_get_pmu(void);
+struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)