Commit d4b294bf authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Hybrid PMU support for counters

The number of GP and fixed counters differs among hybrid PMUs. Each
hybrid PMU should use its own counter-related information.

When handling a given hybrid PMU, apply the number of counters from
that hybrid PMU; the per-PMU value is looked up through the hybrid()
accessor, as sketched below.
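
A minimal sketch of that per-PMU lookup pattern (the real hybrid()
accessor lives in arch/x86/events/perf_event.h and takes the event's
struct pmu): read counter-related fields from the matching hybrid PMU
when the system is hybrid, and fall back to the global x86_pmu value
otherwise. All types, names, and counter counts below are illustrative
stand-ins, not kernel code.

  #include <stdio.h>

  struct hybrid_pmu_caps {
          int num_counters;               /* GP counters of this hybrid PMU */
          int num_counters_fixed;         /* fixed counters of this hybrid PMU */
  };

  struct pmu_caps {
          int num_counters;               /* architectural value, valid on all cores */
          int num_counters_fixed;
          int num_hybrid_pmus;            /* 0 on non-hybrid systems */
          struct hybrid_pmu_caps *hybrid_pmu;
  };

  /* Per-PMU value when the system is hybrid, global fallback otherwise. */
  static int hybrid_num_counters(const struct pmu_caps *caps, int pmu_idx)
  {
          if (caps->num_hybrid_pmus && pmu_idx >= 0)
                  return caps->hybrid_pmu[pmu_idx].num_counters;
          return caps->num_counters;
  }

  int main(void)
  {
          /* e.g. a big-core PMU with 8 GP counters and a small-core PMU with 6 */
          struct hybrid_pmu_caps pmus[] = { { 8, 4 }, { 6, 4 } };
          struct pmu_caps caps = { .num_counters = 4, .num_counters_fixed = 3,
                                   .num_hybrid_pmus = 2, .hybrid_pmu = pmus };

          printf("PMU 0 GP counters: %d\n", hybrid_num_counters(&caps, 0));
          printf("PMU 1 GP counters: %d\n", hybrid_num_counters(&caps, 1));
          printf("non-hybrid fallback: %d\n", hybrid_num_counters(&caps, -1));
          return 0;
  }

The point of the dispatch is that code handed a cpuc->pmu never needs to
know which core type it runs on; the accessor hides that.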

When reserving the counters during the initialization of a new event,
reserve all possible counters, i.e. the maximum number of counters
exposed by any hybrid PMU, so that events scheduled later on either
core type find their counters reserved.
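
The get_possible_num_counters() helper added below implements that
policy; a standalone sketch of the same idea, with made-up PMU names
and counter counts, might look like this:

  #include <stdio.h>

  struct hybrid_caps {
          const char *name;
          int num_counters;
  };

  /* Maximum GP-counter count over all hybrid PMUs, at least the architectural value. */
  static int possible_num_counters(const struct hybrid_caps *pmus, int n,
                                   int arch_num_counters)
  {
          int i, max = arch_num_counters;

          for (i = 0; i < n; i++)
                  if (pmus[i].num_counters > max)
                          max = pmus[i].num_counters;
          return max;
  }

  int main(void)
  {
          /* e.g. a big-core PMU with 8 GP counters and a small-core PMU with 6 */
          struct hybrid_caps pmus[] = { { "cpu_core", 8 }, { "cpu_atom", 6 } };

          printf("counters to reserve: %d\n", possible_num_counters(pmus, 2, 4));
          return 0;
  }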

The number of counters recorded in the global x86_pmu is for the
architectural counters, which are available on all hybrid PMUs. KVM
doesn't support the hybrid PMU yet, so return the number of
architectural counters for now.

Functions that are only used on older platforms, e.g.
intel_pmu_drain_pebs_nhm(), are left unchanged.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-7-git-send-email-kan.liang@linux.intel.com
parent fc4b8fca
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -185,16 +185,29 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
+static inline int get_possible_num_counters(void)
+{
+        int i, num_counters = x86_pmu.num_counters;
+
+        if (!is_hybrid())
+                return num_counters;
+
+        for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
+                num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+
+        return num_counters;
+}
+
 static bool reserve_pmc_hardware(void)
 {
-        int i;
+        int i, num_counters = get_possible_num_counters();
 
-        for (i = 0; i < x86_pmu.num_counters; i++) {
+        for (i = 0; i < num_counters; i++) {
                 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                         goto perfctr_fail;
         }
 
-        for (i = 0; i < x86_pmu.num_counters; i++) {
+        for (i = 0; i < num_counters; i++) {
                 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                         goto eventsel_fail;
         }
@@ -205,7 +218,7 @@ static bool reserve_pmc_hardware(void)
         for (i--; i >= 0; i--)
                 release_evntsel_nmi(x86_pmu_config_addr(i));
 
-        i = x86_pmu.num_counters;
+        i = num_counters;
 
 perfctr_fail:
         for (i--; i >= 0; i--)
@@ -216,9 +229,9 @@ static bool reserve_pmc_hardware(void)
 
 static void release_pmc_hardware(void)
 {
-        int i;
+        int i, num_counters = get_possible_num_counters();
 
-        for (i = 0; i < x86_pmu.num_counters; i++) {
+        for (i = 0; i < num_counters; i++) {
                 release_perfctr_nmi(x86_pmu_event_addr(i));
                 release_evntsel_nmi(x86_pmu_config_addr(i));
         }
@@ -946,6 +959,7 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
 
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
+        int num_counters = hybrid(cpuc->pmu, num_counters);
         struct event_constraint *c;
         struct perf_event *e;
         int n0, i, wmin, wmax, unsched = 0;
@@ -1021,7 +1035,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 
         /* slow path */
         if (i != n) {
-                int gpmax = x86_pmu.num_counters;
+                int gpmax = num_counters;
 
                 /*
                  * Do not allow scheduling of more than half the available
@@ -1042,7 +1056,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                  * the extra Merge events needed by large increment events.
                  */
                 if (x86_pmu.flags & PMU_FL_PAIR) {
-                        gpmax = x86_pmu.num_counters - cpuc->n_pair;
+                        gpmax = num_counters - cpuc->n_pair;
                         WARN_ON(gpmax <= 0);
                 }
 
@@ -1129,10 +1143,12 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
  */
 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
 {
+        int num_counters = hybrid(cpuc->pmu, num_counters);
+        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
         struct perf_event *event;
         int n, max_count;
 
-        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
+        max_count = num_counters + num_counters_fixed;
 
         /* current number of events already accepted */
         n = cpuc->n_events;
@@ -1499,18 +1515,18 @@ void perf_event_print_debug(void)
 {
         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
         u64 pebs, debugctl;
-        struct cpu_hw_events *cpuc;
+        int cpu = smp_processor_id();
+        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+        int num_counters = hybrid(cpuc->pmu, num_counters);
+        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
         unsigned long flags;
-        int cpu, idx;
+        int idx;
 
-        if (!x86_pmu.num_counters)
+        if (!num_counters)
                 return;
 
         local_irq_save(flags);
 
-        cpu = smp_processor_id();
-        cpuc = &per_cpu(cpu_hw_events, cpu);
-
         if (x86_pmu.version >= 2) {
                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1533,7 +1549,7 @@ void perf_event_print_debug(void)
         }
         pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+        for (idx = 0; idx < num_counters; idx++) {
                 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
@@ -1546,7 +1562,7 @@ void perf_event_print_debug(void)
                 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
                         cpu, idx, prev_left);
         }
-        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+        for (idx = 0; idx < num_counters_fixed; idx++) {
                 if (fixed_counter_disabled(idx, cpuc->pmu))
                         continue;
                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -2781,6 +2797,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 {
         cap->version = x86_pmu.version;
+        /*
+         * KVM doesn't support the hybrid PMU yet.
+         * Return the common value in global x86_pmu,
+         * which available for all cores.
+         */
         cap->num_counters_gp = x86_pmu.num_counters;
         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
         cap->bit_width_gp = x86_pmu.cntval_bits;
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2711,21 +2711,23 @@ static void intel_pmu_reset(void)
 {
         struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+        int num_counters = hybrid(cpuc->pmu, num_counters);
         unsigned long flags;
         int idx;
 
-        if (!x86_pmu.num_counters)
+        if (!num_counters)
                 return;
 
         local_irq_save(flags);
 
         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
-        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+        for (idx = 0; idx < num_counters; idx++) {
                 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
                 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
         }
-        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+        for (idx = 0; idx < num_counters_fixed; idx++) {
                 if (fixed_counter_disabled(idx, cpuc->pmu))
                         continue;
                 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1007,6 +1007,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
         struct debug_store *ds = cpuc->ds;
+        int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
+        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
         u64 threshold;
         int reserved;
 
@@ -1014,9 +1016,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
                 return;
 
         if (x86_pmu.flags & PMU_FL_PEBS_ALL)
-                reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+                reserved = max_pebs_events + num_counters_fixed;
         else
-                reserved = x86_pmu.max_pebs_events;
+                reserved = max_pebs_events;
 
         if (cpuc->n_pebs == cpuc->n_large_pebs) {
                 threshold = ds->pebs_absolute_maximum -
@@ -2072,6 +2074,8 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
 {
         short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+        int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
+        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
         struct debug_store *ds = cpuc->ds;
         struct perf_event *event;
         void *base, *at, *top;
@@ -2086,9 +2090,9 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
 
         ds->pebs_index = ds->pebs_buffer_base;
 
-        mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
-               (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
-        size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+        mask = ((1ULL << max_pebs_events) - 1) |
+               (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
+        size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
 
         if (unlikely(base >= top)) {
                 intel_pmu_pebs_event_update_no_drain(cpuc, size);
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -635,6 +635,9 @@ struct x86_hybrid_pmu {
         struct pmu pmu;
         union perf_capabilities intel_cap;
         u64 intel_ctrl;
+        int max_pebs_events;
+        int num_counters;
+        int num_counters_fixed;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -850,6 +853,7 @@ struct x86_pmu {
          * are available for all PMUs. The hybrid_pmu only includes the
          * unique capabilities.
          */
+        int num_hybrid_pmus;
         struct x86_hybrid_pmu *hybrid_pmu;
 };