Commit add76959 authored by Linus Torvalds

Merge tag 'perf-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf events updates from Ingo Molnar:

 - Thoroughly rewrite the data structures that implement perf task
   context handling, with the goal of fixing various quirks and
   misfeatures both in already-merged code and in upcoming proposed code.

   The old data structures were the per-task and per-CPU
   perf_event_contexts:

         task_struct::perf_events_ctxp[] <-> perf_event_context <-> perf_cpu_context
              ^                                 |    ^     |           ^
              `---------------------------------'    |     `--> pmu ---'
                                                     v           ^
                                                perf_event ------'

   In the new design this is replaced with a single task context and a
   single CPU context, plus intermediate data structures:

         task_struct::perf_event_ctxp -> perf_event_context <- perf_cpu_context
              ^                           |   ^ ^
              `---------------------------'   | |
                                              | |    perf_cpu_pmu_context <--.
                                              | `----.    ^                  |
                                              |      |    |                  |
                                              |      v    v                  |
                                              | ,--> perf_event_pmu_context  |
                                              | |                            |
                                              | |                            |
                                              v v                            |
                                         perf_event ---> pmu ----------------'

   [ See commit bd275681 for more details. ]

   This rewrite was developed by Peter Zijlstra and Ravi Bangoria; a
   minimal sketch of the new linkage follows this list.

 - Optimize perf_tp_event()

 - Update the Intel uncore PMU driver, extending it with UPI topology
   discovery on various hardware models.

 - Misc fixes & cleanups

* tag 'perf-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  perf/x86/intel/uncore: Fix reference count leak in __uncore_imc_init_box()
  perf/x86/intel/uncore: Fix reference count leak in snr_uncore_mmio_map()
  perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox()
  perf/x86/intel/uncore: Fix reference count leak in sad_cfg_iio_topology()
  perf/x86/intel/uncore: Make set_mapping() procedure void
  perf/x86/intel/uncore: Update sysfs-devices-mapping file
  perf/x86/intel/uncore: Enable UPI topology discovery for Sapphire Rapids
  perf/x86/intel/uncore: Enable UPI topology discovery for Icelake Server
  perf/x86/intel/uncore: Get UPI NodeID and GroupID
  perf/x86/intel/uncore: Enable UPI topology discovery for Skylake Server
  perf/x86/intel/uncore: Generalize get_topology() for SKX PMUs
  perf/x86/intel/uncore: Disable I/O stacks to PMU mapping on ICX-D
  perf/x86/intel/uncore: Clear attr_update properly
  perf/x86/intel/uncore: Introduce UPI topology type
  perf/x86/intel/uncore: Generalize IIO topology support
  perf/core: Don't allow grouping events from different hw pmus
  perf/amd/ibs: Make IBS a core pmu
  perf: Fix function pointer case
  perf/x86/amd: Remove the repeated declaration
  perf: Fix possible memleak in pmu_dev_alloc()
  ...
parents 617fe4fa 17b8d847
What: /sys/devices/uncore_iio_x/dieX What: /sys/devices/uncore_iio_x/dieX
Date: February 2020 Date: February 2020
Contact: Roman Sudarikov <roman.sudarikov@linux.intel.com> Contact: Alexander Antonov <alexander.antonov@linux.intel.com>
Description: Description:
Each IIO stack (PCIe root port) has its own IIO PMON block, so Each IIO stack (PCIe root port) has its own IIO PMON block, so
each dieX file (where X is die number) holds "Segment:Root Bus" each dieX file (where X is die number) holds "Segment:Root Bus"
...@@ -32,3 +32,31 @@ Description: ...@@ -32,3 +32,31 @@ Description:
IIO PMU 0 on die 1 belongs to PCI RP on bus 0x40, domain 0x0000 IIO PMU 0 on die 1 belongs to PCI RP on bus 0x40, domain 0x0000
IIO PMU 0 on die 2 belongs to PCI RP on bus 0x80, domain 0x0000 IIO PMU 0 on die 2 belongs to PCI RP on bus 0x80, domain 0x0000
IIO PMU 0 on die 3 belongs to PCI RP on bus 0xc0, domain 0x0000 IIO PMU 0 on die 3 belongs to PCI RP on bus 0xc0, domain 0x0000
What: /sys/devices/uncore_upi_x/dieX
Date: March 2022
Contact: Alexander Antonov <alexander.antonov@linux.intel.com>
Description:
Each /sys/devices/uncore_upi_X/dieY file holds "upi_Z,die_W"
value that means UPI link number X on die Y is connected to UPI
link Z on die W and this link between sockets can be monitored
by UPI PMON block.
For example, 4-die Sapphire Rapids platform has the following
UPI 0 topology::
# tail /sys/devices/uncore_upi_0/die*
==> /sys/devices/uncore_upi_0/die0 <==
upi_1,die_1
==> /sys/devices/uncore_upi_0/die1 <==
upi_0,die_3
==> /sys/devices/uncore_upi_0/die2 <==
upi_1,die_3
==> /sys/devices/uncore_upi_0/die3 <==
upi_0,die_1
Which means::
UPI link 0 on die 0 is connected to UPI link 1 on die 1
UPI link 0 on die 1 is connected to UPI link 0 on die 3
UPI link 0 on die 2 is connected to UPI link 1 on die 3
UPI link 0 on die 3 is connected to UPI link 0 on die 1
\ No newline at end of file
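
A minimal user-space sketch (not part of the patch) that parses the documented
"upi_Z,die_W" format from one of the files described above; the sysfs path is
taken from the ABI text, everything else is illustrative:

        #include <stdio.h>

        int main(void)
        {
                int link_to, die_to;
                FILE *f = fopen("/sys/devices/uncore_upi_0/die0", "r");

                if (!f)
                        return 1;
                /* The file holds a single "upi_Z,die_W" line, as documented above. */
                if (fscanf(f, "upi_%d,die_%d", &link_to, &die_to) == 2)
                        printf("UPI link 0 on die 0 is connected to UPI link %d on die %d\n",
                               link_to, die_to);
                fclose(f);
                return 0;
        }
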
...@@ -806,10 +806,14 @@ static void armv8pmu_disable_event(struct perf_event *event) ...@@ -806,10 +806,14 @@ static void armv8pmu_disable_event(struct perf_event *event)
static void armv8pmu_start(struct arm_pmu *cpu_pmu) static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{ {
struct perf_event_context *task_ctx = struct perf_event_context *ctx;
this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx; int nr_user = 0;
if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user) ctx = perf_cpu_task_ctx();
if (ctx)
nr_user = ctx->nr_user;
if (sysctl_perf_user_access && nr_user)
armv8pmu_enable_user_access(cpu_pmu); armv8pmu_enable_user_access(cpu_pmu);
else else
armv8pmu_disable_user_access(); armv8pmu_disable_user_access();
...@@ -1019,10 +1023,10 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, ...@@ -1019,10 +1023,10 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
return 0; return 0;
} }
static int armv8pmu_filter_match(struct perf_event *event) static bool armv8pmu_filter(struct pmu *pmu, int cpu)
{ {
unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT; struct arm_pmu *armpmu = to_arm_pmu(pmu);
return evtype != ARMV8_PMUV3_PERFCTR_CHAIN; return !cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
} }
static void armv8pmu_reset(void *info) static void armv8pmu_reset(void *info)
...@@ -1254,7 +1258,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, ...@@ -1254,7 +1258,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
cpu_pmu->stop = armv8pmu_stop; cpu_pmu->stop = armv8pmu_stop;
cpu_pmu->reset = armv8pmu_reset; cpu_pmu->reset = armv8pmu_reset;
cpu_pmu->set_event_filter = armv8pmu_set_event_filter; cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match; cpu_pmu->filter = armv8pmu_filter;
cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx; cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
......
...@@ -132,7 +132,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) ...@@ -132,7 +132,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {} static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { } static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */ #endif /* CONFIG_PPC32 */
...@@ -424,7 +424,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event) ...@@ -424,7 +424,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
cpuhw->bhrb_context = event->ctx; cpuhw->bhrb_context = event->ctx;
} }
cpuhw->bhrb_users++; cpuhw->bhrb_users++;
perf_sched_cb_inc(event->ctx->pmu); perf_sched_cb_inc(event->pmu);
} }
static void power_pmu_bhrb_disable(struct perf_event *event) static void power_pmu_bhrb_disable(struct perf_event *event)
...@@ -436,7 +436,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event) ...@@ -436,7 +436,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
WARN_ON_ONCE(!cpuhw->bhrb_users); WARN_ON_ONCE(!cpuhw->bhrb_users);
cpuhw->bhrb_users--; cpuhw->bhrb_users--;
perf_sched_cb_dec(event->ctx->pmu); perf_sched_cb_dec(event->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) { if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other /* BHRB cannot be turned off when other
...@@ -451,7 +451,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event) ...@@ -451,7 +451,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to /* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch. * mingle with the other process's entries during context switch.
*/ */
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
if (!ppmu->bhrb_nr) if (!ppmu->bhrb_nr)
return; return;
......
...@@ -377,7 +377,7 @@ static int paicrypt_push_sample(void) ...@@ -377,7 +377,7 @@ static int paicrypt_push_sample(void)
/* Called on schedule-in and schedule-out. No access to event structure, /* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event CRYPTO_ALL is allowed. * but for sampling only event CRYPTO_ALL is allowed.
*/ */
static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in) static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
/* We started with a clean page on event installation. So read out /* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, clear values. * results on schedule_out and if page was dirty, clear values.
......
...@@ -466,7 +466,7 @@ static int paiext_push_sample(void) ...@@ -466,7 +466,7 @@ static int paiext_push_sample(void)
/* Called on schedule-in and schedule-out. No access to event structure, /* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event NNPA_ALL is allowed. * but for sampling only event NNPA_ALL is allowed.
*/ */
static void paiext_sched_task(struct perf_event_context *ctx, bool sched_in) static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
/* We started with a clean page on event installation. So read out /* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, clear values. * results on schedule_out and if page was dirty, clear values.
......
...@@ -384,7 +384,7 @@ static void amd_brs_poison_buffer(void) ...@@ -384,7 +384,7 @@ static void amd_brs_poison_buffer(void)
* On ctxswin, sched_in = true, called after the PMU has started * On ctxswin, sched_in = true, called after the PMU has started
* On ctxswout, sched_in = false, called before the PMU is stopped * On ctxswout, sched_in = false, called before the PMU is stopped
*/ */
void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in) void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
......
...@@ -631,7 +631,7 @@ static const struct attribute_group *op_attr_update[] = { ...@@ -631,7 +631,7 @@ static const struct attribute_group *op_attr_update[] = {
static struct perf_ibs perf_ibs_fetch = { static struct perf_ibs perf_ibs_fetch = {
.pmu = { .pmu = {
.task_ctx_nr = perf_invalid_context, .task_ctx_nr = perf_hw_context,
.event_init = perf_ibs_init, .event_init = perf_ibs_init,
.add = perf_ibs_add, .add = perf_ibs_add,
...@@ -655,7 +655,7 @@ static struct perf_ibs perf_ibs_fetch = { ...@@ -655,7 +655,7 @@ static struct perf_ibs perf_ibs_fetch = {
static struct perf_ibs perf_ibs_op = { static struct perf_ibs perf_ibs_op = {
.pmu = { .pmu = {
.task_ctx_nr = perf_invalid_context, .task_ctx_nr = perf_hw_context,
.event_init = perf_ibs_init, .event_init = perf_ibs_init,
.add = perf_ibs_add, .add = perf_ibs_add,
......
...@@ -352,7 +352,7 @@ void amd_pmu_lbr_add(struct perf_event *event) ...@@ -352,7 +352,7 @@ void amd_pmu_lbr_add(struct perf_event *event)
cpuc->br_sel = reg->reg; cpuc->br_sel = reg->reg;
} }
perf_sched_cb_inc(event->ctx->pmu); perf_sched_cb_inc(event->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running) if (!cpuc->lbr_users++ && !event->total_time_running)
amd_pmu_lbr_reset(); amd_pmu_lbr_reset();
...@@ -370,10 +370,10 @@ void amd_pmu_lbr_del(struct perf_event *event) ...@@ -370,10 +370,10 @@ void amd_pmu_lbr_del(struct perf_event *event)
cpuc->lbr_users--; cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_users < 0);
perf_sched_cb_dec(event->ctx->pmu); perf_sched_cb_dec(event->pmu);
} }
void amd_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
......
...@@ -90,6 +90,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx); ...@@ -90,6 +90,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases); DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
/* /*
* This one is magic, it will get called even when PMU init fails (because * This one is magic, it will get called even when PMU init fails (because
* there is no PMU), in which case it should simply return NULL. * there is no PMU), in which case it should simply return NULL.
...@@ -2031,6 +2033,7 @@ static void x86_pmu_static_call_update(void) ...@@ -2031,6 +2033,7 @@ static void x86_pmu_static_call_update(void)
static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases); static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs); static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
static_call_update(x86_pmu_filter, x86_pmu.filter);
} }
static void _x86_pmu_read(struct perf_event *event) static void _x86_pmu_read(struct perf_event *event)
...@@ -2052,23 +2055,6 @@ void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, ...@@ -2052,23 +2055,6 @@ void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
pr_info("... event mask: %016Lx\n", intel_ctrl); pr_info("... event mask: %016Lx\n", intel_ctrl);
} }
/*
* The generic code is not hybrid friendly. The hybrid_pmu->pmu
* of the first registered PMU is unconditionally assigned to
* each possible cpuctx->ctx.pmu.
* Update the correct hybrid PMU to the cpuctx->ctx.pmu.
*/
void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu)
{
struct perf_cpu_context *cpuctx;
if (!pmu->pmu_cpu_context)
return;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx->ctx.pmu = pmu;
}
static int __init init_hw_perf_events(void) static int __init init_hw_perf_events(void)
{ {
struct x86_pmu_quirk *quirk; struct x86_pmu_quirk *quirk;
...@@ -2175,13 +2161,9 @@ static int __init init_hw_perf_events(void) ...@@ -2175,13 +2161,9 @@ static int __init init_hw_perf_events(void)
if (err) if (err)
goto out2; goto out2;
} else { } else {
u8 cpu_type = get_this_hybrid_cpu_type();
struct x86_hybrid_pmu *hybrid_pmu; struct x86_hybrid_pmu *hybrid_pmu;
int i, j; int i, j;
if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
cpu_type = x86_pmu.get_hybrid_cpu_type();
for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
hybrid_pmu = &x86_pmu.hybrid_pmu[i]; hybrid_pmu = &x86_pmu.hybrid_pmu[i];
...@@ -2195,9 +2177,6 @@ static int __init init_hw_perf_events(void) ...@@ -2195,9 +2177,6 @@ static int __init init_hw_perf_events(void)
(hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1); (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
if (err) if (err)
break; break;
if (cpu_type == hybrid_pmu->cpu_type)
x86_pmu_update_cpu_context(&hybrid_pmu->pmu, raw_smp_processor_id());
} }
if (i < x86_pmu.num_hybrid_pmus) { if (i < x86_pmu.num_hybrid_pmus) {
...@@ -2646,15 +2625,15 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { ...@@ -2646,15 +2625,15 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
NULL, NULL,
}; };
static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
static_call_cond(x86_pmu_sched_task)(ctx, sched_in); static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in);
} }
static void x86_pmu_swap_task_ctx(struct perf_event_context *prev, static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_context *next) struct perf_event_pmu_context *next_epc)
{ {
static_call_cond(x86_pmu_swap_task_ctx)(prev, next); static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
} }
void perf_check_microcode(void) void perf_check_microcode(void)
...@@ -2689,12 +2668,13 @@ static int x86_pmu_aux_output_match(struct perf_event *event) ...@@ -2689,12 +2668,13 @@ static int x86_pmu_aux_output_match(struct perf_event *event)
return 0; return 0;
} }
static int x86_pmu_filter_match(struct perf_event *event) static bool x86_pmu_filter(struct pmu *pmu, int cpu)
{ {
if (x86_pmu.filter_match) bool ret = false;
return x86_pmu.filter_match(event);
return 1; static_call_cond(x86_pmu_filter)(pmu, cpu, &ret);
return ret;
} }
static struct pmu pmu = { static struct pmu pmu = {
...@@ -2725,7 +2705,7 @@ static struct pmu pmu = { ...@@ -2725,7 +2705,7 @@ static struct pmu pmu = {
.aux_output_match = x86_pmu_aux_output_match, .aux_output_match = x86_pmu_aux_output_match,
.filter_match = x86_pmu_filter_match, .filter = x86_pmu_filter,
}; };
void arch_perf_update_userpage(struct perf_event *event, void arch_perf_update_userpage(struct perf_event *event,
......
...@@ -4536,8 +4536,6 @@ static bool init_hybrid_pmu(int cpu) ...@@ -4536,8 +4536,6 @@ static bool init_hybrid_pmu(int cpu)
cpumask_set_cpu(cpu, &pmu->supported_cpus); cpumask_set_cpu(cpu, &pmu->supported_cpus);
cpuc->pmu = &pmu->pmu; cpuc->pmu = &pmu->pmu;
x86_pmu_update_cpu_context(&pmu->pmu, cpu);
return true; return true;
} }
...@@ -4671,17 +4669,17 @@ static void intel_pmu_cpu_dead(int cpu) ...@@ -4671,17 +4669,17 @@ static void intel_pmu_cpu_dead(int cpu)
cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
} }
static void intel_pmu_sched_task(struct perf_event_context *ctx, static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
bool sched_in) bool sched_in)
{ {
intel_pmu_pebs_sched_task(ctx, sched_in); intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
intel_pmu_lbr_sched_task(ctx, sched_in); intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
} }
static void intel_pmu_swap_task_ctx(struct perf_event_context *prev, static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_context *next) struct perf_event_pmu_context *next_epc)
{ {
intel_pmu_lbr_swap_task_ctx(prev, next); intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
} }
static int intel_pmu_check_period(struct perf_event *event, u64 value) static int intel_pmu_check_period(struct perf_event *event, u64 value)
...@@ -4705,12 +4703,11 @@ static int intel_pmu_aux_output_match(struct perf_event *event) ...@@ -4705,12 +4703,11 @@ static int intel_pmu_aux_output_match(struct perf_event *event)
return is_intel_pt_event(event); return is_intel_pt_event(event);
} }
static int intel_pmu_filter_match(struct perf_event *event) static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
{ {
struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
unsigned int cpu = smp_processor_id();
return cpumask_test_cpu(cpu, &pmu->supported_cpus); *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
} }
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
...@@ -6413,7 +6410,7 @@ __init int intel_pmu_init(void) ...@@ -6413,7 +6410,7 @@ __init int intel_pmu_init(void)
static_call_update(intel_pmu_set_topdown_event_period, static_call_update(intel_pmu_set_topdown_event_period,
&adl_set_topdown_event_period); &adl_set_topdown_event_period);
x86_pmu.filter_match = intel_pmu_filter_match; x86_pmu.filter = intel_pmu_filter;
x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.get_event_constraints = adl_get_event_constraints;
x86_pmu.hw_config = adl_hw_config; x86_pmu.hw_config = adl_hw_config;
x86_pmu.limit_period = spr_limit_period; x86_pmu.limit_period = spr_limit_period;
......
...@@ -1069,7 +1069,7 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc) ...@@ -1069,7 +1069,7 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
} }
void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in) void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
...@@ -1177,7 +1177,7 @@ static void ...@@ -1177,7 +1177,7 @@ static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add) struct perf_event *event, bool add)
{ {
struct pmu *pmu = event->ctx->pmu; struct pmu *pmu = event->pmu;
/* /*
* Make sure we get updated with the first PEBS * Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but * event. It will trigger also during removal, but
......
...@@ -515,21 +515,21 @@ static void __intel_pmu_lbr_save(void *ctx) ...@@ -515,21 +515,21 @@ static void __intel_pmu_lbr_save(void *ctx)
cpuc->last_log_id = ++task_context_opt(ctx)->log_id; cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
} }
void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev, void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_context *next) struct perf_event_pmu_context *next_epc)
{ {
void *prev_ctx_data, *next_ctx_data; void *prev_ctx_data, *next_ctx_data;
swap(prev->task_ctx_data, next->task_ctx_data); swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
/* /*
* Architecture specific synchronization makes sense in * Architecture specific synchronization makes sense in case
* case both prev->task_ctx_data and next->task_ctx_data * both prev_epc->task_ctx_data and next_epc->task_ctx_data
* pointers are allocated. * pointers are allocated.
*/ */
prev_ctx_data = next->task_ctx_data; prev_ctx_data = next_epc->task_ctx_data;
next_ctx_data = prev->task_ctx_data; next_ctx_data = prev_epc->task_ctx_data;
if (!prev_ctx_data || !next_ctx_data) if (!prev_ctx_data || !next_ctx_data)
return; return;
...@@ -538,7 +538,7 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev, ...@@ -538,7 +538,7 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
task_context_opt(next_ctx_data)->lbr_callstack_users); task_context_opt(next_ctx_data)->lbr_callstack_users);
} }
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
void *task_ctx; void *task_ctx;
...@@ -551,7 +551,7 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) ...@@ -551,7 +551,7 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
* the task was scheduled out, restore the stack. Otherwise flush * the task was scheduled out, restore the stack. Otherwise flush
* the LBR stack. * the LBR stack.
*/ */
task_ctx = ctx ? ctx->task_ctx_data : NULL; task_ctx = pmu_ctx ? pmu_ctx->task_ctx_data : NULL;
if (task_ctx) { if (task_ctx) {
if (sched_in) if (sched_in)
__intel_pmu_lbr_restore(task_ctx); __intel_pmu_lbr_restore(task_ctx);
...@@ -587,8 +587,8 @@ void intel_pmu_lbr_add(struct perf_event *event) ...@@ -587,8 +587,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
cpuc->br_sel = event->hw.branch_reg.reg; cpuc->br_sel = event->hw.branch_reg.reg;
if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data)
task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++; task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users++;
/* /*
* Request pmu::sched_task() callback, which will fire inside the * Request pmu::sched_task() callback, which will fire inside the
...@@ -611,7 +611,7 @@ void intel_pmu_lbr_add(struct perf_event *event) ...@@ -611,7 +611,7 @@ void intel_pmu_lbr_add(struct perf_event *event)
*/ */
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
cpuc->lbr_pebs_users++; cpuc->lbr_pebs_users++;
perf_sched_cb_inc(event->ctx->pmu); perf_sched_cb_inc(event->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running) if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset(); intel_pmu_lbr_reset();
} }
...@@ -664,8 +664,8 @@ void intel_pmu_lbr_del(struct perf_event *event) ...@@ -664,8 +664,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
return; return;
if (branch_user_callstack(cpuc->br_sel) && if (branch_user_callstack(cpuc->br_sel) &&
event->ctx->task_ctx_data) event->pmu_ctx->task_ctx_data)
task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--; task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users--;
if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
cpuc->lbr_select = 0; cpuc->lbr_select = 0;
...@@ -675,7 +675,7 @@ void intel_pmu_lbr_del(struct perf_event *event) ...@@ -675,7 +675,7 @@ void intel_pmu_lbr_del(struct perf_event *event)
cpuc->lbr_users--; cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_users < 0);
WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
perf_sched_cb_dec(event->ctx->pmu); perf_sched_cb_dec(event->pmu);
} }
static inline bool vlbr_exclude_host(void) static inline bool vlbr_exclude_host(void)
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
...@@ -88,12 +89,12 @@ struct intel_uncore_type { ...@@ -88,12 +89,12 @@ struct intel_uncore_type {
* to identify which platform component each PMON block of that type is * to identify which platform component each PMON block of that type is
* supposed to monitor. * supposed to monitor.
*/ */
struct intel_uncore_topology *topology; struct intel_uncore_topology **topology;
/* /*
* Optional callbacks for managing mapping of Uncore units to PMONs * Optional callbacks for managing mapping of Uncore units to PMONs
*/ */
int (*get_topology)(struct intel_uncore_type *type); int (*get_topology)(struct intel_uncore_type *type);
int (*set_mapping)(struct intel_uncore_type *type); void (*set_mapping)(struct intel_uncore_type *type);
void (*cleanup_mapping)(struct intel_uncore_type *type); void (*cleanup_mapping)(struct intel_uncore_type *type);
}; };
...@@ -178,11 +179,26 @@ struct freerunning_counters { ...@@ -178,11 +179,26 @@ struct freerunning_counters {
unsigned *box_offsets; unsigned *box_offsets;
}; };
struct intel_uncore_topology { struct uncore_iio_topology {
u64 configuration; int pci_bus_no;
int segment; int segment;
}; };
struct uncore_upi_topology {
int die_to;
int pmu_idx_to;
int enabled;
};
struct intel_uncore_topology {
int pmu_idx;
union {
void *untyped;
struct uncore_iio_topology *iio;
struct uncore_upi_topology *upi;
};
};
struct pci2phy_map { struct pci2phy_map {
struct list_head list; struct list_head list;
int segment; int segment;
......
...@@ -1338,6 +1338,7 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box, ...@@ -1338,6 +1338,7 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
/* MCHBAR is disabled */ /* MCHBAR is disabled */
if (!(mch_bar & BIT(0))) { if (!(mch_bar & BIT(0))) {
pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
pci_dev_put(pdev);
return; return;
} }
mch_bar &= ~BIT(0); mch_bar &= ~BIT(0);
...@@ -1352,6 +1353,8 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box, ...@@ -1352,6 +1353,8 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
box->io_addr = ioremap(addr, type->mmio_map_size); box->io_addr = ioremap(addr, type->mmio_map_size);
if (!box->io_addr) if (!box->io_addr)
pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
pci_dev_put(pdev);
} }
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
......
...@@ -445,6 +445,7 @@ ...@@ -445,6 +445,7 @@
#define ICX_UPI_PCI_PMON_CTR0 0x320 #define ICX_UPI_PCI_PMON_CTR0 0x320
#define ICX_UPI_PCI_PMON_BOX_CTL 0x318 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318
#define ICX_UPI_CTL_UMASK_EXT 0xffffff #define ICX_UPI_CTL_UMASK_EXT 0xffffff
#define ICX_UBOX_DID 0x3450
/* ICX M3UPI*/ /* ICX M3UPI*/
#define ICX_M3UPI_PCI_PMON_CTL0 0xd8 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8
...@@ -457,6 +458,7 @@ ...@@ -457,6 +458,7 @@
/* SPR */ /* SPR */
#define SPR_RAW_EVENT_MASK_EXT 0xffffff #define SPR_RAW_EVENT_MASK_EXT 0xffffff
#define SPR_UBOX_DID 0x3250
/* SPR CHA */ /* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN (1 << 16) #define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
...@@ -1372,6 +1374,28 @@ static struct pci_driver snbep_uncore_pci_driver = { ...@@ -1372,6 +1374,28 @@ static struct pci_driver snbep_uncore_pci_driver = {
#define NODE_ID_MASK 0x7 #define NODE_ID_MASK 0x7
/* Each three bits from 0 to 23 of GIDNIDMAP register correspond Node ID. */
#define GIDNIDMAP(config, id) (((config) >> (3 * (id))) & 0x7)
static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
int *nodeid, int *groupid)
{
int ret;
/* get the Node ID of the local register */
ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
if (ret)
goto err;
*nodeid = *nodeid & NODE_ID_MASK;
/* get the Node ID mapping */
ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
if (ret)
goto err;
err:
return ret;
}
/* /*
* build pci bus to socket mapping * build pci bus to socket mapping
*/ */
...@@ -1397,13 +1421,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool ...@@ -1397,13 +1421,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
* the topology. * the topology.
*/ */
if (nr_node_ids <= 8) { if (nr_node_ids <= 8) {
/* get the Node ID of the local register */ err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); &nodeid, &config);
if (err)
break;
nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
if (err) if (err)
break; break;
...@@ -1421,7 +1440,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool ...@@ -1421,7 +1440,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
* to a particular node. * to a particular node.
*/ */
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
if (nodeid == ((config >> (3 * i)) & 0x7)) { if (nodeid == GIDNIDMAP(config, i)) {
if (topology_max_die_per_package() > 1) if (topology_max_die_per_package() > 1)
die_id = i; die_id = i;
else else
...@@ -2891,6 +2910,7 @@ static bool hswep_has_limit_sbox(unsigned int device) ...@@ -2891,6 +2910,7 @@ static bool hswep_has_limit_sbox(unsigned int device)
return false; return false;
pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
pci_dev_put(dev);
if (!hswep_get_chop(capid4)) if (!hswep_get_chop(capid4))
return true; return true;
...@@ -3699,10 +3719,16 @@ static struct intel_uncore_ops skx_uncore_iio_ops = { ...@@ -3699,10 +3719,16 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
.read_counter = uncore_msr_read_counter, .read_counter = uncore_msr_read_counter,
}; };
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die) static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
{ {
return pmu->type->topology[die].configuration >> int idx;
(pmu->pmu_idx * BUS_NUM_STRIDE);
for (idx = 0; idx < pmu->type->num_boxes; idx++) {
if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
return &pmu->type->topology[die][idx];
}
return NULL;
} }
static umode_t static umode_t
...@@ -3710,8 +3736,9 @@ pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, ...@@ -3710,8 +3736,9 @@ pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
int die, int zero_bus_pmu) int die, int zero_bus_pmu)
{ {
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode; return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
} }
static umode_t static umode_t
...@@ -3727,9 +3754,10 @@ static ssize_t skx_iio_mapping_show(struct device *dev, ...@@ -3727,9 +3754,10 @@ static ssize_t skx_iio_mapping_show(struct device *dev,
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
long die = (long)ea->var; long die = (long)ea->var;
struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment, return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
skx_iio_stack(pmu, die)); pmut ? pmut->iio->pci_bus_no : 0);
} }
static int skx_msr_cpu_bus_read(int cpu, u64 *topology) static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
...@@ -3764,18 +3792,79 @@ static int die_to_cpu(int die) ...@@ -3764,18 +3792,79 @@ static int die_to_cpu(int die)
return res; return res;
} }
static int skx_iio_get_topology(struct intel_uncore_type *type) enum {
IIO_TOPOLOGY_TYPE,
UPI_TOPOLOGY_TYPE,
TOPOLOGY_MAX
};
static const size_t topology_size[TOPOLOGY_MAX] = {
sizeof(*((struct intel_uncore_topology *)NULL)->iio),
sizeof(*((struct intel_uncore_topology *)NULL)->upi)
};
static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
{ {
int die, ret = -EPERM; int die, idx;
struct intel_uncore_topology **topology;
if (!type->num_boxes)
return -EPERM;
type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
GFP_KERNEL); if (!topology)
if (!type->topology) goto err;
return -ENOMEM;
for (die = 0; die < uncore_max_dies(); die++) { for (die = 0; die < uncore_max_dies(); die++) {
ret = skx_msr_cpu_bus_read(die_to_cpu(die), topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
&type->topology[die].configuration); if (!topology[die])
goto clear;
for (idx = 0; idx < type->num_boxes; idx++) {
topology[die][idx].untyped = kcalloc(type->num_boxes,
topology_size[topology_type],
GFP_KERNEL);
if (!topology[die][idx].untyped)
goto clear;
}
}
type->topology = topology;
return 0;
clear:
for (; die >= 0; die--) {
for (idx = 0; idx < type->num_boxes; idx++)
kfree(topology[die][idx].untyped);
kfree(topology[die]);
}
kfree(topology);
err:
return -ENOMEM;
}
static void pmu_free_topology(struct intel_uncore_type *type)
{
int die, idx;
if (type->topology) {
for (die = 0; die < uncore_max_dies(); die++) {
for (idx = 0; idx < type->num_boxes; idx++)
kfree(type->topology[die][idx].untyped);
kfree(type->topology[die]);
}
kfree(type->topology);
type->topology = NULL;
}
}
static int skx_pmu_get_topology(struct intel_uncore_type *type,
int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
{
int die, ret = -EPERM;
u64 cpu_bus_msr;
for (die = 0; die < uncore_max_dies(); die++) {
ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
if (ret) if (ret)
break; break;
...@@ -3783,15 +3872,33 @@ static int skx_iio_get_topology(struct intel_uncore_type *type) ...@@ -3783,15 +3872,33 @@ static int skx_iio_get_topology(struct intel_uncore_type *type)
if (ret < 0) if (ret < 0)
break; break;
type->topology[die].segment = ret; ret = topology_cb(type, ret, die, cpu_bus_msr);
if (ret)
break;
} }
if (ret < 0) { return ret;
kfree(type->topology); }
type->topology = NULL;
static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
int die, u64 cpu_bus_msr)
{
int idx;
struct intel_uncore_topology *t;
for (idx = 0; idx < type->num_boxes; idx++) {
t = &type->topology[die][idx];
t->pmu_idx = idx;
t->iio->segment = segment;
t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
} }
return ret; return 0;
}
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
return skx_pmu_get_topology(type, skx_iio_topology_cb);
} }
static struct attribute_group skx_iio_mapping_group = { static struct attribute_group skx_iio_mapping_group = {
...@@ -3803,8 +3910,25 @@ static const struct attribute_group *skx_iio_attr_update[] = { ...@@ -3803,8 +3910,25 @@ static const struct attribute_group *skx_iio_attr_update[] = {
NULL, NULL,
}; };
static int static void pmu_clear_mapping_attr(const struct attribute_group **groups,
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) struct attribute_group *ag)
{
int i;
for (i = 0; groups[i]; i++) {
if (groups[i] == ag) {
for (i++; groups[i]; i++)
groups[i - 1] = groups[i];
groups[i - 1] = NULL;
break;
}
}
}
static void
pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
ssize_t (*show)(struct device*, struct device_attribute*, char*),
int topology_type)
{ {
char buf[64]; char buf[64];
int ret; int ret;
...@@ -3812,11 +3936,13 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) ...@@ -3812,11 +3936,13 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
struct attribute **attrs = NULL; struct attribute **attrs = NULL;
struct dev_ext_attribute *eas = NULL; struct dev_ext_attribute *eas = NULL;
ret = type->get_topology(type); ret = pmu_alloc_topology(type, topology_type);
if (ret < 0) if (ret < 0)
goto clear_attr_update; goto clear_attr_update;
ret = -ENOMEM; ret = type->get_topology(type);
if (ret < 0)
goto clear_topology;
/* One more for NULL. */ /* One more for NULL. */
attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL); attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
...@@ -3828,20 +3954,20 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) ...@@ -3828,20 +3954,20 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
goto clear_attrs; goto clear_attrs;
for (die = 0; die < uncore_max_dies(); die++) { for (die = 0; die < uncore_max_dies(); die++) {
sprintf(buf, "die%ld", die); snprintf(buf, sizeof(buf), "die%ld", die);
sysfs_attr_init(&eas[die].attr.attr); sysfs_attr_init(&eas[die].attr.attr);
eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL); eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
if (!eas[die].attr.attr.name) if (!eas[die].attr.attr.name)
goto err; goto err;
eas[die].attr.attr.mode = 0444; eas[die].attr.attr.mode = 0444;
eas[die].attr.show = skx_iio_mapping_show; eas[die].attr.show = show;
eas[die].attr.store = NULL; eas[die].attr.store = NULL;
eas[die].var = (void *)die; eas[die].var = (void *)die;
attrs[die] = &eas[die].attr.attr; attrs[die] = &eas[die].attr.attr;
} }
ag->attrs = attrs; ag->attrs = attrs;
return 0; return;
err: err:
for (; die >= 0; die--) for (; die >= 0; die--)
kfree(eas[die].attr.attr.name); kfree(eas[die].attr.attr.name);
...@@ -3849,14 +3975,13 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) ...@@ -3849,14 +3975,13 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
clear_attrs: clear_attrs:
kfree(attrs); kfree(attrs);
clear_topology: clear_topology:
kfree(type->topology); pmu_free_topology(type);
clear_attr_update: clear_attr_update:
type->attr_update = NULL; pmu_clear_mapping_attr(type->attr_update, ag);
return ret;
} }
static void static void
pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag) pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{ {
struct attribute **attr = ag->attrs; struct attribute **attr = ag->attrs;
...@@ -3868,17 +3993,23 @@ pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group * ...@@ -3868,17 +3993,23 @@ pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *
kfree(attr_to_ext_attr(*ag->attrs)); kfree(attr_to_ext_attr(*ag->attrs));
kfree(ag->attrs); kfree(ag->attrs);
ag->attrs = NULL; ag->attrs = NULL;
kfree(type->topology); pmu_free_topology(type);
}
static void
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
} }
static int skx_iio_set_mapping(struct intel_uncore_type *type) static void skx_iio_set_mapping(struct intel_uncore_type *type)
{ {
return pmu_iio_set_mapping(type, &skx_iio_mapping_group); pmu_iio_set_mapping(type, &skx_iio_mapping_group);
} }
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{ {
pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group); pmu_cleanup_mapping(type, &skx_iio_mapping_group);
} }
static struct intel_uncore_type skx_uncore_iio = { static struct intel_uncore_type skx_uncore_iio = {
...@@ -4139,6 +4270,132 @@ static struct intel_uncore_ops skx_upi_uncore_pci_ops = { ...@@ -4139,6 +4270,132 @@ static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
.read_counter = snbep_uncore_pci_read_counter, .read_counter = snbep_uncore_pci_read_counter,
}; };
static umode_t
skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
}
static ssize_t skx_upi_mapping_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
long die = (long)ea->var;
struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
}
#define SKX_UPI_REG_DID 0x2058
#define SKX_UPI_REGS_ADDR_DEVICE_LINK0 0x0e
#define SKX_UPI_REGS_ADDR_FUNCTION 0x00
/*
* UPI Link Parameter 0
* | Bit | Default | Description
* | 19:16 | 0h | base_nodeid - The NodeID of the sending socket.
* | 12:8 | 00h | sending_port - The processor die port number of the sending port.
*/
#define SKX_KTILP0_OFFSET 0x94
/*
* UPI Pcode Status. This register is used by PCode to store the link training status.
* | Bit | Default | Description
* | 4 | 0h | ll_status_valid — Bit indicates the valid training status
* logged from PCode to the BIOS.
*/
#define SKX_KTIPCSTS_OFFSET 0x120
static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
int pmu_idx)
{
int ret;
u32 upi_conf;
struct uncore_upi_topology *upi = tp->upi;
tp->pmu_idx = pmu_idx;
ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
if (ret) {
ret = pcibios_err_to_errno(ret);
goto err;
}
upi->enabled = (upi_conf >> 4) & 1;
if (upi->enabled) {
ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
&upi_conf);
if (ret) {
ret = pcibios_err_to_errno(ret);
goto err;
}
upi->die_to = (upi_conf >> 16) & 0xf;
upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
}
err:
return ret;
}
static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
int die, u64 cpu_bus_msr)
{
int idx, ret;
struct intel_uncore_topology *upi;
unsigned int devfn;
struct pci_dev *dev = NULL;
u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
for (idx = 0; idx < type->num_boxes; idx++) {
upi = &type->topology[die][idx];
devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
SKX_UPI_REGS_ADDR_FUNCTION);
dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
if (dev) {
ret = upi_fill_topology(dev, upi, idx);
if (ret)
break;
}
}
pci_dev_put(dev);
return ret;
}
static int skx_upi_get_topology(struct intel_uncore_type *type)
{
/* CPX case is not supported */
if (boot_cpu_data.x86_stepping == 11)
return -EPERM;
return skx_pmu_get_topology(type, skx_upi_topology_cb);
}
static struct attribute_group skx_upi_mapping_group = {
.is_visible = skx_upi_mapping_visible,
};
static const struct attribute_group *skx_upi_attr_update[] = {
&skx_upi_mapping_group,
NULL
};
static void
pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
}
static void skx_upi_set_mapping(struct intel_uncore_type *type)
{
pmu_upi_set_mapping(type, &skx_upi_mapping_group);
}
static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
pmu_cleanup_mapping(type, &skx_upi_mapping_group);
}
static struct intel_uncore_type skx_uncore_upi = { static struct intel_uncore_type skx_uncore_upi = {
.name = "upi", .name = "upi",
.num_counters = 4, .num_counters = 4,
...@@ -4151,6 +4408,10 @@ static struct intel_uncore_type skx_uncore_upi = { ...@@ -4151,6 +4408,10 @@ static struct intel_uncore_type skx_uncore_upi = {
.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
.ops = &skx_upi_uncore_pci_ops, .ops = &skx_upi_uncore_pci_ops,
.format_group = &skx_upi_uncore_format_group, .format_group = &skx_upi_uncore_format_group,
.attr_update = skx_upi_attr_update,
.get_topology = skx_upi_get_topology,
.set_mapping = skx_upi_set_mapping,
.cleanup_mapping = skx_upi_cleanup_mapping,
}; };
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box) static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
...@@ -4461,11 +4722,6 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map ...@@ -4461,11 +4722,6 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
int die, stack_id, ret = -EPERM; int die, stack_id, ret = -EPERM;
struct pci_dev *dev = NULL; struct pci_dev *dev = NULL;
type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
GFP_KERNEL);
if (!type->topology)
return -ENOMEM;
while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) { while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg); ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
if (ret) { if (ret) {
...@@ -4483,14 +4739,12 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map ...@@ -4483,14 +4739,12 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
/* Convert stack id from SAD_CONTROL to PMON notation. */ /* Convert stack id from SAD_CONTROL to PMON notation. */
stack_id = sad_pmon_mapping[stack_id]; stack_id = sad_pmon_mapping[stack_id];
((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number; type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
type->topology[die].segment = pci_domain_nr(dev->bus); type->topology[die][stack_id].pmu_idx = stack_id;
type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
} }
if (ret) { pci_dev_put(dev);
kfree(type->topology);
type->topology = NULL;
}
return ret; return ret;
} }
...@@ -4519,14 +4773,14 @@ static int snr_iio_get_topology(struct intel_uncore_type *type) ...@@ -4519,14 +4773,14 @@ static int snr_iio_get_topology(struct intel_uncore_type *type)
return sad_cfg_iio_topology(type, snr_sad_pmon_mapping); return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
} }
static int snr_iio_set_mapping(struct intel_uncore_type *type) static void snr_iio_set_mapping(struct intel_uncore_type *type)
{ {
return pmu_iio_set_mapping(type, &snr_iio_mapping_group); pmu_iio_set_mapping(type, &snr_iio_mapping_group);
} }
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type) static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{ {
pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group); pmu_cleanup_mapping(type, &snr_iio_mapping_group);
} }
static struct event_constraint snr_uncore_iio_constraints[] = { static struct event_constraint snr_uncore_iio_constraints[] = {
...@@ -4857,6 +5111,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box, ...@@ -4857,6 +5111,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box,
addr += box_ctl; addr += box_ctl;
pci_dev_put(pdev);
box->io_addr = ioremap(addr, type->mmio_map_size); box->io_addr = ioremap(addr, type->mmio_map_size);
if (!box->io_addr) { if (!box->io_addr) {
pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
...@@ -5137,14 +5393,19 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) ...@@ -5137,14 +5393,19 @@ static int icx_iio_get_topology(struct intel_uncore_type *type)
return sad_cfg_iio_topology(type, icx_sad_pmon_mapping); return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
} }
static int icx_iio_set_mapping(struct intel_uncore_type *type) static void icx_iio_set_mapping(struct intel_uncore_type *type)
{ {
return pmu_iio_set_mapping(type, &icx_iio_mapping_group); /* Detect ICX-D system. This case is not supported */
if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
return;
}
pmu_iio_set_mapping(type, &icx_iio_mapping_group);
} }
static void icx_iio_cleanup_mapping(struct intel_uncore_type *type) static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
{ {
pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group); pmu_cleanup_mapping(type, &icx_iio_mapping_group);
} }
static struct intel_uncore_type icx_uncore_iio = { static struct intel_uncore_type icx_uncore_iio = {
...@@ -5337,6 +5598,76 @@ static const struct attribute_group icx_upi_uncore_format_group = { ...@@ -5337,6 +5598,76 @@ static const struct attribute_group icx_upi_uncore_format_group = {
.attrs = icx_upi_uncore_formats_attr, .attrs = icx_upi_uncore_formats_attr,
}; };
#define ICX_UPI_REGS_ADDR_DEVICE_LINK0 0x02
#define ICX_UPI_REGS_ADDR_FUNCTION 0x01
static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
{
struct pci_dev *ubox = NULL;
struct pci_dev *dev = NULL;
u32 nid, gid;
int i, idx, ret = -EPERM;
struct intel_uncore_topology *upi;
unsigned int devfn;
/* GIDNIDMAP method supports machines which have less than 8 sockets. */
if (uncore_max_dies() > 8)
goto err;
while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
if (ret) {
ret = pcibios_err_to_errno(ret);
break;
}
for (i = 0; i < 8; i++) {
if (nid != GIDNIDMAP(gid, i))
continue;
for (idx = 0; idx < type->num_boxes; idx++) {
upi = &type->topology[nid][idx];
devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
ubox->bus->number,
devfn);
if (dev) {
ret = upi_fill_topology(dev, upi, idx);
if (ret)
goto err;
}
}
}
}
err:
pci_dev_put(ubox);
pci_dev_put(dev);
return ret;
}
static int icx_upi_get_topology(struct intel_uncore_type *type)
{
return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
}
static struct attribute_group icx_upi_mapping_group = {
.is_visible = skx_upi_mapping_visible,
};
static const struct attribute_group *icx_upi_attr_update[] = {
&icx_upi_mapping_group,
NULL
};
static void icx_upi_set_mapping(struct intel_uncore_type *type)
{
pmu_upi_set_mapping(type, &icx_upi_mapping_group);
}
static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
pmu_cleanup_mapping(type, &icx_upi_mapping_group);
}
static struct intel_uncore_type icx_uncore_upi = { static struct intel_uncore_type icx_uncore_upi = {
.name = "upi", .name = "upi",
.num_counters = 4, .num_counters = 4,
...@@ -5349,6 +5680,10 @@ static struct intel_uncore_type icx_uncore_upi = { ...@@ -5349,6 +5680,10 @@ static struct intel_uncore_type icx_uncore_upi = {
.box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
.ops = &skx_upi_uncore_pci_ops, .ops = &skx_upi_uncore_pci_ops,
.format_group = &icx_upi_uncore_format_group, .format_group = &icx_upi_uncore_format_group,
.attr_update = icx_upi_attr_update,
.get_topology = icx_upi_get_topology,
.set_mapping = icx_upi_set_mapping,
.cleanup_mapping = icx_upi_cleanup_mapping,
}; };
static struct event_constraint icx_uncore_m3upi_constraints[] = { static struct event_constraint icx_uncore_m3upi_constraints[] = {
...@@ -5780,9 +6115,43 @@ static struct intel_uncore_type spr_uncore_m2m = { ...@@ -5780,9 +6115,43 @@ static struct intel_uncore_type spr_uncore_m2m = {
.name = "m2m", .name = "m2m",
}; };
static struct attribute_group spr_upi_mapping_group = {
.is_visible = skx_upi_mapping_visible,
};
static const struct attribute_group *spr_upi_attr_update[] = {
&uncore_alias_group,
&spr_upi_mapping_group,
NULL
};
#define SPR_UPI_REGS_ADDR_DEVICE_LINK0 0x01
static void spr_upi_set_mapping(struct intel_uncore_type *type)
{
pmu_upi_set_mapping(type, &spr_upi_mapping_group);
}
static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
{
pmu_cleanup_mapping(type, &spr_upi_mapping_group);
}
static int spr_upi_get_topology(struct intel_uncore_type *type)
{
return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
}
static struct intel_uncore_type spr_uncore_upi = {
-SPR_UNCORE_PCI_COMMON_FORMAT(),
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
.event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
.format_group = &spr_uncore_raw_format_group,
.ops = &spr_uncore_pci_ops,
.name = "upi",
.attr_update = spr_upi_attr_update,
.get_topology = spr_upi_get_topology,
.set_mapping = spr_upi_set_mapping,
.cleanup_mapping = spr_upi_cleanup_mapping,
};
static struct intel_uncore_type spr_uncore_m3upi = {
@@ -5986,6 +6355,12 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
to_type->format_group = from_type->format_group;
if (from_type->attr_update)
to_type->attr_update = from_type->attr_update;
if (from_type->set_mapping)
to_type->set_mapping = from_type->set_mapping;
if (from_type->get_topology)
to_type->get_topology = from_type->get_topology;
if (from_type->cleanup_mapping)
to_type->cleanup_mapping = from_type->cleanup_mapping;
}
static struct intel_uncore_type **
...
@@ -811,7 +811,7 @@ struct x86_pmu {
void (*cpu_dead)(int cpu);
void (*check_microcode)(void);
-void (*sched_task)(struct perf_event_context *ctx,
void (*sched_task)(struct perf_event_pmu_context *pmu_ctx,
bool sched_in);
/*
@@ -894,12 +894,12 @@ struct x86_pmu {
int num_topdown_events;
/*
-* perf task context (i.e. struct perf_event_context::task_ctx_data)
* perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
* switch helper to bridge calls from perf/core to perf/x86.
* See struct pmu::swap_task_ctx() usage for examples;
*/
-void (*swap_task_ctx)(struct perf_event_context *prev,
-struct perf_event_context *next);
void (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc);
/*
* AMD bits
@@ -925,7 +925,7 @@ struct x86_pmu {
int (*aux_output_match) (struct perf_event *event);
-int (*filter_match)(struct perf_event *event);
void (*filter)(struct pmu *pmu, int cpu, bool *ret);
/*
* Hybrid support
*
@@ -1180,8 +1180,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs);
void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
u64 intel_ctrl);
-void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu);
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
@@ -1306,7 +1304,7 @@ void amd_pmu_lbr_reset(void);
void amd_pmu_lbr_read(void);
void amd_pmu_lbr_add(struct perf_event *event);
void amd_pmu_lbr_del(struct perf_event *event);
-void amd_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void amd_pmu_lbr_enable_all(void);
void amd_pmu_lbr_disable_all(void);
int amd_pmu_lbr_hw_config(struct perf_event *event);
@@ -1322,7 +1320,6 @@ void amd_brs_enable_all(void);
void amd_brs_disable_all(void);
void amd_brs_drain(void);
void amd_brs_lopwr_init(void);
-void amd_brs_disable_all(void);
int amd_brs_hw_config(struct perf_event *event);
void amd_brs_reset(void);
@@ -1330,7 +1327,7 @@ static inline void amd_pmu_brs_add(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-perf_sched_cb_inc(event->ctx->pmu);
perf_sched_cb_inc(event->pmu);
cpuc->lbr_users++;
/*
* No need to reset BRS because it is reset
@@ -1345,10 +1342,10 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
-perf_sched_cb_dec(event->ctx->pmu);
perf_sched_cb_dec(event->pmu);
}
-void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in);
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
#else
static inline int amd_brs_init(void)
{
@@ -1373,7 +1370,7 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
{
}
-static inline void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in)
static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
}
@@ -1533,7 +1530,7 @@ void intel_pmu_pebs_enable_all(void);
void intel_pmu_pebs_disable_all(void);
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
@@ -1541,10 +1538,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
void intel_ds_init(void);
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
-struct perf_event_context *next);
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc);
-void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
u64 lbr_from_signext_quirk_wr(u64 val);
...
@@ -547,15 +547,14 @@ static void armpmu_disable(struct pmu *pmu)
* microarchitecture, and aren't suitable for another. Thus, only match CPUs of
* the same microarchitecture.
*/
-static int armpmu_filter_match(struct perf_event *event)
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
-struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-unsigned int cpu = smp_processor_id();
-int ret;
struct arm_pmu *armpmu = to_arm_pmu(pmu);
bool ret;
ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-if (ret && armpmu->filter_match)
-return armpmu->filter_match(event);
if (ret && armpmu->filter)
return armpmu->filter(pmu, cpu);
return ret;
}
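For illustration, a compilable user-space model of the armpmu_filter() logic above: a heterogeneous (big.LITTLE-style) PMU matches only the CPUs in its supported mask and may chain to an optional per-driver filter. The toy_* types and the 4-CPU mask are invented for this example and are not kernel definitions.

/*
 * Self-contained model of the reworked per-CPU filter shown above.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_arm_pmu {
        unsigned long supported_cpus;                           /* bit n => CPU n */
        bool (*filter)(struct toy_arm_pmu *pmu, int cpu);       /* optional */
};

static bool toy_armpmu_filter(struct toy_arm_pmu *pmu, int cpu)
{
        bool ret = pmu->supported_cpus & (1UL << cpu);

        if (ret && pmu->filter)
                return pmu->filter(pmu, cpu);
        return ret;
}

int main(void)
{
        /* a "little" PMU that only knows about CPUs 0-1 */
        struct toy_arm_pmu little = { .supported_cpus = 0x3 };

        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu%d: %s\n", cpu,
                       toy_armpmu_filter(&little, cpu) ? "match" : "no match");
        return 0;
}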
@@ -882,14 +881,13 @@ struct arm_pmu *armpmu_alloc(void)
.start = armpmu_start,
.stop = armpmu_stop,
.read = armpmu_read,
-.filter_match = armpmu_filter_match,
.filter = armpmu_filter,
.attr_groups = pmu->attr_groups,
/*
* This is a CPU PMU potentially in a heterogeneous
* configuration (e.g. big.LITTLE). This is not an uncore PMU,
* and we have taken ctx sharing into account (e.g. with our
-* pmu::filter_match callback and pmu::event_init group
-* validation).
* pmu::filter callback and pmu::event_init group validation).
*/
.capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
};
...
@@ -100,7 +100,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
-int (*filter_match)(struct perf_event *event);
bool (*filter)(struct pmu *pmu, int cpu);
int num_events;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
...
@@ -266,6 +266,7 @@ struct hw_perf_event {
};
struct perf_event;
struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
@@ -308,7 +309,7 @@ struct pmu {
int capabilities;
int __percpu *pmu_disable_count;
-struct perf_cpu_context __percpu *pmu_cpu_context;
struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -443,7 +444,7 @@ struct pmu {
/*
* context-switches callback
*/
-void (*sched_task) (struct perf_event_context *ctx,
void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
bool sched_in);
/*
@@ -457,8 +458,8 @@ struct pmu {
* implementation and Perf core context switch handling callbacks for usage
* examples.
*/
-void (*swap_task_ctx) (struct perf_event_context *prev,
-struct perf_event_context *next);
void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc);
/* optional */
/*
@@ -522,9 +523,10 @@ struct pmu {
/* optional */
/*
-* Filter events for PMU-specific reasons.
* Skip programming this PMU on the given CPU. Typically needed for
* big.LITTLE things.
*/
-int (*filter_match) (struct perf_event *event); /* optional */
bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
@@ -695,6 +697,11 @@ struct perf_event {
int group_caps;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
* Whereas event->pmu_ctx->pmu may point to other pmu when group of
* different pmu events is created.
*/
struct pmu *pmu;
void *pmu_private;
@@ -720,6 +727,12 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
/*
* event->pmu_ctx points to perf_event_pmu_context in which the event
* is added. This pmu_ctx can be of other pmu for sw event when that
* sw event is part of a group which also contains non-sw events.
*/
struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
@@ -812,19 +825,69 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
/*
* ,-----------------------[1:n]----------------------.
* V V
* perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
* ^ ^ | |
* `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
*
*
* struct perf_event_pmu_context lifetime is refcount based and RCU freed
* (similar to perf_event_context). Locking is as if it were a member of
* perf_event_context; specifically:
*
* modification, both: ctx->mutex && ctx->lock
* reading, either: ctx->mutex || ctx->lock
*
* There is one exception to this; namely put_pmu_ctx() isn't always called
* with ctx->mutex held; this means that as long as we can guarantee the epc
* has events the above rules hold.
*
* Specificially, sys_perf_event_open()'s group_leader case depends on
* ctx->mutex pinning the configuration. Since we hold a reference on
* group_leader (through the filedesc) it can't go away, therefore it's
* associated pmu_ctx must exist and cannot change due to ctx->mutex.
*/
struct perf_event_pmu_context {
struct pmu *pmu;
struct perf_event_context *ctx;
struct list_head pmu_ctx_entry;
struct list_head pinned_active;
struct list_head flexible_active;
/* Used to avoid freeing per-cpu perf_event_pmu_context */
unsigned int embedded : 1;
unsigned int nr_events;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
void *task_ctx_data; /* pmu specific data */
/*
* Set when one or more (plausibly active) event can't be scheduled
* due to pmu overcommit or pmu constraints, except tolerant to
* events not necessary to be active due to scheduling constraints,
* such as cgroups.
*/
int rotate_necessary;
};
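As a rough, user-space model of the ctx <-> epc <-> pmu relationship described in the comment above: one perf_event_pmu_context exists per (context, pmu) pair and is refcounted by the events attached to it. The toy_* types and the lookup helper below are illustrative only; they do none of the kernel's locking, RCU, or error handling.

/*
 * Toy model: an epc per (ctx, pmu) pair, refcounted by its events.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_pmu { const char *name; };

struct toy_epc {
        struct toy_pmu *pmu;
        int refcount;                   /* event <-> epc */
        struct toy_epc *next;           /* ctx->pmu_ctx_list */
};

struct toy_ctx { struct toy_epc *pmu_ctx_list; };

/* find the epc for @pmu in @ctx, creating it on first use */
static struct toy_epc *toy_get_pmu_ctx(struct toy_ctx *ctx, struct toy_pmu *pmu)
{
        struct toy_epc *epc;

        for (epc = ctx->pmu_ctx_list; epc; epc = epc->next) {
                if (epc->pmu == pmu) {
                        epc->refcount++;
                        return epc;
                }
        }
        epc = calloc(1, sizeof(*epc));
        epc->pmu = pmu;
        epc->refcount = 1;
        epc->next = ctx->pmu_ctx_list;
        ctx->pmu_ctx_list = epc;
        return epc;
}

int main(void)
{
        struct toy_pmu cpu_pmu = { "cpu" }, ibs_pmu = { "ibs" };
        struct toy_ctx ctx = { 0 };

        /* two events on the cpu pmu share one epc; ibs gets its own */
        toy_get_pmu_ctx(&ctx, &cpu_pmu);
        toy_get_pmu_ctx(&ctx, &cpu_pmu);
        toy_get_pmu_ctx(&ctx, &ibs_pmu);

        for (struct toy_epc *epc = ctx.pmu_ctx_list; epc; epc = epc->next)
                printf("%s: refcount=%d\n", epc->pmu->name, epc->refcount);
        return 0;
}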
struct perf_event_groups {
struct rb_root tree;
u64 index;
};
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
-struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -837,27 +900,21 @@ struct perf_event_context {
*/
struct mutex mutex;
-struct list_head active_ctx_list;
struct list_head pmu_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;
-struct list_head pinned_active;
-struct list_head flexible_active;
int nr_events;
-int nr_active;
int nr_user;
int is_active;
-int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
-/*
-* Set when nr_events != nr_active, except tolerant to events not
-* necessary to be active due to scheduling constraints, such as cgroups.
-*/
-int rotate_necessary;
-refcount_t refcount;
refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
@@ -878,7 +935,6 @@ struct perf_event_context {
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
-void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
/*
@@ -896,12 +952,13 @@ struct perf_event_context {
*/
#define PERF_NR_CONTEXTS 4
-/**
-* struct perf_cpu_context - per cpu event context structure
-*/
-struct perf_cpu_context {
-struct perf_event_context ctx;
-struct perf_event_context *task_ctx;
struct perf_cpu_pmu_context {
struct perf_event_pmu_context epc;
struct perf_event_pmu_context *task_epc;
struct list_head sched_cb_entry;
int sched_cb_usage;
int active_oncpu;
int exclusive;
@@ -909,16 +966,20 @@ struct perf_cpu_context {
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
};
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
-struct list_head cgrp_cpuctx_entry;
#endif
-struct list_head sched_cb_entry;
-int sched_cb_usage;
-int online;
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
@@ -982,6 +1043,8 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
#ifdef CONFIG_PERF_EVENTS
extern struct perf_event_context *perf_cpu_task_ctx(void);
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
@@ -1187,7 +1250,7 @@ static inline int is_software_event(struct perf_event *event)
*/
static inline int in_software_context(struct perf_event *event)
{
-return event->ctx->pmu->task_ctx_nr == perf_sw_context;
return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
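A toy illustration of why in_software_context() now dereferences event->pmu_ctx->pmu: a software event grouped under a hardware leader keeps its own event->pmu but shares the leader's pmu_ctx. All names below are stand-ins, not kernel definitions.

/*
 * Toy model: sw event in a hw group is not "in a software context".
 */
#include <stdio.h>

enum { TOY_SW_CONTEXT, TOY_HW_CONTEXT };

struct toy_pmu { int task_ctx_nr; };
struct toy_epc { struct toy_pmu *pmu; };
struct toy_event { struct toy_pmu *pmu; struct toy_epc *pmu_ctx; };

static int toy_in_software_context(struct toy_event *event)
{
        return event->pmu_ctx->pmu->task_ctx_nr == TOY_SW_CONTEXT;
}

int main(void)
{
        struct toy_pmu hw = { TOY_HW_CONTEXT }, sw = { TOY_SW_CONTEXT };
        struct toy_epc hw_epc = { &hw };
        /* sw event grouped with a hw leader: sw pmu, but hw pmu_ctx */
        struct toy_event ev = { .pmu = &sw, .pmu_ctx = &hw_epc };

        printf("in_software_context: %d\n", toy_in_software_context(&ev));
        return 0;
}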
static inline int is_exclusive_pmu(struct pmu *pmu)
...
@@ -1243,7 +1243,7 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
-struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
...
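A small sketch of the task_struct change above, using toy types: the per-class context array collapses into a single per-task context pointer, so one dereference replaces iterating the context classes. The toy_* shapes are illustrative, not the kernel's structures.

/*
 * Toy model of the old array-of-contexts vs. the new single pointer.
 */
#include <stdio.h>

struct toy_ctx { int nr_events; };

/* old shape (illustrative): one slot per context class */
#define TOY_NR_CONTEXTS 4
struct toy_task_old { struct toy_ctx *perf_event_ctxp[TOY_NR_CONTEXTS]; };

/* new shape: a single task context; per-pmu state hangs off the ctx */
struct toy_task_new { struct toy_ctx *perf_event_ctxp; };

int main(void)
{
        struct toy_ctx ctx = { .nr_events = 2 };
        struct toy_task_new task = { .perf_event_ctxp = &ctx };

        /* a single dereference replaces iterating all context classes */
        if (task.perf_event_ctxp)
                printf("task ctx has %d events\n", task.perf_event_ctxp->nr_events);
        return 0;
}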
This source diff could not be displayed because it is too large.