Commit 941c122d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'perf-urgent-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf event fixes from Ingo Molnar:
 "Left over from the v6.11 cycle:

   - Fix energy-pkg event enumeration on certain AMD CPUs

   - Set up the LBR branch stack for BPF counting events too"

* tag 'perf-urgent-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Allow to setup LBR for counting event for BPF
  perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
parents 9f397579 ef493f4b
...@@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event) ...@@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
x86_pmu.pebs_aliases(event); x86_pmu.pebs_aliases(event);
} }
if (needs_branch_stack(event) && is_sampling_event(event)) if (needs_branch_stack(event)) {
event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; /* Avoid branch stack setup for counting events in SAMPLE READ */
if (is_sampling_event(event) ||
!(event->attr.sample_type & PERF_SAMPLE_READ))
event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
}
if (branch_sample_counters(event)) { if (branch_sample_counters(event)) {
struct perf_event *leader, *sibling; struct perf_event *leader, *sibling;
......
...@@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = { \ ...@@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = { \
.event_str = str, \ .event_str = str, \
}; };
/*
 * RAPL Package energy counter scope:
 * 1. AMD/HYGON platforms have a per-PKG package energy counter
 * 2. For Intel platforms
 *	2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
 *	2.2. Other Intel platforms are single die systems so the scope can be
 *	     considered as either pkg-scope or die-scope, and we are considering
 *	     them as die-scope.
 *
 * Evaluates to true when the package-energy RAPL counter is per-package
 * (AMD/HYGON); false means the die-scope topology macros must be used.
 * Reads boot_cpu_data, so it is only valid after early CPU identification.
 */
#define rapl_pmu_is_pkg_scope()				\
	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
struct rapl_pmu { struct rapl_pmu {
raw_spinlock_t lock; raw_spinlock_t lock;
int n_active; int n_active;
...@@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask; ...@@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms; static u64 rapl_timer_ms;
static struct perf_msr *rapl_msrs; static struct perf_msr *rapl_msrs;
/*
* Helper functions to get the correct topology macros according to the
* RAPL PMU scope.
*/
/*
 * Return the RAPL PMU index for @cpu: the logical package id when the
 * RAPL counters are package-scoped (AMD/HYGON), otherwise the logical
 * die id. The topology macros return -1 for offline/unknown CPUs; the
 * caller is expected to range-check the (unsigned) result.
 */
static inline unsigned int get_rapl_pmu_idx(int cpu)
{
	if (rapl_pmu_is_pkg_scope())
		return topology_logical_package_id(cpu);

	return topology_logical_die_id(cpu);
}
/*
 * Return the cpumask covering all CPUs that share @cpu's RAPL domain:
 * the package mask (topology_core_cpumask) for pkg-scope platforms
 * (AMD/HYGON), the die mask otherwise.
 */
static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
{
	if (rapl_pmu_is_pkg_scope())
		return topology_core_cpumask(cpu);

	return topology_die_cpumask(cpu);
}
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{ {
unsigned int rapl_pmu_idx = topology_logical_die_id(cpu); unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
/* /*
* The unsigned check also catches the '-1' return value for non * The unsigned check also catches the '-1' return value for non
...@@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu) ...@@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu)
pmu->cpu = -1; pmu->cpu = -1;
/* Find a new cpu to collect rapl events */ /* Find a new cpu to collect rapl events */
target = cpumask_any_but(topology_die_cpumask(cpu), cpu); target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
/* Migrate rapl events to the new target */ /* Migrate rapl events to the new target */
if (target < nr_cpu_ids) { if (target < nr_cpu_ids) {
...@@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu) ...@@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu)
static int rapl_cpu_online(unsigned int cpu) static int rapl_cpu_online(unsigned int cpu)
{ {
s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
if (rapl_pmu_idx < 0) {
pr_err("topology_logical_(package/die)_id() returned a negative value");
return -EINVAL;
}
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target; int target;
...@@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu) ...@@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu)
pmu->timer_interval = ms_to_ktime(rapl_timer_ms); pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
rapl_hrtimer_init(pmu); rapl_hrtimer_init(pmu);
rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu; rapl_pmus->pmus[rapl_pmu_idx] = pmu;
} }
/* /*
* Check if there is an online cpu in the package which collects rapl * Check if there is an online cpu in the package which collects rapl
* events already. * events already.
*/ */
target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu)); target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
if (target < nr_cpu_ids) if (target < nr_cpu_ids)
return 0; return 0;
...@@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = { ...@@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = {
static int __init init_rapl_pmus(void) static int __init init_rapl_pmus(void)
{ {
int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package(); int nr_rapl_pmu = topology_max_packages();
if (!rapl_pmu_is_pkg_scope())
nr_rapl_pmu *= topology_max_dies_per_package();
rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL); rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
if (!rapl_pmus) if (!rapl_pmus)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment