Commit a2186448 authored by Like Xu, committed by Paolo Bonzini

KVM: x86/pmu: Fix available_event_types check for REF_CPU_CYCLES event

According to the CPUID 0x0A.EBX bit vector, event [7] should be the as-yet-unrealized event "Topdown Slots" rather than the kernel's generalized common hardware event REF_CPU_CYCLES, so we need to skip the CPUID unavailability check in intel_pmc_perf_hw_id() for the last REF_CPU_CYCLES event, and update the confusing comment accordingly.
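For context on that bit vector: CPUID 0x0A.EAX[31:24] reports the length of the EBX bit vector, and EBX bit i == 1 means architectural event i is *not* available; bits 0-6 map onto the kernel's generic hardware events, while bit 7 is "Topdown Slots". A simplified sketch of how KVM derives the availability mask (based on intel_pmu_refresh() in arch/x86/kvm/vmx/pmu_intel.c; condensed, not part of this patch):

	union cpuid10_eax eax;

	/* entry points at the guest's CPUID 0x0A leaf */
	eax.full = entry->eax;

	/*
	 * Invert EBX and mask it to the reported vector length, so a set
	 * bit in available_event_types means "event i is usable".
	 */
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);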

If an event is marked as unavailable in the Intel guest CPUID 0AH.EBX leaf, we need to avoid any perf_event creation, whether it's for a gp or a fixed counter. To distinguish a rejected event from an event that needs to be programmed with the PERF_TYPE_RAW type, a new special return value of "PERF_COUNT_HW_MAX + 1" is introduced.
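A condensed sketch of the resulting flow (reprogram_gp_counter() and pmc_reprogram_counter(), simplified; other arguments and checks omitted): a matched-but-unavailable event comes back as the new sentinel and is rejected, while an unmatched event comes back as exactly PERF_COUNT_HW_MAX and still gets programmed with PERF_TYPE_RAW:

	/* in reprogram_gp_counter(), condensed: */
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	if (config != PERF_COUNT_HW_MAX)
		type = PERF_TYPE_HARDWARE;	/* the sentinel lands here too */

	pmc_reprogram_counter(pmc, type, config, ...);

	/* ... where the new guard then drops the sentinel: */
	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
		return;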

Fixes: 62079d8a ("KVM: PMU: add proper support for fixed counter 2")
Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220105051509.69437-1-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c862dcd1
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -109,6 +109,9 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.config = config,
 	};
 
+	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
+		return;
+
 	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -21,7 +21,6 @@
 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
 static struct kvm_event_hw_type_mapping intel_arch_events[] = {
-	/* Index must match CPUID 0x0A.EBX bit vector */
 	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
 	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
@@ -29,6 +28,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
 	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
 	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
 	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+	/* The above index must match CPUID 0x0A.EBX bit vector */
 	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
@@ -75,11 +75,17 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
-		if (intel_arch_events[i].eventsel == event_select &&
-		    intel_arch_events[i].unit_mask == unit_mask &&
-		    (pmc_is_fixed(pmc) || pmu->available_event_types & (1 << i)))
-			break;
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+		if (intel_arch_events[i].eventsel != event_select ||
+		    intel_arch_events[i].unit_mask != unit_mask)
+			continue;
+
+		/* disable event that reported as not present by cpuid */
+		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
+			return PERF_COUNT_HW_MAX + 1;
+
+		break;
+	}
 
 	if (i == ARRAY_SIZE(intel_arch_events))
 		return PERF_COUNT_HW_MAX;