Commit e840f42a authored by Marc Zyngier

KVM: arm64: Fix PMU probe ordering

Russell reported that since 5.13, KVM's probing of the PMU has
started to fail on his HW. As it turns out, there is an implicit
ordering dependency between the architectural PMU probing code and
KVM's own probing. If, due to probe ordering reasons, KVM probes
before the PMU driver, it fails to detect the PMU, preventing it
from being advertised to guests as well as to the VMM.

Obviously, this is one probing too many, and we should be able to
deal with any ordering.

Add a callback from the PMU code into KVM to advertise the registration
of a host CPU PMU, allowing for any probing order.

Fixes: 5421db1b ("KVM: arm64: Divorce the perf code from oprofile helpers")
Reported-by: "Russell King (Oracle)" <linux@armlinux.org.uk>
Tested-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/YUYRKVflRtUytzy5@shell.armlinux.org.uk
Cc: stable@vger.kernel.org
parent a49b50a3
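
Before the diff proper, a minimal, hedged userspace sketch of the approach the message above describes: instead of KVM probing for a PMU at its own init time (and losing when it happens to run first), the PMU driver calls back into KVM whenever a host PMU is registered, so the outcome is the same in either init order. Every name in this sketch is an illustrative stand-in, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct arm_pmu; only the field we need. */
struct fake_pmu {
        int pmuver;
};

/* Stand-in for the kvm_arm_pmu_available static key. */
static bool pmu_available;

/* Stand-in for kvm_host_pmu_init(): invoked by the driver at
 * registration time, so it works regardless of which subsystem
 * initialized first. */
static void kvm_side_pmu_notify(struct fake_pmu *pmu)
{
        if (pmu->pmuver != 0)
                pmu_available = true;
}

/* Stand-in for armpmu_register(): advertises every new PMU to "KVM". */
static void driver_side_register(struct fake_pmu *pmu)
{
        kvm_side_pmu_notify(pmu);
}

int main(void)
{
        struct fake_pmu pmu = { .pmuver = 1 };

        /* "KVM" comes up first and sees no PMU yet; now harmless. */
        printf("before registration: available=%d\n", pmu_available);

        /* The driver probes later and pushes the information across. */
        driver_side_register(&pmu);
        printf("after registration:  available=%d\n", pmu_available);
        return 0;
}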
arch/arm64/kvm/perf.c
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 };
 
 int kvm_perf_init(void)
 {
-	if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-		static_branch_enable(&kvm_arm_pmu_available);
-
 	return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }
arch/arm64/kvm/pmu-emul.c
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+		static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
 	struct perf_event_attr attr = { };
 	struct perf_event *event;
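
A hedged aside on the guard in kvm_host_pmu_init() above: the static key is flipped only for a PMU reporting a real, architected PMU version (pmuver is neither 0 nor the 0xf IMPLEMENTATION DEFINED value), only if it is not already set (kvm_arm_support_pmu_v3() reads the key), and never under protected KVM. Below is a compact stand-alone model of that once-only, filtered enable; all names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define PMUVER_NONE    0x0	/* no PMU implemented */
#define PMUVER_IMP_DEF 0xf	/* IMPLEMENTATION DEFINED: not usable here */

static bool key_enabled;	/* stand-in for the static key */

static void maybe_enable(int pmuver, bool protected_mode)
{
        /* Enable at most once, and only for an architected PMU. */
        if (pmuver != PMUVER_NONE && pmuver != PMUVER_IMP_DEF &&
            !key_enabled && !protected_mode)
                key_enabled = true;
}

int main(void)
{
        maybe_enable(PMUVER_IMP_DEF, false);	/* filtered out */
        printf("after IMP_DEF PMU: %d\n", key_enabled);
        maybe_enable(0x4, false);		/* architected PMU: accepted */
        printf("after real PMU:   %d\n", key_enabled);
        return 0;
}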
drivers/perf/arm_pmu.c
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
 		 pmu->name, pmu->num_events,
 		 has_nmi ? ", using NMIs" : "");
 
+	kvm_host_pmu_init(pmu);
+
 	return 0;
 
 out_destroy:
......@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
int kvm_pmu_probe_pmuver(void);
#else
struct kvm_pmu {
};
......@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return 0;
}
static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
#endif
#endif
include/linux/perf/arm_pmu.h
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)	do { } while(0)
+#endif
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
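
One last hedged aside on the !CONFIG_KVM stub above: defining the fallback as do { } while(0) keeps the call site a single well-formed statement even though it compiles to nothing. A tiny stand-alone illustration, with names invented for the example:

#include <stdio.h>

#define HAVE_FEATURE 0

#if HAVE_FEATURE
void feature_hook(int v);
#else
/* Expands to one empty statement, so it stays safe even in an
 * unbraced if/else, unlike an empty expansion or a bare {} block. */
#define feature_hook(v) do { } while (0)
#endif

int main(void)
{
        if (1)
                feature_hook(42);	/* still one statement; else binds correctly */
        else
                printf("unreachable\n");
        return 0;
}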