Commit 6a5cba7b, authored by Aaron Lewis, committed by Sean Christopherson

KVM: x86/pmu: Correct the mask used in a pmu event filter lookup

When checking if a pmu event the guest is attempting to program should
be filtered, only consider the event select + unit mask in that
decision. Use an architecture specific mask to mask out all other bits,
including bits 35:32 on Intel.  Those bits are not part of the event
select and should not be considered in that decision.

Fixes: 66bb8a06 ("KVM: x86: PMU Event Filter")
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Link: https://lore.kernel.org/r/20221220161236.555143-2-aaronlewis@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 7cb79f43
@@ -279,7 +279,8 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 		goto out;
 
 	if (pmc_is_gp(pmc)) {
-		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+		key = pmc->eventsel & (kvm_pmu_ops.EVENTSEL_EVENT |
+				       ARCH_PERFMON_EVENTSEL_UMASK);
 		if (bsearch(&key, filter->events, filter->nevents,
 			    sizeof(__u64), cmp_u64))
 			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
@@ -40,6 +40,8 @@ struct kvm_pmu_ops {
 	void (*reset)(struct kvm_vcpu *vcpu);
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
 	void (*cleanup)(struct kvm_vcpu *vcpu);
+
+	const u64 EVENTSEL_EVENT;
 };
 
 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
@@ -231,4 +231,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.refresh = amd_pmu_refresh,
 	.init = amd_pmu_init,
 	.reset = amd_pmu_reset,
+	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
 };
@@ -811,4 +811,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.reset = intel_pmu_reset,
 	.deliver_pmi = intel_pmu_deliver_pmi,
 	.cleanup = intel_pmu_cleanup,
+	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
 };
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment