Commit 6348aafa authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Adjust number of LBR records for PERF_CAPABILITIES at refresh

Now that the PMU is refreshed when MSR_IA32_PERF_CAPABILITIES is written
by host userspace, zero out the number of LBR records for a vCPU during
PMU refresh if PMU_CAP_LBR_FMT is not set in PERF_CAPABILITIES instead of
handling the check at run-time.

guest_cpuid_has() is expensive due to the linear search of guest CPUID
entries, intel_pmu_lbr_is_enabled() is checked on every VM-Enter, _and_
simply enumerating the same "Model" as the host causes KVM to set the
number of LBR records to a non-zero value.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220727233424.2968356-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7de8e5b6
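
The net effect of the change, as a minimal standalone C sketch (simplified stand-in types and helper names, not the real KVM structures; the mask value mirrors KVM's PMU_CAP_LBR_FMT, and the cpuid_model_is_consistent() check from the real refresh path is omitted for brevity):

/* Minimal sketch of the optimization, using stand-in types; the real
 * change operates on struct kvm_vcpu and the vCPU's LBR descriptor.
 */
#include <stdbool.h>
#include <stdint.h>

#define PMU_CAP_LBR_FMT 0x3fULL  /* LBR format field of PERF_CAPABILITIES */

struct vcpu {
        uint64_t perf_capabilities; /* guest MSR_IA32_PERF_CAPABILITIES */
        int lbr_nr;                 /* number of LBR records exposed */
};

/* Before: evaluated on every VM-Enter, two loads plus a mask. */
static bool lbr_is_enabled_old(struct vcpu *v)
{
        return v->lbr_nr && (v->perf_capabilities & PMU_CAP_LBR_FMT);
}

/* After: the PERF_CAPABILITIES check runs once, at PMU refresh, and
 * zeroes the record count when the LBR format field is clear...
 */
static void pmu_refresh(struct vcpu *v, int host_lbr_nr)
{
        if (v->perf_capabilities & PMU_CAP_LBR_FMT)
                v->lbr_nr = host_lbr_nr;
        else
                v->lbr_nr = 0;
}

/* ...so the hot-path check collapses to a single load and test. */
static bool lbr_is_enabled_new(struct vcpu *v)
{
        return !!v->lbr_nr;
}

Moving the mask test into the refresh path makes the record count the single source of truth, so the per-VM-Enter check needs only one memory read.
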
arch/x86/kvm/vmx/pmu_intel.c:

@@ -171,13 +171,6 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
 }
 
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
-	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
-
-	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
-}
-
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
@@ -592,7 +585,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	bitmap_set(pmu->all_valid_pmc_idx,
 		   INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
-	if (cpuid_model_is_consistent(vcpu))
+	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+	if (cpuid_model_is_consistent(vcpu) &&
+	    (perf_capabilities & PMU_CAP_LBR_FMT))
 		x86_perf_get_lbr(&lbr_desc->records);
 	else
 		lbr_desc->records.nr = 0;
@@ -600,7 +595,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (lbr_desc->records.nr)
 		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
-	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
 	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
 			pmu->pebs_enable_mask = counter_mask;
...

arch/x86/kvm/vmx/vmx.h:

@@ -544,9 +544,12 @@ static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
 	return &vcpu_to_lbr_desc(vcpu)->records;
 }
 
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+	return !!vcpu_to_lbr_records(vcpu)->nr;
+}
 
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
...
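
For completeness, the refresh that re-sizes the LBR records fires when host userspace writes MSR_IA32_PERF_CAPABILITIES (per the parent commits in this series). A hedged userspace sketch of that trigger via the standard KVM_SET_MSRS ioctl; clear_guest_perf_capabilities() is a hypothetical helper name, and vcpu_fd is assumed to be an open KVM vCPU file descriptor:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345  /* not defined by linux/kvm.h */

/* Write PERF_CAPABILITIES = 0 (LBR format bits clear); with this series,
 * KVM refreshes the vCPU's PMU on the write and zeroes its LBR records.
 */
static int clear_guest_perf_capabilities(int vcpu_fd)
{
        struct kvm_msrs *msrs;
        int ret;

        /* kvm_msrs has a flexible array of entries; allocate room for one. */
        msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
        if (!msrs)
                return -1;

        msrs->nmsrs = 1;
        msrs->entries[0].index = MSR_IA32_PERF_CAPABILITIES;
        msrs->entries[0].data = 0;

        ret = ioctl(vcpu_fd, KVM_SET_MSRS, msrs);
        free(msrs);
        return ret;  /* KVM_SET_MSRS returns the number of MSRs written */
}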