Commit 0b9ca98b authored by Sean Christopherson; committed by Paolo Bonzini

perf/x86/core: Zero @lbr instead of returning -1 in x86_perf_get_lbr() stub

Drop the return value from x86_perf_get_lbr() and have the stub zero out
the @lbr structure instead of returning -1 to indicate "no LBR support".
KVM doesn't actually check the return value, and instead subtly relies on
zeroing the number of LBRs in intel_pmu_init().

Formalize "nr=0 means unsupported" so that KVM doesn't need to add a
pointless check on the return value to fix KVM's benign bug.

Note, the stub is necessary even though KVM x86 selects PERF_EVENTS and
the caller exists only when CONFIG_KVM_INTEL=y.  Despite the name,
KVM_INTEL doesn't strictly require CPU_SUP_INTEL, it can be built with
any of INTEL || CENTAUR || ZHAOXIN CPUs.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221006000314.73240-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d72cf8ff
arch/x86/events/intel/lbr.c:
@@ -1603,10 +1603,8 @@ void __init intel_pmu_arch_lbr_init(void)
  * x86_perf_get_lbr - get the LBR records information
  *
  * @lbr: the caller's memory to store the LBR records information
- *
- * Returns: 0 indicates the LBR info has been successfully obtained
  */
-int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 {
 	int lbr_fmt = x86_pmu.intel_cap.lbr_format;
@@ -1614,8 +1612,6 @@ void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 	lbr->from = x86_pmu.lbr_from;
 	lbr->to = x86_pmu.lbr_to;
 	lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
arch/x86/include/asm/perf_event.h:
@@ -543,12 +543,12 @@ static inline void perf_check_microcode(void) { }
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
 extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
-extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
+extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
 #else
 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
-static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 {
-	return -1;
+	memset(lbr, 0, sizeof(*lbr));
 }
 #endif
arch/x86/kvm/vmx/capabilities.h:
@@ -407,7 +407,8 @@ static inline u64 vmx_get_perf_capabilities(void)
 	if (boot_cpu_has(X86_FEATURE_PDCM))
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 
-	if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
+	x86_perf_get_lbr(&lbr);
+	if (lbr.nr)
 		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
 
 	if (vmx_pebs_supported()) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment