Commit e47d8608 authored by Josh Poimboeuf's avatar Josh Poimboeuf Committed by Sean Christopherson

KVM: x86: Add SBPB support

Add support for the AMD Selective Branch Predictor Barrier (SBPB) by
advertising the CPUID bit and handling PRED_CMD writes accordingly.

Note, like SRSO_NO and IBPB_BRTYPE before it, advertise support for SBPB
even if it's not enumerated in the raw CPUID.  Some CPUs that gained
support via a uCode patch don't report SBPB via CPUID (the kernel forces
the flag).
Signed-off-by: default avatarJosh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/a4ab1e7fe50096d50fde33e739ed2da40b41ea6a.1692919072.git.jpoimboe@kernel.org
Co-developed-by: default avatarSean Christopherson <seanjc@google.com>
Signed-off-by: default avatarSean Christopherson <seanjc@google.com>
parent 6f0f23ef
@@ -764,6 +764,7 @@ void kvm_set_cpu_caps(void)
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
	);

	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
	kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
	kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
...@@ -174,7 +174,8 @@ static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu) ...@@ -174,7 +174,8 @@ static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu) static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{ {
return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) || return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)); guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
} }
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu) static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
......
@@ -3670,17 +3670,36 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		vcpu->arch.perf_capabilities = data;
		kvm_pmu_refresh(vcpu);
		break;
	case MSR_IA32_PRED_CMD: {
		u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);

		if (!msr_info->host_initiated) {
			if (!guest_has_pred_cmd_msr(vcpu))
				return 1;

			if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
			    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
				reserved_bits |= PRED_CMD_IBPB;

			if (!guest_cpuid_has(vcpu, X86_FEATURE_SBPB))
				reserved_bits |= PRED_CMD_SBPB;
		}

		if (!boot_cpu_has(X86_FEATURE_IBPB))
			reserved_bits |= PRED_CMD_IBPB;

		if (!boot_cpu_has(X86_FEATURE_SBPB))
			reserved_bits |= PRED_CMD_SBPB;

		if (data & reserved_bits)
			return 1;

		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, data);
		break;
	}
	case MSR_IA32_FLUSH_CMD:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment