Commit dc2b8b2b authored by Sean Christopherson

KVM: SVM: Emulate SYSENTER RIP/RSP behavior for all Intel compat vCPUs

Emulate bits 63:32 of the SYSENTER_R{I,S}P MSRs for all vCPUs that are
compatible with Intel's architecture, not just strictly vCPUs that have
vendor==Intel.  The behavior of bits 63:32 is architecturally defined in
the SDM, i.e. not some uarch specific quirk of Intel CPUs.

Link: https://lore.kernel.org/r/20240405235603.1173076-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent d99e4cb2
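
Note: guest_cpuid_is_intel_compatible(), which the hunks below switch to, is not defined in this diff; it is added to arch/x86/kvm/cpuid.h earlier in the same series. A minimal sketch of what the helper presumably reduces to (the exact body is an assumption here, not part of this commit):

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	/* Sketch: treat any vCPU that is not AMD/Hygon compatible as Intel compatible. */
	return !guest_cpuid_is_amd_compatible(vcpu);
}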
@@ -112,14 +112,6 @@ static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
 		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
 }
 
-static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0);
-	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
-}
-
 static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.is_amd_compatible;
@@ -1196,7 +1196,7 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (guest_cpuid_is_intel(vcpu)) {
+	if (guest_cpuid_is_intel_compatible(vcpu)) {
 		/*
 		 * We must intercept SYSENTER_EIP and SYSENTER_ESP
 		 * accesses because the processor only stores 32 bits.
@@ -2855,12 +2855,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_SYSENTER_EIP:
 		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
-		if (guest_cpuid_is_intel(vcpu))
+		if (guest_cpuid_is_intel_compatible(vcpu))
 			msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
 		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
-		if (guest_cpuid_is_intel(vcpu))
+		if (guest_cpuid_is_intel_compatible(vcpu))
 			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
 		break;
 	case MSR_TSC_AUX:
@@ -3083,11 +3083,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * 32 bit part of these msrs to support Intel's
 		 * implementation of SYSENTER/SYSEXIT.
 		 */
-		svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
+		svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
 		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
-		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
+		svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
 		break;
 	case MSR_TSC_AUX:
 		/*
@@ -4337,11 +4337,11 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
 
 	/*
-	 * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
+	 * Intercept VMLOAD if the vCPU model is Intel in order to emulate that
 	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
 	 * SVM on Intel is bonkers and extremely unlikely to work).
 	 */
-	if (!guest_cpuid_is_intel(vcpu))
+	if (!guest_cpuid_is_intel_compatible(vcpu))
 		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
 	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
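
Distilled from the svm_set_msr()/svm_get_msr() hunks above: AMD hardware only holds the low 32 bits of SYSENTER_EIP/ESP in the VMCB, so for Intel-compatible guests KVM stashes the high half in software on WRMSR and stitches it back together on RDMSR. A simplified sketch of the round trip for SYSENTER_EIP (names as in the diff; the WRMSR low-half store is implied by the surrounding context rather than shown in the hunks):

	/* WRMSR: low 32 bits land in the VMCB, high 32 bits are kept in software. */
	svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
	svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;

	/* RDMSR: reassemble the architecturally defined 64-bit value. */
	msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
	if (guest_cpuid_is_intel_compatible(vcpu))
		msr_info->data |= (u64)svm->sysenter_eip_hi << 32;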