Commit 64f7a115 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: vmx: update sec exec controls for UMIP iff emulating UMIP

Update SECONDARY_EXEC_DESC for UMIP emulation if and only if UMIP
is actually being emulated.  Skipping the VMCS update eliminates
unnecessary VMREAD/VMWRITE when UMIP is supported in hardware,
and on platforms that don't have SECONDARY_VM_EXEC_CONTROL.  The
latter case resolves a bug where KVM would fill the kernel log
with warnings due to failed VMWRITEs on older platforms.

Fixes: 0367f205 ("KVM: vmx: add support for emulating UMIP")
Cc: stable@vger.kernel.org #4.16
Reported-by: Paolo Zeppegno <pzeppegno@gmail.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Suggested-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c19986fe
@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
 		SECONDARY_EXEC_ENABLE_VMFUNC;
 }
 
+static bool vmx_umip_emulated(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_DESC;
+}
+
 static inline bool report_flexpriority(void)
 {
 	return flexpriority_enabled;
@@ -4761,7 +4767,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	else
 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
 
-	if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
+	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+		if (cr4 & X86_CR4_UMIP) {
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_DESC);
 			hw_cr4 &= ~X86_CR4_UMIP;
@@ -4769,6 +4776,7 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			   !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
 			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
 					SECONDARY_EXEC_DESC);
+	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		/*
@@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
 		SECONDARY_EXEC_XSAVES;
 }
 
-static bool vmx_umip_emulated(void)
-{
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_DESC;
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
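
For readability, here is a condensed sketch of the UMIP handling in vmx_set_cr4() as it looks after this patch, reassembled from the hunks above. It is not verbatim kernel source: indentation and the first half of the else-if condition, which falls outside the displayed hunks, are approximated.

	/*
	 * Only touch SECONDARY_VM_EXEC_CONTROL when UMIP is being emulated,
	 * i.e. the CPU lacks UMIP but the descriptor-table exiting control
	 * (SECONDARY_EXEC_DESC) is available.
	 */
	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
		if (cr4 & X86_CR4_UMIP) {
			/*
			 * Guest wants UMIP: emulate it by intercepting
			 * descriptor-table instructions and hide UMIP from
			 * the hardware value of CR4.
			 */
			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
				      SECONDARY_EXEC_DESC);
			hw_cr4 &= ~X86_CR4_UMIP;
		} else if (!is_guest_mode(vcpu) ||
			   !nested_cpu_has2(get_vmcs12(vcpu),
					    SECONDARY_EXEC_DESC))
			/*
			 * Guest cleared CR4.UMIP: stop intercepting, unless
			 * a nested guest still relies on DESC exiting.
			 */
			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
					SECONDARY_EXEC_DESC);
	}

On hardware with native UMIP support, or on CPUs without secondary execution controls, the whole block is now skipped, which avoids the unnecessary VMREAD/VMWRITE and the failed-VMWRITE warnings mentioned in the commit message.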