Commit 4f6ea0a8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Gracefully handle faults on VMXON

Gracefully handle faults on VMXON, e.g. #GP due to VMX being disabled by
BIOS, instead of letting the fault crash the system.  Now that KVM uses
cpufeatures to query support instead of reading MSR_IA32_FEAT_CTL
directly, it's possible for a bug in a different subsystem to cause KVM
to incorrectly attempt VMXON[*].  Crashing the system is especially
annoying if the system is configured such that hardware_enable() will
be triggered during boot.

Opportunistically rename @addr to @vmxon_pointer and use a named param
to reference it in the inline assembly.
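
(For reference: a named operand lets the asm template say "%[name]" instead
of the positional "%0".  A minimal userspace sketch of the same construct,
assuming x86-64 with GCC or Clang; this is an illustration, not the kernel
code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long src = 42, dst;

		/* positional form: asm("mov %1, %0" : "=r"(dst) : "r"(src)); */
		asm volatile("mov %[in], %[out]"
			     : [out] "=r"(dst)	/* output bound to the name "out" */
			     : [in]  "r"(src));	/* input bound to the name "in" */

		printf("dst = %lu\n", dst);	/* prints: dst = 42 */
		return 0;
	}
)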

Print 0xdeadbeef in the ultra-"rare" case that reading MSR_IA32_FEAT_CTL
also faults.

[*] https://lkml.kernel.org/r/20200226231615.13664-1-sean.j.christopherson@intel.com

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321193751.24985-4-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d260f9ef
@@ -2218,18 +2218,33 @@ static __init int vmx_disabled_by_bios(void)
 	       !boot_cpu_has(X86_FEATURE_VMX);
 }
 
-static void kvm_cpu_vmxon(u64 addr)
+static int kvm_cpu_vmxon(u64 vmxon_pointer)
 {
+	u64 msr;
+
 	cr4_set_bits(X86_CR4_VMXE);
 	intel_pt_handle_vmx(1);
 
-	asm volatile ("vmxon %0" : : "m"(addr));
+	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+			  _ASM_EXTABLE(1b, %l[fault])
+			  : : [vmxon_pointer] "m"(vmxon_pointer)
+			  : : fault);
+	return 0;
+
+fault:
+	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
+		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+	intel_pt_handle_vmx(0);
+	cr4_clear_bits(X86_CR4_VMXE);
+
+	return -EFAULT;
 }
 
 static int hardware_enable(void)
 {
 	int cpu = raw_smp_processor_id();
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+	int r;
 
 	if (cr4_read_shadow() & X86_CR4_VMXE)
 		return -EBUSY;
@@ -2246,7 +2261,10 @@ static int hardware_enable(void)
 	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
 	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
 
-	kvm_cpu_vmxon(phys_addr);
+	r = kvm_cpu_vmxon(phys_addr);
+	if (r)
+		return r;
+
 	if (enable_ept)
 		ept_sync_global();
 
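
(The fixup above works in two parts: _ASM_EXTABLE(1b, %l[fault]) registers an
exception-table entry so that a fault on the VMXON at label 1 resumes at the
C label "fault" instead of killing the system, and asm_volatile_goto(), the
kernel's wrapper around the compiler's "asm goto" construct, is what allows
inline assembly to branch to a C label at all.  The exception-table machinery
is kernel-only, but the control flow can be sketched in userspace; in this
illustration a conditional jump stands in for the hardware fault, assuming
x86-64 with GCC or Clang:

	#include <stdio.h>

	/* Returns 0 on success, -1 via the "fault" label, mirroring the
	 * shape of kvm_cpu_vmxon().  The jnz simulates the #GP fixup that
	 * _ASM_EXTABLE(1b, %l[fault]) provides in the kernel.
	 */
	static int try_op(int simulate_fault)
	{
		asm goto("test %[f], %[f]\n\t"
			 "jnz %l[fault]"
			 : /* asm goto takes no output operands */
			 : [f] "r"(simulate_fault)
			 : "cc"
			 : fault);
		return 0;

	fault:
		return -1;
	}

	int main(void)
	{
		printf("ok=%d faulted=%d\n", try_op(0), try_op(1));
		return 0;
	}

Note that "asm goto" statements could not have output operands until GCC 11,
which is one reason failure is reported via the separate return path rather
than an output variable.)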