Commit fc595f35 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Add VM-Enter failed tracepoints for super early checks

Add tracepoints for the early consistency checks in nested_vmx_run().
The "VMLAUNCH vs. VMRESUME" check in particular is useful to trace, as
there is no architectural way to check VMCS.LAUNCH_STATE, and subtle
bugs such as VMCLEAR on the wrong HPA can lead to confusing errors in
the L1 VMM.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200812180615.22372-1-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fb0f33fd
@@ -3471,11 +3471,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (evmptrld_status == EVMPTRLD_ERROR) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
-	} else if (evmptrld_status == EVMPTRLD_VMFAIL) {
+	} else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
 		return nested_vmx_failInvalid(vcpu);
 	}
 
-	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
+	if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
 	vmcs12 = get_vmcs12(vcpu);
@@ -3486,7 +3486,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * rather than RFLAGS.ZF, and no error number is stored to the
 	 * VM-instruction error field.
 	 */
-	if (vmcs12->hdr.shadow_vmcs)
+	if (CC(vmcs12->hdr.shadow_vmcs))
 		return nested_vmx_failInvalid(vcpu);
 
 	if (vmx->nested.hv_evmcs) {
@@ -3507,10 +3507,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
-	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+	if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
 		return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
 
-	if (vmcs12->launch_state == launch)
+	if (CC(vmcs12->launch_state == launch))
 		return nested_vmx_fail(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
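
For readers unfamiliar with the CC() wrapper the patch applies to each check above: in arch/x86/kvm/vmx/nested.c it evaluates a consistency check and, when the check fails, fires the kvm_nested_vmenter_failed tracepoint with the stringified expression, so a trace log names exactly which check rejected the nested VM-Enter. The snippet below is only a minimal userspace sketch of that stringify-and-report pattern, not the kernel macro itself; the fprintf() call stands in for the real tracepoint and main() is purely illustrative.

/*
 * Minimal userspace sketch of the stringify-and-report pattern behind
 * KVM's CC() consistency-check wrapper.  The real macro lives in
 * arch/x86/kvm/vmx/nested.c and reports through the
 * kvm_nested_vmenter_failed tracepoint instead of fprintf().
 * Uses GCC/Clang statement expressions, as the kernel does.
 */
#include <stdbool.h>
#include <stdio.h>

#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		fprintf(stderr, "nested VM-Enter check failed: %s\n",	\
			#consistency_check);				\
	failed;								\
})

int main(void)
{
	bool launch = true;
	int launch_state = 1;	/* pretend vmcs12->launch_state is already set */

	/* Mirrors the VMLAUNCH vs. VMRESUME check wrapped by this patch. */
	if (CC(launch_state == launch))
		fprintf(stderr, "would return VMXERR_VMLAUNCH_NONCLEAR_VMCS\n");
	return 0;
}

With the tracepoint enabled (typically via events/kvm/kvm_nested_vmenter_failed under tracefs; the exact path depends on the kernel and mount point), the newly wrapped checks report by name, which is what makes the VMLAUNCH vs. VMRESUME failure described in the commit message straightforward to diagnose.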