Commit 31a6cd7f authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-x86-vmx-6.10' of https://github.com/kvm-x86/linux into HEAD

KVM VMX changes for 6.10:

 - Clear vmcs.EXIT_QUALIFICATION when synthesizing an EPT Misconfig VM-Exit to
   L1, as per the SDM.

 - Move kvm_vcpu_arch's exit_qualification into x86_exception, as the field is
   used only when synthesizing nested EPT violations, i.e. it's not the vCPU's
   "real" exit_qualification, which is tracked elsewhere.

 - Add a sanity check to assert that EPT Violations are the only sources of
   nested PML Full VM-Exits.
parents 56f40708 23ffe4bb
...@@ -997,9 +997,6 @@ struct kvm_vcpu_arch { ...@@ -997,9 +997,6 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control; u64 msr_kvm_poll_control;
/* set at EPT violation at this point */
unsigned long exit_qualification;
/* pv related host specific info */ /* pv related host specific info */
struct { struct {
bool pv_unhalted; bool pv_unhalted;
......
...@@ -26,6 +26,7 @@ struct x86_exception { ...@@ -26,6 +26,7 @@ struct x86_exception {
bool nested_page_fault; bool nested_page_fault;
u64 address; /* cr2 or nested page fault gpa */ u64 address; /* cr2 or nested page fault gpa */
u8 async_page_fault; u8 async_page_fault;
unsigned long exit_qualification;
}; };
/* /*
......
...@@ -497,21 +497,21 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, ...@@ -497,21 +497,21 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
* The other bits are set to 0. * The other bits are set to 0.
*/ */
if (!(errcode & PFERR_RSVD_MASK)) { if (!(errcode & PFERR_RSVD_MASK)) {
vcpu->arch.exit_qualification &= (EPT_VIOLATION_GVA_IS_VALID | walker->fault.exit_qualification = 0;
EPT_VIOLATION_GVA_TRANSLATED);
if (write_fault) if (write_fault)
vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE; walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
if (user_fault) if (user_fault)
vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ; walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ;
if (fetch_fault) if (fetch_fault)
vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR; walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
/* /*
* Note, pte_access holds the raw RWX bits from the EPTE, not * Note, pte_access holds the raw RWX bits from the EPTE, not
* ACC_*_MASK flags! * ACC_*_MASK flags!
*/ */
vcpu->arch.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) << walker->fault.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
EPT_VIOLATION_RWX_SHIFT; EPT_VIOLATION_RWX_SHIFT;
} }
#endif #endif
walker->fault.address = addr; walker->fault.address = addr;
......
...@@ -409,18 +409,40 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, ...@@ -409,18 +409,40 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
{ {
struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification;
u32 vm_exit_reason; u32 vm_exit_reason;
unsigned long exit_qualification = vcpu->arch.exit_qualification;
if (vmx->nested.pml_full) { if (vmx->nested.pml_full) {
vm_exit_reason = EXIT_REASON_PML_FULL; vm_exit_reason = EXIT_REASON_PML_FULL;
vmx->nested.pml_full = false; vmx->nested.pml_full = false;
exit_qualification &= INTR_INFO_UNBLOCK_NMI;
/*
* It should be impossible to trigger a nested PML Full VM-Exit
* for anything other than an EPT Violation from L2. KVM *can*
* trigger nEPT page fault injection in response to an EPT
* Misconfig, e.g. if the MMIO SPTE was stale and L1's EPT
* tables also changed, but KVM should not treat EPT Misconfig
* VM-Exits as writes.
*/
WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION);
/*
* PML Full and EPT Violation VM-Exits both use bit 12 to report
* "NMI unblocking due to IRET", i.e. the bit can be propagated
* as-is from the original EXIT_QUALIFICATION.
*/
exit_qualification = vmx_get_exit_qual(vcpu) & INTR_INFO_UNBLOCK_NMI;
} else { } else {
if (fault->error_code & PFERR_RSVD_MASK) if (fault->error_code & PFERR_RSVD_MASK) {
vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
else exit_qualification = 0;
} else {
exit_qualification = fault->exit_qualification;
exit_qualification |= vmx_get_exit_qual(vcpu) &
(EPT_VIOLATION_GVA_IS_VALID |
EPT_VIOLATION_GVA_TRANSLATED);
vm_exit_reason = EXIT_REASON_EPT_VIOLATION; vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
}
/* /*
* Although the caller (kvm_inject_emulated_page_fault) would * Although the caller (kvm_inject_emulated_page_fault) would
......
...@@ -5783,8 +5783,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) ...@@ -5783,8 +5783,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
vcpu->arch.exit_qualification = exit_qualification;
/* /*
* Check that the GPA doesn't exceed physical memory limits, as that is * Check that the GPA doesn't exceed physical memory limits, as that is
* a guest page fault. We have to emulate the instruction here, because * a guest page fault. We have to emulate the instruction here, because
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment