Commit 5d55a052 authored by Paolo Bonzini

Merge tag 'kvm-x86-mmu-6.12' of https://github.com/kvm-x86/linux into HEAD

KVM x86 MMU changes for 6.12:

 - Overhaul the "unprotect and retry" logic to more precisely identify cases
   where retrying is actually helpful, and to harden all retry paths against
   putting the guest into an infinite retry loop.

 - Add support for yielding, e.g. to honor NEED_RESCHED, when zapping rmaps in
   the shadow MMU.

 - Refactor pieces of the shadow MMU related to aging SPTEs in preparation for
   adding MGLRU support in KVM.

 - Misc cleanups
parents c345344e 9a5bff7f
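
For orientation, here is a minimal sketch of how the two "unprotect and retry" entry points declared in the kvm_host.h hunk below fit together. The wrapper function is illustrative only, not code from this merge; it assumes KVM's internal types (struct kvm_vcpu, gpa_t) and the declarations shown in the diff.

/*
 * Illustrative only (not part of this merge): shows the intended split
 * between the two retry entry points declared below in kvm_host.h.
 */
static bool example_unprotect_retry_policy(struct kvm_vcpu *vcpu,
					   gpa_t cr2_or_gpa,
					   bool emulation_failed)
{
	/*
	 * On emulation failure, zap any shadow pages for the gfn and retry
	 * unconditionally (always_retry = true); re-executing in the guest
	 * is the only remaining option short of exiting to userspace.
	 */
	if (emulation_failed)
		return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);

	/*
	 * On a write-protection #PF from a non-page-table-writing
	 * instruction, retry only when unprotecting the gfn is expected to
	 * make forward progress (always_retry = false), so the guest can't
	 * be put into an endless unprotect/re-fault loop.
	 */
	return kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa);
}

The x86.c hunks at the end of the diff show the real call sites for both variants.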
@@ -282,10 +282,6 @@ enum x86_intercept_stage;
 #define PFERR_PRIVATE_ACCESS	BIT_ULL(49)
 #define PFERR_SYNTHETIC_MASK	(PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
 
-#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
-				 PFERR_WRITE_MASK |		\
-				 PFERR_PRESENT_MASK)
-
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
 /*
@@ -2142,7 +2138,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
 
 void kvm_update_dr7(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+				       bool always_retry);
+
+static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
+						   gpa_t cr2_or_gpa)
+{
+	return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
+}
+
 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 			ulong roots_to_free);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
......
This diff is collapsed.
@@ -258,6 +258,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
  * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
  * RET_PF_RETRY: let CPU fault again on the address.
  * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
+ *                         gfn and retry, or emulate the instruction directly.
  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
  * RET_PF_FIXED: The faulting entry has been fixed.
  * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
@@ -274,6 +276,7 @@ enum {
 	RET_PF_CONTINUE = 0,
 	RET_PF_RETRY,
 	RET_PF_EMULATE,
+	RET_PF_WRITE_PROTECTED,
 	RET_PF_INVALID,
 	RET_PF_FIXED,
 	RET_PF_SPURIOUS,
......
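
The consumer of the new RET_PF_WRITE_PROTECTED code lives in mmu.c, whose diff is collapsed above. As a rough illustration of the contract described by the comment (unprotect the gfn and retry, or fall back to emulation), a handler could dispatch along these lines; the function below is hypothetical and is not the actual mmu.c code.

/*
 * Hypothetical sketch of the RET_PF_WRITE_PROTECTED contract; the real
 * handling is in mmu.c (collapsed in this view).
 */
static int example_handle_write_protected(struct kvm_vcpu *vcpu,
					  gpa_t cr2_or_gpa, int emulation_type)
{
	/* Preferred path: zap the offending SPTEs and re-enter the guest. */
	if (kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
		return 1;

	/* Otherwise emulate the faulting instruction directly. */
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type,
				       NULL, 0);
}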
@@ -57,6 +57,7 @@
 TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
 TRACE_DEFINE_ENUM(RET_PF_RETRY);
 TRACE_DEFINE_ENUM(RET_PF_EMULATE);
+TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
 TRACE_DEFINE_ENUM(RET_PF_INVALID);
 TRACE_DEFINE_ENUM(RET_PF_FIXED);
 TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
......
@@ -646,10 +646,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	 * really care if it changes underneath us after this point).
 	 */
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
-		goto out_gpte_changed;
+		return RET_PF_RETRY;
 
 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
-		goto out_gpte_changed;
+		return RET_PF_RETRY;
 
 	/*
 	 * Load a new root and retry the faulting instruction in the extremely
@@ -659,7 +659,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	 */
 	if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
 		kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
-		goto out_gpte_changed;
+		return RET_PF_RETRY;
 	}
 
 	for_each_shadow_entry(vcpu, fault->addr, it) {
@@ -674,34 +674,38 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
 					  false, access);
 
-		if (sp != ERR_PTR(-EEXIST)) {
-			/*
-			 * We must synchronize the pagetable before linking it
-			 * because the guest doesn't need to flush tlb when
-			 * the gpte is changed from non-present to present.
-			 * Otherwise, the guest may use the wrong mapping.
-			 *
-			 * For PG_LEVEL_4K, kvm_mmu_get_page() has already
-			 * synchronized it transiently via kvm_sync_page().
-			 *
-			 * For higher level pagetable, we synchronize it via
-			 * the slower mmu_sync_children().  If it needs to
-			 * break, some progress has been made; return
-			 * RET_PF_RETRY and retry on the next #PF.
-			 * KVM_REQ_MMU_SYNC is not necessary but it
-			 * expedites the process.
-			 */
-			if (sp->unsync_children &&
-			    mmu_sync_children(vcpu, sp, false))
-				return RET_PF_RETRY;
-		}
+		/*
+		 * Synchronize the new page before linking it, as the CPU (KVM)
+		 * is architecturally disallowed from inserting non-present
+		 * entries into the TLB, i.e. the guest isn't required to flush
+		 * the TLB when changing the gPTE from non-present to present.
+		 *
+		 * For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already
+		 * synchronized the page via kvm_sync_page().
+		 *
+		 * For higher level pages, which cannot be unsync themselves
+		 * but can have unsync children, synchronize via the slower
+		 * mmu_sync_children().  If KVM needs to drop mmu_lock due to
+		 * contention or to reschedule, instruct the caller to retry
+		 * the #PF (mmu_sync_children() ensures forward progress will
+		 * be made).
+		 */
+		if (sp != ERR_PTR(-EEXIST) && sp->unsync_children &&
+		    mmu_sync_children(vcpu, sp, false))
+			return RET_PF_RETRY;
 
 		/*
-		 * Verify that the gpte in the page we've just write
-		 * protected is still there.
+		 * Verify that the gpte in the page, which is now either
+		 * write-protected or unsync, wasn't modified between the fault
+		 * and acquiring mmu_lock.  This needs to be done even when
+		 * reusing an existing shadow page to ensure the information
+		 * gathered by the walker matches the information stored in the
+		 * shadow page (which could have been modified by a different
+		 * vCPU even if the page was already linked).  Holding mmu_lock
+		 * prevents the shadow page from changing after this point.
 		 */
 		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
-			goto out_gpte_changed;
+			return RET_PF_RETRY;
 
 		if (sp != ERR_PTR(-EEXIST))
 			link_shadow_page(vcpu, it.sptep, sp);
@@ -755,9 +759,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return ret;
-
-out_gpte_changed:
-	return RET_PF_RETRY;
 }
 
 /*
@@ -805,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
 	if (page_fault_handle_page_track(vcpu, fault)) {
 		shadow_page_table_clear_flood(vcpu, fault->addr);
-		return RET_PF_EMULATE;
+		return RET_PF_WRITE_PROTECTED;
 	}
 
 	r = mmu_topup_memory_caches(vcpu, true);
......
@@ -1046,10 +1046,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	 * protected, emulation is needed. If the emulation was skipped,
 	 * the vCPU would have the same fault again.
 	 */
-	if (wrprot) {
-		if (fault->write)
-			ret = RET_PF_EMULATE;
-	}
+	if (wrprot && fault->write)
+		ret = RET_PF_WRITE_PROTECTED;
 
 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
 	if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
......
@@ -8854,60 +8854,13 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
 	return 1;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-				  int emulation_type)
+static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
+					       gpa_t cr2_or_gpa,
+					       int emulation_type)
 {
-	gpa_t gpa = cr2_or_gpa;
-	kvm_pfn_t pfn;
-
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
 		return false;
 
-	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
-	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
-		return false;
-
-	if (!vcpu->arch.mmu->root_role.direct) {
-		/*
-		 * Write permission should be allowed since only
-		 * write access need to be emulated.
-		 */
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
-		/*
-		 * If the mapping is invalid in guest, let cpu retry
-		 * it to generate fault.
-		 */
-		if (gpa == INVALID_GPA)
-			return true;
-	}
-
-	/*
-	 * Do not retry the unhandleable instruction if it faults on the
-	 * readonly host memory, otherwise it will goto a infinite loop:
-	 * retry instruction -> write #PF -> emulation fail -> retry
-	 * instruction -> ...
-	 */
-	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-
-	/*
-	 * If the instruction failed on the error pfn, it can not be fixed,
-	 * report the error to userspace.
-	 */
-	if (is_error_noslot_pfn(pfn))
-		return false;
-
-	kvm_release_pfn_clean(pfn);
-
-	/*
-	 * If emulation may have been triggered by a write to a shadowed page
-	 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
-	 * guest to let the CPU re-execute the instruction in the hope that the
-	 * CPU can cleanly execute the instruction that KVM failed to emulate.
-	 */
-	if (vcpu->kvm->arch.indirect_shadow_pages)
-		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-
 	/*
 	 * If the failed instruction faulted on an access to page tables that
 	 * are used to translate any part of the instruction, KVM can't resolve
@@ -8918,54 +8871,24 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 * then zap the SPTE to unprotect the gfn, and then do it all over
 	 * again.  Report the error to userspace.
 	 */
-	return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
-}
-
-static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
-			      gpa_t cr2_or_gpa, int emulation_type)
-{
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
-
-	last_retry_eip = vcpu->arch.last_retry_eip;
-	last_retry_addr = vcpu->arch.last_retry_addr;
+	if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
+		return false;
 
 	/*
-	 * If the emulation is caused by #PF and it is non-page_table
-	 * writing instruction, it means the VM-EXIT is caused by shadow
-	 * page protected, we can zap the shadow page and retry this
-	 * instruction directly.
-	 *
-	 * Note: if the guest uses a non-page-table modifying instruction
-	 * on the PDE that points to the instruction, then we will unmap
-	 * the instruction and go to an infinite loop.  So, we cache the
-	 * last retried eip and the last fault address, if we meet the eip
-	 * and the address again, we can break out of the potential infinite
-	 * loop.
+	 * If emulation may have been triggered by a write to a shadowed page
+	 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
+	 * guest to let the CPU re-execute the instruction in the hope that the
+	 * CPU can cleanly execute the instruction that KVM failed to emulate.
 	 */
-	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
-
-	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
-		return false;
-
-	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
-	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
-		return false;
-
-	if (x86_page_table_writing_insn(ctxt))
-		return false;
-
-	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
-		return false;
-
-	vcpu->arch.last_retry_eip = ctxt->eip;
-	vcpu->arch.last_retry_addr = cr2_or_gpa;
-
-	if (!vcpu->arch.mmu->root_role.direct)
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
-	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-
+	__kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
+
+	/*
+	 * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
+	 * all SPTEs were already zapped by a different task.  The alternative
+	 * is to report the error to userspace and likely terminate the guest,
+	 * and the last_retry_{eip,addr} checks will prevent retrying the page
+	 * fault indefinitely, i.e. there's nothing to lose by retrying.
+	 */
 	return true;
 }
@@ -9165,6 +9088,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
 	bool writeback = true;
 
+	if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+	    (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
+	     WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
+		emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
+
 	r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
 	if (r != X86EMUL_CONTINUE) {
 		if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
@@ -9195,8 +9123,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			kvm_queue_exception(vcpu, UD_VECTOR);
 			return 1;
 		}
-		if (reexecute_instruction(vcpu, cr2_or_gpa,
-					  emulation_type))
+		if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+						       emulation_type))
 			return 1;
 
 		if (ctxt->have_exception &&
@@ -9243,7 +9171,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		return 1;
 	}
 
-	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
+	/*
+	 * If emulation was caused by a write-protection #PF on a non-page_table
+	 * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
+	 * and retry the instruction, as the vCPU is likely no longer using the
+	 * gfn as a page table.
+	 */
+	if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+	    !x86_page_table_writing_insn(ctxt) &&
+	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
 		return 1;
 
 	/* this is needed for vmware backdoor interface to work since it
@@ -9274,7 +9210,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		return 1;
 
 	if (r == EMULATION_FAILED) {
-		if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
+		if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+						       emulation_type))
 			return 1;
 
 		return handle_emulation_failure(vcpu, emulation_type);
......