Commit 5d55a052 authored by Paolo Bonzini

Merge tag 'kvm-x86-mmu-6.12' of https://github.com/kvm-x86/linux into HEAD

KVM x86 MMU changes for 6.12:

 - Overhaul the "unprotect and retry" logic to more precisely identify cases
   where retrying is actually helpful, and to harden all retry paths against
   putting the guest into an infinite retry loop (see the retry-guard sketch
   below).

 - Add support for yielding, e.g. to honor NEED_RESCHED, when zapping rmaps in
   the shadow MMU.

 - Refactor pieces of the shadow MMU related to aging SPTEs in preparation for
   adding MGLRU support in KVM.

 - Misc cleanups
parents c345344e 9a5bff7f
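
The first item above boils down to a simple guard: when a write to a gfn that shadows a guest page table forces KVM to zap ("unprotect") SPTEs and re-execute the faulting instruction, KVM must remember which instruction and address it already retried so that a guest hitting the same fault over and over cannot spin forever. Below is a minimal, self-contained C sketch of that guard; the retry_state and should_retry_fault() names are hypothetical, while the idea mirrors the last_retry_eip/last_retry_addr fields visible in the x86_emulate_instruction() hunks later in this diff.

/*
 * Illustrative sketch only (plain userspace C, hypothetical names): refuse to
 * retry the exact same fault twice in a row, mirroring the role of KVM's
 * last_retry_eip/last_retry_addr bookkeeping.
 */
#include <stdbool.h>
#include <stdint.h>

struct retry_state {
        uint64_t last_retry_rip;        /* RIP of the last retried instruction */
        uint64_t last_retry_addr;       /* faulting address of that retry */
};

/* Return true if the fault should be retried after unprotecting the gfn,
 * false if this exact instruction/address pair was already retried once. */
static bool should_retry_fault(struct retry_state *rs, uint64_t rip,
                               uint64_t fault_addr)
{
        if (rs->last_retry_rip == rip && rs->last_retry_addr == fault_addr)
                return false;   /* same instruction, same address: give up */

        rs->last_retry_rip = rip;
        rs->last_retry_addr = fault_addr;
        return true;
}
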
......@@ -282,10 +282,6 @@ enum x86_intercept_stage;
#define PFERR_PRIVATE_ACCESS BIT_ULL(49)
#define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
PFERR_WRITE_MASK | \
PFERR_PRESENT_MASK)
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
......@@ -2142,7 +2138,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
bool always_retry);
static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
gpa_t cr2_or_gpa)
{
return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
}
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
......
This diff is collapsed.
......@@ -258,6 +258,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
* RET_PF_CONTINUE: So far, so good, keep handling the page fault.
* RET_PF_RETRY: let CPU fault again on the address.
* RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
* RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
* gfn and retry, or emulate the instruction directly.
* RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
* RET_PF_FIXED: The faulting entry has been fixed.
* RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
......@@ -274,6 +276,7 @@ enum {
RET_PF_CONTINUE = 0,
RET_PF_RETRY,
RET_PF_EMULATE,
RET_PF_WRITE_PROTECTED,
RET_PF_INVALID,
RET_PF_FIXED,
RET_PF_SPURIOUS,
......
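
The comment and enum additions above introduce RET_PF_WRITE_PROTECTED so that the individual MMUs (see the shadow-MMU and TDP MMU hunks further down, which now return it where they previously returned RET_PF_EMULATE) no longer decide between emulation and unprotect-and-retry; the common fault path does. A minimal, self-contained sketch of that dispatch follows. Only the RET_PF_* values come from the hunks above; handle_fault_result(), unprotect_and_retry() and emulate_insn() are hypothetical stand-ins, not KVM functions.

/* Illustrative dispatcher for the RET_PF_* codes; not KVM code. */
#include <stdbool.h>

enum ret_pf {
        RET_PF_CONTINUE = 0,
        RET_PF_RETRY,
        RET_PF_EMULATE,
        RET_PF_WRITE_PROTECTED,
        RET_PF_INVALID,
        RET_PF_FIXED,
        RET_PF_SPURIOUS,
};

static bool unprotect_and_retry(unsigned long gpa) { (void)gpa; return false; }
static int emulate_insn(unsigned long gpa) { (void)gpa; return 1; }

static int handle_fault_result(enum ret_pf r, unsigned long gpa)
{
        switch (r) {
        case RET_PF_RETRY:
        case RET_PF_FIXED:
        case RET_PF_SPURIOUS:
                return 1;               /* re-enter the guest */
        case RET_PF_EMULATE:
                return emulate_insn(gpa);
        case RET_PF_WRITE_PROTECTED:
                /*
                 * The gfn shadows a guest page table: zap the relevant SPTEs
                 * and re-enter the guest if possible, otherwise emulate the
                 * faulting instruction directly.
                 */
                if (unprotect_and_retry(gpa))
                        return 1;
                return emulate_insn(gpa);
        default:
                /* RET_PF_CONTINUE/RET_PF_INVALID are consumed earlier in KVM. */
                return 0;
        }
}
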
......@@ -57,6 +57,7 @@
TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
......
......@@ -646,10 +646,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
* really care if it changes underneath us after this point).
*/
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
return RET_PF_RETRY;
if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
goto out_gpte_changed;
return RET_PF_RETRY;
/*
* Load a new root and retry the faulting instruction in the extremely
......@@ -659,7 +659,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
*/
if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
goto out_gpte_changed;
return RET_PF_RETRY;
}
for_each_shadow_entry(vcpu, fault->addr, it) {
......@@ -674,34 +674,38 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
false, access);
if (sp != ERR_PTR(-EEXIST)) {
/*
* We must synchronize the pagetable before linking it
* because the guest doesn't need to flush tlb when
* the gpte is changed from non-present to present.
* Otherwise, the guest may use the wrong mapping.
*
* For PG_LEVEL_4K, kvm_mmu_get_page() has already
* synchronized it transiently via kvm_sync_page().
*
* For higher level pagetable, we synchronize it via
* the slower mmu_sync_children(). If it needs to
* break, some progress has been made; return
* RET_PF_RETRY and retry on the next #PF.
* KVM_REQ_MMU_SYNC is not necessary but it
* expedites the process.
*/
if (sp->unsync_children &&
mmu_sync_children(vcpu, sp, false))
return RET_PF_RETRY;
}
/*
* Synchronize the new page before linking it, as the CPU (KVM)
* is architecturally disallowed from inserting non-present
* entries into the TLB, i.e. the guest isn't required to flush
* the TLB when changing the gPTE from non-present to present.
*
* For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already
* synchronized the page via kvm_sync_page().
*
* For higher level pages, which cannot be unsync themselves
* but can have unsync children, synchronize via the slower
* mmu_sync_children(). If KVM needs to drop mmu_lock due to
* contention or to reschedule, instruct the caller to retry
* the #PF (mmu_sync_children() ensures forward progress will
* be made).
*/
if (sp != ERR_PTR(-EEXIST) && sp->unsync_children &&
mmu_sync_children(vcpu, sp, false))
return RET_PF_RETRY;
/*
* Verify that the gpte in the page we've just write
* protected is still there.
* Verify that the gpte in the page, which is now either
* write-protected or unsync, wasn't modified between the fault
* and acquiring mmu_lock. This needs to be done even when
* reusing an existing shadow page to ensure the information
* gathered by the walker matches the information stored in the
* shadow page (which could have been modified by a different
* vCPU even if the page was already linked). Holding mmu_lock
* prevents the shadow page from changing after this point.
*/
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
goto out_gpte_changed;
return RET_PF_RETRY;
if (sp != ERR_PTR(-EEXIST))
link_shadow_page(vcpu, it.sptep, sp);
......@@ -755,9 +759,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return ret;
out_gpte_changed:
return RET_PF_RETRY;
}
/*
......@@ -805,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (page_fault_handle_page_track(vcpu, fault)) {
shadow_page_table_clear_flood(vcpu, fault->addr);
return RET_PF_EMULATE;
return RET_PF_WRITE_PROTECTED;
}
r = mmu_topup_memory_caches(vcpu, true);
......
......@@ -1046,10 +1046,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
* protected, emulation is needed. If the emulation was skipped,
* the vCPU would have the same fault again.
*/
if (wrprot) {
if (fault->write)
ret = RET_PF_EMULATE;
}
if (wrprot && fault->write)
ret = RET_PF_WRITE_PROTECTED;
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
......
......@@ -8854,60 +8854,13 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type)
static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
gpa_t cr2_or_gpa,
int emulation_type)
{
gpa_t gpa = cr2_or_gpa;
kvm_pfn_t pfn;
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false;
if (!vcpu->arch.mmu->root_role.direct) {
/*
* Write permission should be allowed since only
* write access need to be emulated.
*/
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
/*
* If the mapping is invalid in guest, let cpu retry
* it to generate fault.
*/
if (gpa == INVALID_GPA)
return true;
}
/*
* Do not retry the unhandleable instruction if it faults on the
* readonly host memory, otherwise it will goto a infinite loop:
* retry instruction -> write #PF -> emulation fail -> retry
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the instruction failed on the error pfn, it can not be fixed,
* report the error to userspace.
*/
if (is_error_noslot_pfn(pfn))
return false;
kvm_release_pfn_clean(pfn);
/*
* If emulation may have been triggered by a write to a shadowed page
* table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
* guest to let the CPU re-execute the instruction in the hope that the
* CPU can cleanly execute the instruction that KVM failed to emulate.
*/
if (vcpu->kvm->arch.indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the failed instruction faulted on an access to page tables that
* are used to translate any part of the instruction, KVM can't resolve
......@@ -8918,54 +8871,24 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* then zap the SPTE to unprotect the gfn, and then do it all over
* again. Report the error to userspace.
*/
return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
}
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
gpa_t cr2_or_gpa, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
return false;
/*
* If the emulation is caused by #PF and it is non-page_table
* writing instruction, it means the VM-EXIT is caused by shadow
* page protected, we can zap the shadow page and retry this
* instruction directly.
*
* Note: if the guest uses a non-page-table modifying instruction
* on the PDE that points to the instruction, then we will unmap
* the instruction and go to an infinite loop. So, we cache the
* last retried eip and the last fault address, if we meet the eip
* and the address again, we can break out of the potential infinite
* loop.
* If emulation may have been triggered by a write to a shadowed page
* table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
* guest to let the CPU re-execute the instruction in the hope that the
* CPU can cleanly execute the instruction that KVM failed to emulate.
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false;
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2_or_gpa;
if (!vcpu->arch.mmu->root_role.direct)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
__kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
/*
* Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
* all SPTEs were already zapped by a different task. The alternative
* is to report the error to userspace and likely terminate the guest,
* and the last_retry_{eip,addr} checks will prevent retrying the page
* fault indefinitely, i.e. there's nothing to lose by retrying.
*/
return true;
}
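
Since this rendering drops the diff's +/- markers, the surviving helper is hard to read in place. Reconstructed from the added and kept lines above, with the comments abbreviated, the new function reads approximately as follows (a reading aid, not a verbatim copy of the tree):

static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
                                               gpa_t cr2_or_gpa,
                                               int emulation_type)
{
        if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
                return false;

        /*
         * Faults on page tables used to translate the instruction itself
         * can't be resolved by unprotecting the gfn; report the error.
         */
        if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
                return false;

        /*
         * Unprotect the gfn and retry even if _this_ vCPU didn't zap any
         * SPTEs; another task may already have, and the last_retry_{eip,addr}
         * checks prevent retrying the fault indefinitely.
         */
        __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
        return true;
}
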
......@@ -9165,6 +9088,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
bool writeback = true;
if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
(WARN_ON_ONCE(is_guest_mode(vcpu)) ||
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
if (r != X86EMUL_CONTINUE) {
if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
......@@ -9195,8 +9123,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
if (reexecute_instruction(vcpu, cr2_or_gpa,
emulation_type))
if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
emulation_type))
return 1;
if (ctxt->have_exception &&
......@@ -9243,7 +9171,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
return 1;
}
if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
/*
* If emulation was caused by a write-protection #PF on a non-page_table
* writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
* and retry the instruction, as the vCPU is likely no longer using the
* gfn as a page table.
*/
if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
!x86_page_table_writing_insn(ctxt) &&
kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
return 1;
/* this is needed for vmware backdoor interface to work since it
......@@ -9274,7 +9210,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
return 1;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
emulation_type))
return 1;
return handle_emulation_failure(vcpu, emulation_type);
......