Commit cd42853e authored by Lai Jiangshan, committed by Sean Christopherson

kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_addr()

kvm_mmu_invalidate_addr() is currently called with @root_hpa set to
either @mmu->root.hpa or INVALID_PAGE: @mmu->root.hpa invalidates the
gva for the current root (the same meaning as KVM_MMU_ROOT_CURRENT),
and INVALID_PAGE invalidates the gva for all roots (the same meaning as
KVM_MMU_ROOTS_ALL).
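
For reference, the KVM_MMU_ROOT_XXX flags are defined in
arch/x86/include/asm/kvm_host.h; at the time of this series they look
roughly like the following (a sketch for illustration, not part of this
patch):

	#define KVM_MMU_ROOT_CURRENT		BIT(0)
	#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
	#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)

Each bit selects one root: bit 0 is the current root and bits 1..N are
the cached previous roots, so an "unsigned long roots" bitmask can name
any subset of them, and any bit outside KVM_MMU_ROOTS_ALL is invalid.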

Change the argument type of kvm_mmu_invalidate_addr() to a bitmask of
KVM_MMU_ROOT_XXX flags so that the function can be reused by
kvm_mmu_invpcid_gva() and nested_ept_invalidate_addr() to invalidate a
gva for different sets of roots.
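
As a sketch of that reuse (hypothetical, not part of this patch),
kvm_mmu_invpcid_gva() could build a mask containing only the roots
whose PCID matches and then make a single call:

	/* Hypothetical sketch: invalidate @gva only in roots matching @pcid. */
	unsigned long roots = 0;
	int i;

	if (pcid == kvm_get_active_pcid(vcpu))
		roots |= KVM_MMU_ROOT_CURRENT;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
			roots |= KVM_MMU_ROOT_PREVIOUS(i);
	}

	if (roots)
		kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);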

No functional change intended.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-9-jiangshanlai@gmail.com
[sean: massage comment slightly]
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent f94db0c8
...
@@ -2045,7 +2045,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 			void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			     u64 addr, hpa_t root_hpa);
+			     u64 addr, unsigned long roots);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
...
...
@@ -5765,10 +5765,12 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			     u64 addr, hpa_t root_hpa)
+			     u64 addr, unsigned long roots)
 {
 	int i;
 
+	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
+
 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
 	if (mmu != &vcpu->arch.guest_mmu) {
 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
@@ -5781,31 +5783,29 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (!mmu->invlpg)
 		return;
 
-	if (root_hpa == INVALID_PAGE) {
+	if (roots & KVM_MMU_ROOT_CURRENT)
 		mmu->invlpg(vcpu, addr, mmu->root.hpa);
 
-		/*
-		 * INVLPG is required to invalidate any global mappings for the VA,
-		 * irrespective of PCID. Since it would take us roughly similar amount
-		 * of work to determine whether any of the prev_root mappings of the VA
-		 * is marked global, or to just sync it blindly, so we might as well
-		 * just always sync it.
-		 *
-		 * Mappings not reachable via the current cr3 or the prev_roots will be
-		 * synced when switching to that cr3, so nothing needs to be done here
-		 * for them.
-		 */
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (VALID_PAGE(mmu->prev_roots[i].hpa))
-				mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
-	} else {
-		mmu->invlpg(vcpu, addr, root_hpa);
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+		if ((roots & KVM_MMU_ROOT_PREVIOUS(i)) &&
+		    VALID_PAGE(mmu->prev_roots[i].hpa))
+			mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
 	}
 }
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+	/*
+	 * INVLPG is required to invalidate any global mappings for the VA,
+	 * irrespective of PCID.  Blindly sync all roots as it would take
+	 * roughly the same amount of work/time to determine whether any of the
+	 * previous roots have a global mapping.
+	 *
+	 * Mappings not reachable via the current or previous cached roots will
+	 * be synced when switching to that new cr3, so nothing needs to be
+	 * done here for them.
+	 */
+	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
...
...
@@ -803,7 +803,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	if ((fault->error_code & PFERR_PRESENT_MASK) &&
 	    !(fault->error_code & PFERR_RSVD_MASK))
 		kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
-					fault_mmu->root.hpa);
+					KVM_MMU_ROOT_CURRENT);
 
 	fault_mmu->inject_page_fault(vcpu, fault);
 }
...