Commit b5129100 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Drop skip MMU sync and TLB flush params from "new PGD" helpers

Drop skip_mmu_sync and skip_tlb_flush from __kvm_mmu_new_pgd() now that
all call sites unconditionally skip both the sync and flush.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609234235.1244004-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d2e56019
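
For orientation, here is a minimal before/after sketch of what the change means for a caller, patterned on the kvm_set_cr3() hunk at the end of the diff below (an illustrative fragment, not a standalone compilable unit):

	/* Before this commit: every caller passed both skip flags as true. */
	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3, true, true);

	/* After this commit: the always-true flags are gone from the API. */
	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);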
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1708,8 +1708,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gva_t gva, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
-		     bool skip_mmu_sync);
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
 		       int tdp_huge_page_level);

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3949,8 +3949,7 @@ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 }
 
 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
-			      union kvm_mmu_page_role new_role,
-			      bool skip_tlb_flush, bool skip_mmu_sync)
+			      union kvm_mmu_page_role new_role)
 {
 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -3965,10 +3964,10 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 	 */
 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 
-	if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+	if (force_flush_and_sync_on_reuse) {
 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-	if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+	}
 
 	/*
 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -3987,11 +3986,9 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 		     to_shadow_page(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
-		     bool skip_mmu_sync)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 {
-	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
-			  skip_tlb_flush, skip_mmu_sync);
+	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
@@ -4684,7 +4681,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
-	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, true, true);
+	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64) {
 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
@@ -4736,7 +4733,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);
 
-	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
+	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -414,7 +414,7 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 		return -EINVAL;
 
 	if (!nested_npt)
-		kvm_mmu_new_pgd(vcpu, cr3, true, true);
+		kvm_mmu_new_pgd(vcpu, cr3);
 
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1129,12 +1129,8 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 			return -EINVAL;
 	}
 
-	/*
-	 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
-	 * flushes are handled by nested_vmx_transition_tlb_flush().
-	 */
 	if (!nested_ept) {
-		kvm_mmu_new_pgd(vcpu, cr3, true, true);
+		kvm_mmu_new_pgd(vcpu, cr3);
 
 		/*
 		 * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1115,7 +1115,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		return 1;
 
 	if (cr3 != kvm_read_cr3(vcpu))
-		kvm_mmu_new_pgd(vcpu, cr3, true, true);
+		kvm_mmu_new_pgd(vcpu, cr3);
 
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
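
Pieced together from the mmu.c hunks above, the sync/flush logic in __kvm_mmu_new_pgd() collapses after this patch to a single conditional. Note that force_flush_and_sync_on_reuse is a pre-existing mmu.c debug knob (if memory serves, exposed as the flush_on_reuse module parameter), not something introduced here:

	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

	/*
	 * The MMU sync and TLB flush are now skipped unconditionally on a
	 * PGD switch, unless the debug knob forces them on root reuse.
	 */
	if (force_flush_and_sync_on_reuse) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}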