Commit f55ac304 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Drop @invalidate_gpa param from kvm_x86_ops' tlb_flush()

Drop @invalidate_gpa from ->tlb_flush() and kvm_vcpu_flush_tlb() now
that all callers either pass %true for the param or ignore it outright
(SVM's svm_flush_tlb_guest() invokes svm_flush_tlb() internally and
somewhat arbitrarily passes %false).

Remove __vmx_flush_tlb() as it is no longer used.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-17-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ad104b5e
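
At a glance, the change to the kvm_x86_ops callback is just a narrower
signature (mirroring the kvm_host.h hunk below); the boolean carried no
information once every caller passed the same value:

    /* Before: every caller passed %true, or the callee ignored the flag. */
    void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);

    /* After: "flush everything" is implicit in the call itself. */
    void (*tlb_flush)(struct kvm_vcpu *vcpu);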
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1103,7 +1103,7 @@ struct kvm_x86_ops {
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
-	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 	int (*tlb_remote_flush)(struct kvm *kvm);
 	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
 			struct kvm_tlb_range *range);
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5179,7 +5179,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	kvm_mmu_load_pgd(vcpu);
-	kvm_x86_ops.tlb_flush(vcpu, true);
+	kvm_x86_ops.tlb_flush(vcpu);
 out:
 	return r;
 }
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -279,7 +279,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
 	svm->nested.intercept = nested_vmcb->control.intercept;
 
-	svm_flush_tlb(&svm->vcpu, true);
+	svm_flush_tlb(&svm->vcpu);
 	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1603,7 +1603,7 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-		svm_flush_tlb(vcpu, true);
+		svm_flush_tlb(vcpu);
 
 	vcpu->arch.cr4 = cr4;
 	if (!npt_enabled)
@@ -3153,7 +3153,7 @@ static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
 	return 0;
 }
 
-void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3172,7 +3172,7 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
 
 static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
-	svm_flush_tlb(vcpu, false);
+	svm_flush_tlb(vcpu);
 }
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -360,7 +360,7 @@ u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+void svm_flush_tlb(struct kvm_vcpu *vcpu);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 
 /* nested.c */
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6083,7 +6083,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 		if (flexpriority_enabled) {
 			sec_exec_control |=
 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-			vmx_flush_tlb(vcpu, true);
+			vmx_flush_tlb(vcpu);
 		}
 		break;
 	case LAPIC_MODE_X2APIC:
@@ -6101,7 +6101,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 {
 	if (!is_guest_mode(vcpu)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
-		vmx_flush_tlb(vcpu, true);
+		vmx_flush_tlb(vcpu);
 	}
 }
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -500,46 +500,28 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
-				bool invalidate_gpa)
-{
-	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
-		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-			return;
-		ept_sync_context(construct_eptp(vcpu,
-						vcpu->arch.mmu->root_hpa));
-	} else {
-		vpid_sync_context(vpid);
-	}
-}
-
-static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
-	 * Flush all EPTP/VPID contexts if the TLB flush _may_ have been
-	 * invoked via kvm_flush_remote_tlbs(), which always passes %true for
-	 * @invalidate_gpa.  Flushing remote TLBs requires all contexts to be
-	 * flushed, not just the active context.
+	 * Flush all EPTP/VPID contexts, as the TLB flush _may_ have been
+	 * invoked via kvm_flush_remote_tlbs().  Flushing remote TLBs requires
+	 * all contexts to be flushed, not just the active context.
 	 *
 	 * Note, this also ensures a deferred TLB flush with VPID enabled and
 	 * EPT disabled invalidates the "correct" VPID, by nuking both L1 and
 	 * L2's VPIDs.
 	 */
-	if (invalidate_gpa) {
-		if (enable_ept) {
-			ept_sync_global();
-		} else if (enable_vpid) {
-			if (cpu_has_vmx_invvpid_global()) {
-				vpid_sync_vcpu_global();
-			} else {
-				vpid_sync_vcpu_single(vmx->vpid);
-				vpid_sync_vcpu_single(vmx->nested.vpid02);
-			}
-		}
-	} else {
-		__vmx_flush_tlb(vcpu, vmx->vpid, false);
-	}
+	if (enable_ept) {
+		ept_sync_global();
+	} else if (enable_vpid) {
+		if (cpu_has_vmx_invvpid_global()) {
+			vpid_sync_vcpu_global();
+		} else {
+			vpid_sync_vcpu_single(vmx->vpid);
+			vpid_sync_vcpu_single(vmx->nested.vpid02);
+		}
+	}
 }
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2690,10 +2690,10 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.time = 0;
 }
 
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	kvm_x86_ops.tlb_flush(vcpu, invalidate_gpa);
+	kvm_x86_ops.tlb_flush(vcpu);
 }
 
 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
@@ -8223,7 +8223,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
 			kvm_mmu_load_pgd(vcpu);
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-			kvm_vcpu_flush_tlb(vcpu, true);
+			kvm_vcpu_flush_tlb(vcpu);
 		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
 			kvm_vcpu_flush_tlb_guest(vcpu);
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
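
More broadly, this is the standard refactoring for an indirect call whose
boolean parameter has collapsed to a constant: fold the constant's behavior
into the callee and drop the parameter. A minimal, self-contained C sketch
of the pattern (hypothetical demo_ops table and names, not kernel code):

    #include <stdio.h>

    /* Hypothetical ops table; stands in for kvm_x86_ops. */
    struct demo_ops {
    	/* was: void (*flush)(bool everything); every caller passed true */
    	void (*flush)(void);
    };

    static void flush_all(void)
    {
    	/* The old 'everything' flag is gone: a flush always means "all". */
    	puts("flush: all contexts");
    }

    int main(void)
    {
    	struct demo_ops ops = { .flush = flush_all };

    	ops.flush();	/* call sites no longer pass a redundant constant */
    	return 0;
    }

The payoff is the same as in the commit: call sites shrink, and the callee
no longer advertises a degree of freedom that no caller actually uses.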