Commit c2ba05cc authored by Wanpeng Li, committed by Radim Krčmář

KVM: X86: introduce invalidate_gpa argument to tlb flush

Introduce a new bool invalidate_gpa argument to kvm_x86_ops->tlb_flush; it
will be used by later patches to flush only the guest TLB.

For VMX, a flush with invalidate_gpa == false will be able to use INVVPID
instead of INVEPT, which invalidates combined mappings while keeping
guest-physical mappings intact.
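
As a rough sketch of the resulting VMX-side logic (the else branch, which
calls vpid_sync_context() in the existing vmx.c code, lies outside the hunks
shown below and is reconstructed here only for orientation):

        static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
                                           bool invalidate_gpa)
        {
                /*
                 * INVEPT only when guest-physical mappings really have to be
                 * dropped, or when VPIDs are disabled and INVEPT is the only
                 * way to flush at all.
                 */
                if (enable_ept && (invalidate_gpa || !enable_vpid)) {
                        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                                return;
                        ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
                } else {
                        /*
                         * INVVPID: invalidates linear and combined mappings
                         * for this VPID, keeps guest-physical (EPT) mappings.
                         */
                        vpid_sync_context(vpid);
                }
        }

Every call site touched by this patch passes invalidate_gpa == true, so the
flushing behaviour does not change yet; only later patches will pass false.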

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 858a43aa
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -966,7 +966,7 @@ struct kvm_x86_ops {
         unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
         void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
-        void (*tlb_flush)(struct kvm_vcpu *vcpu);
+        void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
         void (*run)(struct kvm_vcpu *vcpu);
         int (*handle_exit)(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -285,7 +285,7 @@ static int vgif = true;
 module_param(vgif, int, 0444);
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
@@ -2035,7 +2035,7 @@ static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                 return 1;
         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-                svm_flush_tlb(vcpu);
+                svm_flush_tlb(vcpu, true);
         vcpu->arch.cr4 = cr4;
         if (!npt_enabled)
@@ -2385,7 +2385,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
         svm->vmcb->control.nested_cr3 = __sme_set(root);
         mark_dirty(svm->vmcb, VMCB_NPT);
-        svm_flush_tlb(vcpu);
+        svm_flush_tlb(vcpu, true);
 }
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -2989,7 +2989,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
         svm->nested.intercept = nested_vmcb->control.intercept;
-        svm_flush_tlb(&svm->vcpu);
+        svm_flush_tlb(&svm->vcpu, true);
         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -4785,7 +4785,7 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
         return 0;
 }
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
@@ -5076,7 +5076,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
         svm->vmcb->save.cr3 = __sme_set(root);
         mark_dirty(svm->vmcb, VMCB_CR);
-        svm_flush_tlb(vcpu);
+        svm_flush_tlb(vcpu, true);
 }
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -5090,7 +5090,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
         mark_dirty(svm->vmcb, VMCB_CR);
-        svm_flush_tlb(vcpu);
+        svm_flush_tlb(vcpu, true);
 }
 static int is_disabled(void)
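
For context (not part of this diff): in svm.c of this era the body of
svm_flush_tlb(), whose first line is visible in the @@ -4785 hunk above,
simply requests an ASID flush, so the new invalidate_gpa argument is ignored
on SVM. A sketch, assuming the surrounding svm.c code is otherwise unchanged:

        static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
        {
                struct vcpu_svm *svm = to_svm(vcpu);

                /*
                 * invalidate_gpa is not used here: flushing the vCPU's ASID
                 * drops nested (NPT) and guest mappings alike, so the same
                 * flush is requested either way.
                 */
                if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
                        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
                else
                        svm->asid_generation--;
        }
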
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4140,9 +4140,10 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 #endif
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
+                                   bool invalidate_gpa)
 {
-        if (enable_ept) {
+        if (enable_ept && (invalidate_gpa || !enable_vpid)) {
                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                         return;
                 ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
@@ -4151,15 +4152,15 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
         }
 }
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
-        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
+        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
 {
         if (enable_ept)
-                vmx_flush_tlb(vcpu);
+                vmx_flush_tlb(vcpu, true);
 }
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -4357,7 +4358,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 ept_load_pdptrs(vcpu);
         }
-        vmx_flush_tlb(vcpu);
+        vmx_flush_tlb(vcpu, true);
         vmcs_writel(GUEST_CR3, guest_cr3);
 }
@@ -7932,7 +7933,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                 return kvm_skip_emulated_instruction(vcpu);
         }
-        __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+        __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
         nested_vmx_succeed(vcpu);
         return kvm_skip_emulated_instruction(vcpu);
@@ -10614,11 +10615,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
                         if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                                 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-                                __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+                                __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true);
                         }
                 } else {
                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
-                        vmx_flush_tlb(vcpu);
+                        vmx_flush_tlb(vcpu, true);
                 }
         }
@@ -11323,7 +11324,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                 * L1's vpid. TODO: move to a more elaborate solution, giving
                 * each L2 its own vpid and exposing the vpid feature to L1.
                 */
-                vmx_flush_tlb(vcpu);
+                vmx_flush_tlb(vcpu, true);
         }
         /* Restore posted intr vector. */
         if (nested_cpu_has_posted_intr(vmcs12))
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6781,10 +6781,10 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
         kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
         ++vcpu->stat.tlb_flush;
-        kvm_x86_ops->tlb_flush(vcpu);
+        kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
 }
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -6855,7 +6855,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                         kvm_mmu_sync_roots(vcpu);
                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-                        kvm_vcpu_flush_tlb(vcpu);
+                        kvm_vcpu_flush_tlb(vcpu, true);
                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                         r = 0;