Commit 8a1300ff authored by Sean Christopherson

KVM: x86: Rename Hyper-V remote TLB hooks to match established scheme

Rename the Hyper-V hooks for TLB flushing to match the naming scheme used
by all the other TLB flushing hooks, e.g. in kvm_x86_ops, vendor code,
arch hooks from common code, etc.
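
For reference, the reason every layer has to move in lockstep: the KVM_X86_OP*() macros token-paste each hook name into a kvm_x86_<name> static_call() key, so the kvm-x86-ops.h entry, the kvm_x86_ops member, and every static_call() caller must all use the same spelling. A minimal stand-alone sketch of that naming dependency (simplified; the real code uses DECLARE_STATIC_CALL()/DEFINE_STATIC_CALL_NULL() rather than a plain function pointer):

/* Hypothetical, simplified stand-ins for the real KVM macros. */
struct kvm;

struct kvm_x86_ops {
	int (*flush_remote_tlbs)(struct kvm *kvm);
};

/* Token-paste the hook name into a per-hook symbol, as KVM_X86_OP*() does. */
#define KVM_X86_OP_OPTIONAL(func) \
	extern typeof(((struct kvm_x86_ops *)0)->func) kvm_x86_##func
#define static_call(key)	(key)

KVM_X86_OP_OPTIONAL(flush_remote_tlbs);	/* declares kvm_x86_flush_remote_tlbs */

static inline int example_flush_remote_tlbs(struct kvm *kvm)
{
	/* The caller must spell the exact hook name to reach the pasted symbol. */
	return static_call(kvm_x86_flush_remote_tlbs)(kvm);
}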
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20230405003133.419177-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 40fa907e
@@ -54,8 +54,8 @@ KVM_X86_OP(set_rflags)
 KVM_X86_OP(get_if_flag)
 KVM_X86_OP(flush_tlb_all)
 KVM_X86_OP(flush_tlb_current)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range)
 KVM_X86_OP(flush_tlb_gva)
 KVM_X86_OP(flush_tlb_guest)
 KVM_X86_OP(vcpu_pre_run)
@@ -1588,9 +1588,9 @@ struct kvm_x86_ops {
 	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
 	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
-	int (*tlb_remote_flush)(struct kvm *kvm);
-	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
-			struct kvm_tlb_range *range);
+	int (*flush_remote_tlbs)(struct kvm *kvm);
+	int (*flush_remote_tlbs_range)(struct kvm *kvm,
+				       struct kvm_tlb_range *range);
 
 	/*
 	 * Flush any TLB entries associated with the given GVA.
@@ -1794,8 +1794,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
-	if (kvm_x86_ops.tlb_remote_flush &&
-	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
+	if (kvm_x86_ops.flush_remote_tlbs &&
+	    !static_call(kvm_x86_flush_remote_tlbs)(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -29,8 +29,7 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
 	return hyperv_flush_guest_mapping(root_tdp);
 }
 
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-		struct kvm_tlb_range *range)
+int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range)
 {
 	struct kvm_arch *kvm_arch = &kvm->arch;
 	struct kvm_vcpu *vcpu;
@@ -86,19 +85,19 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
 	spin_unlock(&kvm_arch->hv_root_tdp_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
 
-int hv_remote_flush_tlb(struct kvm *kvm)
+int hv_flush_remote_tlbs(struct kvm *kvm)
 {
-	return hv_remote_flush_tlb_with_range(kvm, NULL);
+	return hv_flush_remote_tlbs_range(kvm, NULL);
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
 
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
 	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
 
-	if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
 		spin_lock(&kvm_arch->hv_root_tdp_lock);
 		vcpu->arch.hv_root_tdp = root_tdp;
 		if (root_tdp != kvm_arch->hv_root_tdp)
@@ -7,9 +7,8 @@
 #define __ARCH_X86_KVM_KVM_ONHYPERV_H__
 
 #if IS_ENABLED(CONFIG_HYPERV)
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-		struct kvm_tlb_range *range);
-int hv_remote_flush_tlb(struct kvm *kvm);
+int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range);
+int hv_flush_remote_tlbs(struct kvm *kvm);
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
 #else /* !CONFIG_HYPERV */
 static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
@@ -270,9 +270,9 @@ static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
 	return mmu->get_guest_pgd(vcpu);
 }
 
-static inline bool kvm_available_flush_tlb_with_range(void)
+static inline bool kvm_available_flush_remote_tlbs_range(void)
 {
-	return kvm_x86_ops.tlb_remote_flush_with_range;
+	return kvm_x86_ops.flush_remote_tlbs_range;
 }
 
 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
@@ -284,8 +284,8 @@ void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
 	range.start_gfn = start_gfn;
 	range.pages = nr_pages;
 
-	if (kvm_x86_ops.tlb_remote_flush_with_range)
-		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range);
+	if (kvm_x86_ops.flush_remote_tlbs_range)
+		ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, &range);
 
 	if (ret)
 		kvm_flush_remote_tlbs(kvm);
@@ -1498,7 +1498,7 @@ static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 		}
 	}
 
-	if (need_flush && kvm_available_flush_tlb_with_range()) {
+	if (need_flush && kvm_available_flush_remote_tlbs_range()) {
 		kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
 		return false;
 	}
@@ -6623,7 +6623,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 							       PG_LEVEL_NUM)) {
 			kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
 
-			if (kvm_available_flush_tlb_with_range())
+			if (kvm_available_flush_remote_tlbs_range())
 				kvm_flush_remote_tlbs_sptep(kvm, sptep);
 			else
 				need_tlb_flush = 1;
@@ -35,9 +35,8 @@ static inline __init void svm_hv_hardware_setup(void)
 	if (npt_enabled &&
 	    ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
 		pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
-		svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
-		svm_x86_ops.tlb_remote_flush_with_range =
-			hv_remote_flush_tlb_with_range;
+		svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+		svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
 	}
 
 	if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
@@ -8432,9 +8432,8 @@ static __init int hardware_setup(void)
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
 	    && enable_ept) {
-		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
-		vmx_x86_ops.tlb_remote_flush_with_range =
-			hv_remote_flush_tlb_with_range;
+		vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+		vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
 	}
 #endif