Commit 9ed3bf41 authored by Sean Christopherson

KVM: x86/mmu: Move filling of Hyper-V's TLB range struct into Hyper-V code

Refactor Hyper-V's range-based TLB flushing API to take a gfn+nr_pages
pair instead of a struct, and bury said struct in Hyper-V specific code.

Passing along two params generates much better code for the common case
where KVM is _not_ running on Hyper-V, as forwarding the flush on to
Hyper-V's hv_flush_remote_tlbs_range() from kvm_flush_remote_tlbs_range()
becomes a tail call.

Cc: David Matlack <dmatlack@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20230405003133.419177-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 8a1300ff
...@@ -482,11 +482,6 @@ struct kvm_mmu { ...@@ -482,11 +482,6 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */ u64 pdptrs[4]; /* pae */
}; };
struct kvm_tlb_range {
u64 start_gfn;
u64 pages;
};
enum pmc_type { enum pmc_type {
KVM_PMC_GP = 0, KVM_PMC_GP = 0,
KVM_PMC_FIXED, KVM_PMC_FIXED,
...@@ -1589,8 +1584,8 @@ struct kvm_x86_ops { ...@@ -1589,8 +1584,8 @@ struct kvm_x86_ops {
void (*flush_tlb_all)(struct kvm_vcpu *vcpu); void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
void (*flush_tlb_current)(struct kvm_vcpu *vcpu); void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
int (*flush_remote_tlbs)(struct kvm *kvm); int (*flush_remote_tlbs)(struct kvm *kvm);
int (*flush_remote_tlbs_range)(struct kvm *kvm, int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
struct kvm_tlb_range *range); gfn_t nr_pages);
/* /*
* Flush any TLB entries associated with the given GVA. * Flush any TLB entries associated with the given GVA.
......
...@@ -10,17 +10,22 @@ ...@@ -10,17 +10,22 @@
#include "hyperv.h" #include "hyperv.h"
#include "kvm_onhyperv.h" #include "kvm_onhyperv.h"
/*
 * Range of guest memory for a Hyper-V remote TLB flush, kept private to the
 * KVM-on-Hyper-V code (buried here so the arch-neutral API can pass a plain
 * gfn+nr_pages pair instead).  start_gfn is the first GFN to flush; pages is
 * the number of pages in the range.
 */
struct kvm_hv_tlb_range {
	u64 start_gfn;
	u64 pages;
};
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
void *data) void *data)
{ {
struct kvm_tlb_range *range = data; struct kvm_hv_tlb_range *range = data;
return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
range->pages); range->pages);
} }
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
struct kvm_tlb_range *range) struct kvm_hv_tlb_range *range)
{ {
if (range) if (range)
return hyperv_flush_guest_mapping_range(root_tdp, return hyperv_flush_guest_mapping_range(root_tdp,
...@@ -29,7 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, ...@@ -29,7 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
return hyperv_flush_guest_mapping(root_tdp); return hyperv_flush_guest_mapping(root_tdp);
} }
int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
struct kvm_hv_tlb_range *range)
{ {
struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_arch *kvm_arch = &kvm->arch;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
...@@ -85,11 +91,21 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) ...@@ -85,11 +91,21 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range)
spin_unlock(&kvm_arch->hv_root_tdp_lock); spin_unlock(&kvm_arch->hv_root_tdp_lock);
return ret; return ret;
} }
/*
 * Flush a range of guest TLB entries via Hyper-V's range-based hypercall.
 * Takes a gfn+nr_pages pair (not a struct) so that the common, non-Hyper-V
 * caller kvm_flush_remote_tlbs_range() can forward the flush as a tail call;
 * the Hyper-V-private kvm_hv_tlb_range is built only on this slow path.
 * Returns 0 on success, negative errno on failure (caller falls back to a
 * full flush).
 */
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	struct kvm_hv_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = nr_pages,
	};

	return __hv_flush_remote_tlbs_range(kvm, &range);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
int hv_flush_remote_tlbs(struct kvm *kvm) int hv_flush_remote_tlbs(struct kvm *kvm)
{ {
return hv_flush_remote_tlbs_range(kvm, NULL); return __hv_flush_remote_tlbs_range(kvm, NULL);
} }
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#define __ARCH_X86_KVM_KVM_ONHYPERV_H__ #define __ARCH_X86_KVM_KVM_ONHYPERV_H__
#if IS_ENABLED(CONFIG_HYPERV) #if IS_ENABLED(CONFIG_HYPERV)
/* Range-based flush takes gfn+nr_pages so the common-path forward can be a tail call. */
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
int hv_flush_remote_tlbs(struct kvm *kvm);
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
#else /* !CONFIG_HYPERV */ #else /* !CONFIG_HYPERV */
......
...@@ -278,15 +278,11 @@ static inline bool kvm_available_flush_remote_tlbs_range(void) ...@@ -278,15 +278,11 @@ static inline bool kvm_available_flush_remote_tlbs_range(void)
/*
 * Flush TLB entries for a range of GFNs on all vCPUs.  Prefers the
 * vendor/hypervisor range-based hook (a tail call in the common case);
 * if the hook is absent or fails, falls back to a full remote TLB flush
 * so correctness never depends on range-flush support.
 */
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
				 gfn_t nr_pages)
{
	int ret = -EOPNOTSUPP;

	if (kvm_x86_ops.flush_remote_tlbs_range)
		ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn,
								   nr_pages);

	/* Any failure (or no hook) must degrade to a full flush, never a skip. */
	if (ret)
		kvm_flush_remote_tlbs(kvm);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment