Commit 4f8396b9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move guts of kvm_arch_init() to standalone helper

Move the guts of kvm_arch_init() to a new helper, kvm_x86_vendor_init(),
so that VMX can do _all_ arch and vendor initialization before calling
kvm_init().  Calling kvm_init() must be the _very_ last step during init,
as kvm_init() exposes /dev/kvm to userspace, i.e. allows creating VMs.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221130230934.1014142-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 451d39e8
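
The ordering contract described above is easy to lose in the per-file diffs, so here it is as a small, self-contained C simulation. Everything below is an illustrative stand-in: the functions are stubs for the real kernel symbols, and vendor_module_init()/vendor_module_exit() are invented names; only the call ordering and the error unwind mirror the patch.

/*
 * Simulation of the init/teardown ordering established by this commit.
 * Stub functions only; compile with: cc -o order order.c
 */
#include <stdio.h>

static int kvm_x86_vendor_init(void)
{
	puts("arch/vendor init (no user-visible side effects yet)");
	return 0;			/* 0 on success, -errno on failure */
}

static void kvm_x86_vendor_exit(void)
{
	puts("arch/vendor teardown");
}

static int kvm_init(void)
{
	puts("common init: /dev/kvm exposed, userspace can create VMs");
	return 0;
}

static void kvm_exit(void)
{
	puts("common teardown: /dev/kvm withdrawn");
}

static int vendor_module_init(void)	/* models svm_init()/vmx_init() */
{
	int r;

	r = kvm_x86_vendor_init();
	if (r)
		return r;

	/* Must come last: after this, userspace can reach KVM. */
	r = kvm_init();
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	kvm_x86_vendor_exit();		/* unwind in reverse order */
	return r;
}

static void vendor_module_exit(void)	/* models svm_exit()/vmx_exit() */
{
	kvm_exit();			/* stop exposing /dev/kvm first */
	kvm_x86_vendor_exit();
}

int main(void)
{
	if (!vendor_module_init())
		vendor_module_exit();
	return 0;
}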
arch/x86/include/asm/kvm_host.h
@@ -1751,6 +1751,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
 #include <asm/kvm-x86-ops.h>
 
+int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
+void kvm_x86_vendor_exit(void);
+
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
arch/x86/kvm/svm/svm.c
@@ -5097,15 +5097,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 static int __init svm_init(void)
 {
+	int r;
+
 	__unused_size_checks();
 
-	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
-			__alignof__(struct vcpu_svm), THIS_MODULE);
+	r = kvm_x86_vendor_init(&svm_init_ops);
+	if (r)
+		return r;
+
+	/*
+	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
+	 * exposed to userspace!
+	 */
+	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+		     __alignof__(struct vcpu_svm), THIS_MODULE);
+	if (r)
+		goto err_kvm_init;
+
+	return 0;
+
+err_kvm_init:
+	kvm_x86_vendor_exit();
+	return r;
 }
 
 static void __exit svm_exit(void)
 {
 	kvm_exit();
+	kvm_x86_vendor_exit();
 }
 
 module_init(svm_init)
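Note the mirror-image teardown in svm_exit(): kvm_exit() runs first, so /dev/kvm is withdrawn from userspace before kvm_x86_vendor_exit() tears down the vendor state that common KVM was built on.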
arch/x86/kvm/vmx/vmx.c
@@ -8572,6 +8572,7 @@ static void vmx_exit(void)
 #endif
 
 	kvm_exit();
+	kvm_x86_vendor_exit();
 
 	vmx_cleanup_l1d_flush();
@@ -8589,23 +8590,25 @@ static int __init vmx_init(void)
 	 */
 	hv_init_evmcs();
 
+	r = kvm_x86_vendor_init(&vmx_init_ops);
+	if (r)
+		return r;
+
 	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
 		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
-		return r;
+		goto err_kvm_init;
 
 	/*
-	 * Must be called after kvm_init() so enable_ept is properly set
+	 * Must be called after common x86 init so enable_ept is properly set
 	 * up. Hand the parameter mitigation value in which was stored in
 	 * the pre module init parser. If no parameter was given, it will
 	 * contain 'auto' which will be turned into the default 'cond'
 	 * mitigation mode.
 	 */
 	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
-	if (r) {
-		vmx_exit();
-		return r;
-	}
+	if (r)
+		goto err_l1d_flush;
 
 	vmx_setup_fb_clear_ctrl();

@@ -8630,5 +8633,11 @@ static int __init vmx_init(void)
 		allow_smaller_maxphyaddr = true;
 
 	return 0;
+
+err_l1d_flush:
+	vmx_exit();
+err_kvm_init:
+	kvm_x86_vendor_exit();
+	return r;
 }
 
 module_init(vmx_init);
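The two labels form the usual kernel unwind ladder: each failure point jumps to the label that undoes everything initialized before it, in reverse order, so a kvm_init() failure tears down only the vendor init, while a later vmx_setup_l1d_flush() failure also unwinds common KVM via vmx_exit().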
arch/x86/kvm/x86.c
@@ -9293,7 +9293,16 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 int kvm_arch_init(void *opaque)
 {
-	struct kvm_x86_init_ops *ops = opaque;
+	return 0;
+}
+
+void kvm_arch_exit(void)
+{
+
+}
+
+int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
+{
 	u64 host_pat;
 	int r;

@@ -9425,8 +9434,9 @@ int kvm_arch_init(void *opaque)
 	kmem_cache_destroy(x86_emulator_cache);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
 
-void kvm_arch_exit(void)
+void kvm_x86_vendor_exit(void)
 {
 	kvm_unregister_perf_callbacks();

@@ -9456,6 +9466,7 @@ void kvm_arch_exit(void)
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
 }
+EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
 
 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
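kvm_arch_init() and kvm_arch_exit() cannot simply be deleted here because common KVM code still calls them from kvm_init() and kvm_exit(); they become empty stubs while their former bodies move into the new helpers. The EXPORT_SYMBOL_GPL() annotations are what allow kvm-intel.ko and kvm-amd.ko, built as separate modules, to call kvm_x86_vendor_init() and kvm_x86_vendor_exit() in kvm.ko.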