Commit d008dfdb authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move init-only kvm_x86_ops to separate struct

Move the kvm_x86_ops functions that are used only within the scope of
kvm_init() into a separate struct, kvm_x86_init_ops.  In addition to
identifying the init-only functions without resorting to code comments,
this also sets the stage for waiting until after ->hardware_setup() to
set kvm_x86_ops.  Setting kvm_x86_ops after ->hardware_setup() is
desirable as many of the hooks are not usable until ->hardware_setup()
completes.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-3-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b9904085
...@@ -1054,12 +1054,8 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical) ...@@ -1054,12 +1054,8 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
} }
struct kvm_x86_ops { struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
int (*hardware_enable)(void); int (*hardware_enable)(void);
void (*hardware_disable)(void); void (*hardware_disable)(void);
int (*check_processor_compatibility)(void);/* __init */
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */ void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void); bool (*cpu_has_accelerated_tpr)(void);
bool (*has_emulated_msr)(int index); bool (*has_emulated_msr)(int index);
...@@ -1260,6 +1256,15 @@ struct kvm_x86_ops { ...@@ -1260,6 +1256,15 @@ struct kvm_x86_ops {
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu); int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
}; };
struct kvm_x86_init_ops {
int (*cpu_has_kvm_support)(void);
int (*disabled_by_bios)(void);
int (*check_processor_compatibility)(void);
int (*hardware_setup)(void);
struct kvm_x86_ops *runtime_ops;
};
struct kvm_arch_async_pf { struct kvm_arch_async_pf {
u32 token; u32 token;
gfn_t gfn; gfn_t gfn;
......
...@@ -7354,11 +7354,7 @@ static void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate) ...@@ -7354,11 +7354,7 @@ static void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
} }
static struct kvm_x86_ops svm_x86_ops __ro_after_init = { static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
.hardware_unsetup = svm_hardware_teardown, .hardware_unsetup = svm_hardware_teardown,
.check_processor_compatibility = svm_check_processor_compat,
.hardware_enable = svm_hardware_enable, .hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable, .hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
...@@ -7483,9 +7479,18 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { ...@@ -7483,9 +7479,18 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.check_nested_events = svm_check_nested_events, .check_nested_events = svm_check_nested_events,
}; };
static struct kvm_x86_init_ops svm_init_ops __initdata = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
.check_processor_compatibility = svm_check_processor_compat,
.runtime_ops = &svm_x86_ops,
};
static int __init svm_init(void) static int __init svm_init(void)
{ {
return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm), return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE); __alignof__(struct vcpu_svm), THIS_MODULE);
} }
......
...@@ -7836,11 +7836,8 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit) ...@@ -7836,11 +7836,8 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit)
} }
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
.hardware_unsetup = hardware_unsetup, .hardware_unsetup = hardware_unsetup,
.check_processor_compatibility = vmx_check_processor_compat,
.hardware_enable = hardware_enable, .hardware_enable = hardware_enable,
.hardware_disable = hardware_disable, .hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority, .cpu_has_accelerated_tpr = report_flexpriority,
...@@ -7975,6 +7972,15 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { ...@@ -7975,6 +7972,15 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.apic_init_signal_blocked = vmx_apic_init_signal_blocked, .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
}; };
static struct kvm_x86_init_ops vmx_init_ops __initdata = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.check_processor_compatibility = vmx_check_processor_compat,
.hardware_setup = hardware_setup,
.runtime_ops = &vmx_x86_ops,
};
static void vmx_cleanup_l1d_flush(void) static void vmx_cleanup_l1d_flush(void)
{ {
if (vmx_l1d_flush_pages) { if (vmx_l1d_flush_pages) {
...@@ -8059,7 +8065,7 @@ static int __init vmx_init(void) ...@@ -8059,7 +8065,7 @@ static int __init vmx_init(void)
} }
#endif #endif
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE); __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r) if (r)
return r; return r;
......
...@@ -7303,8 +7303,8 @@ static struct notifier_block pvclock_gtod_notifier = { ...@@ -7303,8 +7303,8 @@ static struct notifier_block pvclock_gtod_notifier = {
int kvm_arch_init(void *opaque) int kvm_arch_init(void *opaque)
{ {
struct kvm_x86_init_ops *ops = opaque;
int r; int r;
struct kvm_x86_ops *ops = opaque;
if (kvm_x86_ops) { if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n"); printk(KERN_ERR "kvm: already loaded the other module\n");
...@@ -7359,7 +7359,7 @@ int kvm_arch_init(void *opaque) ...@@ -7359,7 +7359,7 @@ int kvm_arch_init(void *opaque)
if (r) if (r)
goto out_free_percpu; goto out_free_percpu;
kvm_x86_ops = ops; kvm_x86_ops = ops->runtime_ops;
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0, PT_DIRTY_MASK, PT64_NX_MASK, 0,
...@@ -9628,6 +9628,7 @@ void kvm_arch_hardware_disable(void) ...@@ -9628,6 +9628,7 @@ void kvm_arch_hardware_disable(void)
int kvm_arch_hardware_setup(void *opaque) int kvm_arch_hardware_setup(void *opaque)
{ {
struct kvm_x86_init_ops *ops = opaque;
int r; int r;
rdmsrl_safe(MSR_EFER, &host_efer); rdmsrl_safe(MSR_EFER, &host_efer);
...@@ -9635,7 +9636,7 @@ int kvm_arch_hardware_setup(void *opaque) ...@@ -9635,7 +9636,7 @@ int kvm_arch_hardware_setup(void *opaque)
if (boot_cpu_has(X86_FEATURE_XSAVES)) if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss); rdmsrl(MSR_IA32_XSS, host_xss);
r = kvm_x86_ops->hardware_setup(); r = ops->hardware_setup();
if (r != 0) if (r != 0)
return r; return r;
...@@ -9670,13 +9671,14 @@ void kvm_arch_hardware_unsetup(void) ...@@ -9670,13 +9671,14 @@ void kvm_arch_hardware_unsetup(void)
int kvm_arch_check_processor_compat(void *opaque) int kvm_arch_check_processor_compat(void *opaque)
{ {
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
struct kvm_x86_init_ops *ops = opaque;
WARN_ON(!irqs_disabled()); WARN_ON(!irqs_disabled());
if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits) if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
return -EIO; return -EIO;
return kvm_x86_ops->check_processor_compatibility(); return ops->check_processor_compatibility();
} }
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment