Commit 3045c483 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Do CPU compatibility checks in x86 code

Move the CPU compatibility checks to pure x86 code, i.e. drop x86's use
of the common kvm_x86_check_cpu_compat() arch hook.  x86 is the only
architecture that "needs" to do per-CPU compatibility checks; moving
the logic to x86 will allow dropping the common code, and will also
give x86 more control over when/how the compatibility checks are
performed, e.g. TDX will need to enable hardware (do VMXON) in order to
perform compatibility checks.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-Id: <20221130230934.1014142-32-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 58ca1930
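
To illustrate the pattern the diff below introduces in x86 code, here is a
minimal standalone C sketch: a small context struct bundles the vendor ops
with a pointer to a result, and a per-CPU callback reports its verdict
through that pointer, since an IPI callback cannot return a value.  The
mock_* names, the plain loop standing in for for_each_online_cpu() plus
smp_call_function_single(), and the stub vendor callback are illustrative
assumptions only; the real code is in the hunks that follow.

/*
 * Simplified userspace mock of the per-CPU compatibility check pattern
 * this patch adds to arch/x86/kvm/x86.c.  The real code dispatches
 * kvm_x86_check_cpu_compat() to each online CPU via
 * smp_call_function_single(); here an ordinary loop stands in for that.
 */
#include <stdio.h>

struct mock_init_ops {
	int (*check_processor_compatibility)(void);
};

/* Mirrors struct kvm_cpu_compat_check: the ops plus a pointer to the result. */
struct mock_cpu_compat_check {
	struct mock_init_ops *ops;
	int *ret;
};

/* Stand-in for a vendor (VMX/SVM) compatibility callback. */
static int mock_vendor_check(void)
{
	return 0;	/* 0 == compatible, negative errno otherwise */
}

/*
 * Mirrors kvm_x86_check_cpu_compat(): the callback cannot return a value
 * to the caller, so it writes through the pointer in the context struct.
 */
static void mock_check_cpu_compat(void *data)
{
	struct mock_cpu_compat_check *c = data;

	*c->ret = c->ops->check_processor_compatibility();
}

int main(void)
{
	struct mock_init_ops ops = { .check_processor_compatibility = mock_vendor_check };
	struct mock_cpu_compat_check c;
	int r = 0, cpu, nr_cpus = 4;

	c.ret = &r;
	c.ops = &ops;

	/* In the kernel: for_each_online_cpu() + smp_call_function_single(). */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		mock_check_cpu_compat(&c);
		if (r < 0) {
			fprintf(stderr, "CPU %d incompatible: %d\n", cpu, r);
			return 1;
		}
	}

	printf("all %d CPUs passed the compatibility check\n", nr_cpus);
	return 0;
}
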
arch/x86/kvm/svm/svm.c
@@ -5109,7 +5109,7 @@ static int __init svm_init(void)
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
 	 */
-	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+	r = kvm_init(NULL, sizeof(struct vcpu_svm),
 		     __alignof__(struct vcpu_svm), THIS_MODULE);
 	if (r)
 		goto err_kvm_init;
arch/x86/kvm/vmx/vmx.c
@@ -8635,7 +8635,7 @@ static int __init vmx_init(void)
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
 	 */
-	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+	r = kvm_init(NULL, sizeof(struct vcpu_vmx),
 		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		goto err_kvm_init;
arch/x86/kvm/x86.c
@@ -9292,10 +9292,36 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 	kvm_pmu_ops_update(ops->pmu_ops);
 }
 
+struct kvm_cpu_compat_check {
+	struct kvm_x86_init_ops *ops;
+	int *ret;
+};
+
+static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+	WARN_ON(!irqs_disabled());
+
+	if (__cr4_reserved_bits(cpu_has, c) !=
+	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
+		return -EIO;
+
+	return ops->check_processor_compatibility();
+}
+
+static void kvm_x86_check_cpu_compat(void *data)
+{
+	struct kvm_cpu_compat_check *c = data;
+
+	*c->ret = kvm_x86_check_processor_compatibility(c->ops);
+}
+
 static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 {
+	struct kvm_cpu_compat_check c;
 	u64 host_pat;
-	int r;
+	int r, cpu;
 
 	if (kvm_x86_ops.hardware_enable) {
 		pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
@@ -9375,6 +9401,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	if (r != 0)
 		goto out_mmu_exit;
 
+	c.ret = &r;
+	c.ops = ops;
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
+		if (r < 0)
+			goto out_hardware_unsetup;
+	}
+
 	/*
 	 * Point of no return! DO NOT add error paths below this point unless
 	 * absolutely necessary, as most operations from this point forward
@@ -9417,6 +9451,8 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	kvm_init_msr_list();
 	return 0;
 
+out_hardware_unsetup:
+	ops->runtime_ops->hardware_unsetup();
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
 out_free_percpu:
@@ -12050,16 +12086,7 @@ void kvm_arch_hardware_disable(void)
 int kvm_arch_check_processor_compat(void *opaque)
 {
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
-	struct kvm_x86_init_ops *ops = opaque;
-
-	WARN_ON(!irqs_disabled());
-
-	if (__cr4_reserved_bits(cpu_has, c) !=
-	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
-		return -EIO;
-
-	return ops->check_processor_compatibility();
+	return 0;
 }
 
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)