Commit 590b09b1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Register "emergency disable" callbacks when virt is enabled

Register the "disable virtualization in an emergency" callback just
before KVM enables virtualization in hardware, as there is no functional
need to keep the callbacks registered while KVM happens to be loaded, but
is inactive, i.e. if KVM hasn't enabled virtualization.

Note, unregistering the callback every time the last VM is destroyed could
have measurable latency due to the synchronize_rcu() needed to ensure all
references to the callback are dropped before KVM is unloaded.  But the
latency should be a small fraction of the total latency of disabling
virtualization across all CPUs, and userspace can set enable_virt_at_load
to completely eliminate the runtime overhead.

Add a pointer in kvm_x86_ops to allow vendor code to provide its callback.
There is no reason to force vendor code to do the registration, and either
way KVM would need a new kvm_x86_ops hook.
Suggested-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Acked-by: Kai Huang <kai.huang@intel.com>
Tested-by: Farrah Chen <farrah.chen@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-ID: <20240830043600.127750-11-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6d55a942
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <asm/kvm_page_track.h> #include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h> #include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h> #include <asm/hyperv-tlfs.h>
#include <asm/reboot.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
...@@ -1631,6 +1632,8 @@ struct kvm_x86_ops { ...@@ -1631,6 +1632,8 @@ struct kvm_x86_ops {
int (*enable_virtualization_cpu)(void); int (*enable_virtualization_cpu)(void);
void (*disable_virtualization_cpu)(void); void (*disable_virtualization_cpu)(void);
cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;
void (*hardware_unsetup)(void); void (*hardware_unsetup)(void);
bool (*has_emulated_msr)(struct kvm *kvm, u32 index); bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu); void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
......
...@@ -4982,6 +4982,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { ...@@ -4982,6 +4982,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.hardware_unsetup = svm_hardware_unsetup, .hardware_unsetup = svm_hardware_unsetup,
.enable_virtualization_cpu = svm_enable_virtualization_cpu, .enable_virtualization_cpu = svm_enable_virtualization_cpu,
.disable_virtualization_cpu = svm_disable_virtualization_cpu, .disable_virtualization_cpu = svm_disable_virtualization_cpu,
.emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
.has_emulated_msr = svm_has_emulated_msr, .has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_vcpu_create, .vcpu_create = svm_vcpu_create,
...@@ -5410,8 +5411,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { ...@@ -5410,8 +5411,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
static void __svm_exit(void) static void __svm_exit(void)
{ {
kvm_x86_vendor_exit(); kvm_x86_vendor_exit();
cpu_emergency_unregister_virt_callback(svm_emergency_disable_virtualization_cpu);
} }
static int __init svm_init(void) static int __init svm_init(void)
...@@ -5427,8 +5426,6 @@ static int __init svm_init(void) ...@@ -5427,8 +5426,6 @@ static int __init svm_init(void)
if (r) if (r)
return r; return r;
cpu_emergency_register_virt_callback(svm_emergency_disable_virtualization_cpu);
/* /*
* Common KVM initialization _must_ come last, after this, /dev/kvm is * Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace! * exposed to userspace!
......
...@@ -25,6 +25,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = { ...@@ -25,6 +25,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.enable_virtualization_cpu = vmx_enable_virtualization_cpu, .enable_virtualization_cpu = vmx_enable_virtualization_cpu,
.disable_virtualization_cpu = vmx_disable_virtualization_cpu, .disable_virtualization_cpu = vmx_disable_virtualization_cpu,
.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
.has_emulated_msr = vmx_has_emulated_msr, .has_emulated_msr = vmx_has_emulated_msr,
.vm_size = sizeof(struct kvm_vmx), .vm_size = sizeof(struct kvm_vmx),
......
...@@ -755,7 +755,7 @@ static int kvm_cpu_vmxoff(void) ...@@ -755,7 +755,7 @@ static int kvm_cpu_vmxoff(void)
return -EIO; return -EIO;
} }
static void vmx_emergency_disable_virtualization_cpu(void) void vmx_emergency_disable_virtualization_cpu(void)
{ {
int cpu = raw_smp_processor_id(); int cpu = raw_smp_processor_id();
struct loaded_vmcs *v; struct loaded_vmcs *v;
...@@ -8584,8 +8584,6 @@ static void __vmx_exit(void) ...@@ -8584,8 +8584,6 @@ static void __vmx_exit(void)
{ {
allow_smaller_maxphyaddr = false; allow_smaller_maxphyaddr = false;
cpu_emergency_unregister_virt_callback(vmx_emergency_disable_virtualization_cpu);
vmx_cleanup_l1d_flush(); vmx_cleanup_l1d_flush();
} }
...@@ -8632,8 +8630,6 @@ static int __init vmx_init(void) ...@@ -8632,8 +8630,6 @@ static int __init vmx_init(void)
pi_init_cpu(cpu); pi_init_cpu(cpu);
} }
cpu_emergency_register_virt_callback(vmx_emergency_disable_virtualization_cpu);
vmx_check_vmcs12_offsets(); vmx_check_vmcs12_offsets();
/* /*
......
...@@ -15,6 +15,7 @@ void vmx_hardware_unsetup(void); ...@@ -15,6 +15,7 @@ void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void); int vmx_check_processor_compat(void);
int vmx_enable_virtualization_cpu(void); int vmx_enable_virtualization_cpu(void);
void vmx_disable_virtualization_cpu(void); void vmx_disable_virtualization_cpu(void);
void vmx_emergency_disable_virtualization_cpu(void);
int vmx_vm_init(struct kvm *kvm); int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm); void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm); int vmx_vcpu_precreate(struct kvm *kvm);
......
...@@ -12512,6 +12512,16 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ...@@ -12512,6 +12512,16 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
} }
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
/*
 * Called when KVM enables virtualization in hardware.  Register the
 * vendor-provided "disable virtualization in an emergency" callback
 * (kvm_x86_ops.emergency_disable_virtualization_cpu, supplied by the
 * SVM/VMX modules) so it is live only while KVM actively has
 * virtualization enabled, not for the entire time the module is loaded.
 */
void kvm_arch_enable_virtualization(void)
{
cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
}
/*
 * Called when KVM disables virtualization in hardware.  Unregister the
 * vendor emergency-disable callback registered by
 * kvm_arch_enable_virtualization().  Note: per the commit message, the
 * unregister path incurs a synchronize_rcu() to ensure all references to
 * the callback are dropped, which adds measurable (but small) latency.
 */
void kvm_arch_disable_virtualization(void)
{
cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
}
int kvm_arch_enable_virtualization_cpu(void) int kvm_arch_enable_virtualization_cpu(void)
{ {
struct kvm *kvm; struct kvm *kvm;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment