Commit 1d0e8480 authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: x86/mmu: Resolve nx_huge_pages when kvm.ko is loaded

Resolve nx_huge_pages to true/false when kvm.ko is loaded, leaving it as
-1 is technically undefined behavior when its value is read out by
param_get_bool(), as boolean values are supposed to be '0' or '1'.

Alternatively, KVM could define a custom getter for the param, but the
auto value doesn't depend on the vendor module in any way, and printing
"auto" would be unnecessarily unfriendly to the user.

In addition to fixing the undefined behavior, resolving the auto value
also fixes the scenario where the auto value resolves to N and no vendor
module is loaded.  Previously, -1 would result in Y being printed even
though KVM would ultimately disable the mitigation.

Rename the existing MMU module init/exit helpers to clarify that they're
invoked with respect to the vendor module, and add comments to document
why KVM has two separate "module init" flows.

  =========================================================================
  UBSAN: invalid-load in kernel/params.c:320:33
  load of value 255 is not a valid value for type '_Bool'
  CPU: 6 PID: 892 Comm: tail Not tainted 5.17.0-rc3+ #799
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
  Call Trace:
   <TASK>
   dump_stack_lvl+0x34/0x44
   ubsan_epilogue+0x5/0x40
   __ubsan_handle_load_invalid_value.cold+0x43/0x48
   param_get_bool.cold+0xf/0x14
   param_attr_show+0x55/0x80
   module_attr_show+0x1c/0x30
   sysfs_kf_seq_show+0x93/0xc0
   seq_read_iter+0x11c/0x450
   new_sync_read+0x11b/0x1a0
   vfs_read+0xf0/0x190
   ksys_read+0x5f/0xe0
   do_syscall_64+0x3b/0xc0
   entry_SYSCALL_64_after_hwframe+0x44/0xae
   </TASK>
  =========================================================================

Fixes: b8e8c830 ("kvm: mmu: ITLB_MULTIHIT mitigation")
Cc: stable@vger.kernel.org
Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
Reported-by: Jan Stancek <jstancek@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220331221359.3912754-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 00c22013
...@@ -1585,8 +1585,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) ...@@ -1585,8 +1585,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
#define kvm_arch_pmi_in_guest(vcpu) \ #define kvm_arch_pmi_in_guest(vcpu) \
((vcpu) && (vcpu)->arch.handling_intr_from_guest) ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
int kvm_mmu_module_init(void); void kvm_mmu_x86_module_init(void);
void kvm_mmu_module_exit(void); int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu); void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu);
......
...@@ -6237,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) ...@@ -6237,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0; return 0;
} }
int kvm_mmu_module_init(void) /*
* nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
* its default value of -1 is technically undefined behavior for a boolean.
*/
void kvm_mmu_x86_module_init(void)
{ {
int ret = -ENOMEM;
if (nx_huge_pages == -1) if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode()); __set_nx_huge_pages(get_nx_auto_mode());
}
/*
* The bulk of the MMU initialization is deferred until the vendor module is
* loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
* to be reset when a potentially different vendor module is loaded.
*/
int kvm_mmu_vendor_module_init(void)
{
int ret = -ENOMEM;
/* /*
* MMU roles use union aliasing which is, generally speaking, an * MMU roles use union aliasing which is, generally speaking, an
...@@ -6290,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) ...@@ -6290,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu); mmu_free_memory_caches(vcpu);
} }
void kvm_mmu_module_exit(void) void kvm_mmu_vendor_module_exit(void)
{ {
mmu_destroy_caches(); mmu_destroy_caches();
percpu_counter_destroy(&kvm_total_used_mmu_pages); percpu_counter_destroy(&kvm_total_used_mmu_pages);
......
...@@ -8926,7 +8926,7 @@ int kvm_arch_init(void *opaque) ...@@ -8926,7 +8926,7 @@ int kvm_arch_init(void *opaque)
} }
kvm_nr_uret_msrs = 0; kvm_nr_uret_msrs = 0;
r = kvm_mmu_module_init(); r = kvm_mmu_vendor_module_init();
if (r) if (r)
goto out_free_percpu; goto out_free_percpu;
...@@ -8974,7 +8974,7 @@ void kvm_arch_exit(void) ...@@ -8974,7 +8974,7 @@ void kvm_arch_exit(void)
cancel_work_sync(&pvclock_gtod_work); cancel_work_sync(&pvclock_gtod_work);
#endif #endif
kvm_x86_ops.hardware_enable = NULL; kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit(); kvm_mmu_vendor_module_exit();
free_percpu(user_return_msrs); free_percpu(user_return_msrs);
kmem_cache_destroy(x86_emulator_cache); kmem_cache_destroy(x86_emulator_cache);
#ifdef CONFIG_KVM_XEN #ifdef CONFIG_KVM_XEN
...@@ -12986,3 +12986,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); ...@@ -12986,3 +12986,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
/*
 * Module init hook for kvm.ko itself, run before any vendor module
 * (kvm_intel/kvm_amd) loads.  Per the commit this diff belongs to, its sole
 * job is to resolve the nx_huge_pages param's "auto" (-1) value to a proper
 * boolean at kvm.ko load time, via kvm_mmu_x86_module_init(), so that
 * param_get_bool() never reads an out-of-range _Bool.
 */
static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	/* Nothing here can fail; always report success. */
	return 0;
}
module_init(kvm_x86_init);
/*
 * Module exit hook for kvm.ko.  Deliberately empty: it exists only because
 * the kernel refuses to unload a module that defines module_init() but not
 * module_exit().
 */
static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment