Commit 7db92e16 authored by Thomas Gleixner

x86/kvm: Move l1tf setup function

In preparation for allowing run-time control of L1D flushing, move the
setup code to the module parameter handler.

In case of pre module init parsing, just store the value and let vmx_init()
do the actual setup after running kvm_init(), so that enable_ept has the
correct state.

At run time, invoke it directly from the parameter setter to prepare for
run-time control.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.694063239@linutronix.de
parent a7b9020b
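
For background, the pattern this patch adopts can be shown in isolation: a
module parameter whose setter only records the value when it runs before
module init, and performs the real setup when invoked at run time. The
sketch below is not code from the patch; every demo_* name is illustrative,
it uses 0644 instead of the patch's S_IRUGO so that the run-time path is
actually reachable, and the 'auto' sentinel standing in for "init has not
run yet" mirrors VMENTER_L1D_FLUSH_AUTO in the diff that follows.

/*
 * Minimal sketch of the deferred-setup module parameter pattern.
 * All demo_* names are illustrative, not from the patch.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

enum demo_mode { DEMO_AUTO, DEMO_NEVER, DEMO_COND, DEMO_ALWAYS };

static const char * const demo_options[] = {
	[DEMO_AUTO] = "auto", [DEMO_NEVER] = "never",
	[DEMO_COND] = "cond", [DEMO_ALWAYS] = "always",
};

/* Storage for pre module init parameter parsing */
static enum demo_mode demo_param = DEMO_AUTO;
/* Effective mode; stays DEMO_AUTO until demo_init() has run */
static enum demo_mode demo_active = DEMO_AUTO;

/* The real setup; only safe once module init has settled dependencies */
static int demo_setup(enum demo_mode mode)
{
	if (mode == DEMO_AUTO)	/* 'auto' selects the default mode */
		mode = DEMO_COND;
	demo_active = mode;
	return 0;
}

static int demo_set(const char *s, const struct kernel_param *kp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_options); i++) {
		if (s && !strcmp(s, demo_options[i]))
			break;
	}
	if (i == ARRAY_SIZE(demo_options))
		return -EINVAL;

	/*
	 * Pre module init parsing: demo_init() has not run yet, so just
	 * store the value and let demo_init() do the actual setup.
	 */
	if (demo_active == DEMO_AUTO) {
		demo_param = i;
		return 0;
	}

	/* Run-time write: apply the new mode directly */
	return demo_setup(i);
}

static int demo_get(char *s, const struct kernel_param *kp)
{
	return sprintf(s, "%s\n", demo_options[demo_active]);
}

static const struct kernel_param_ops demo_ops = {
	.set = demo_set,
	.get = demo_get,
};
/* No backing variable needed; the ops manage the state themselves */
module_param_cb(demo_mode, &demo_ops, NULL, 0644);

static int __init demo_init(void)
{
	/* Dependencies are established here; apply the stored parameter */
	return demo_setup(demo_param);
}
module_init(demo_init);

static void __exit demo_exit(void) { }
module_exit(demo_exit);

MODULE_LICENSE("GPL");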
@@ -193,7 +193,8 @@ extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+/* Storage for pre module init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
 
 static const struct {
 	const char *option;
@@ -205,33 +206,85 @@ static const struct {
 	{"always", VMENTER_L1D_FLUSH_ALWAYS},
 };
 
-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
-	unsigned int i;
+	struct page *page;
 
-	if (!s)
-		return -EINVAL;
+	/* If set to 'auto' select 'cond' */
+	if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+		l1tf = VMENTER_L1D_FLUSH_COND;
 
-	for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
-		if (!strcmp(s, vmentry_l1d_param[i].option)) {
-			vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
-			return 0;
-		}
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
+	}
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+		if (!page)
+			return -ENOMEM;
+		vmx_l1d_flush_pages = page_address(page);
 	}
 
+	l1tf_vmx_mitigation = l1tf;
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+		static_branch_enable(&vmx_l1d_should_flush);
+	return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+	unsigned int i;
+
+	if (s) {
+		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+			if (!strcmp(s, vmentry_l1d_param[i].option))
+				return vmentry_l1d_param[i].cmd;
+		}
+	}
 	return -EINVAL;
 }
 
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+	int l1tf;
+
+	if (!boot_cpu_has(X86_BUG_L1TF))
+		return 0;
+
+	l1tf = vmentry_l1d_flush_parse(s);
+	if (l1tf < 0)
+		return l1tf;
+
+	/*
+	 * Has vmx_init() run already? If not then this is the pre init
+	 * parameter parsing. In that case just store the value and let
+	 * vmx_init() do the proper setup after enable_ept has been
+	 * established.
+	 */
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+		vmentry_l1d_flush_param = l1tf;
+		return 0;
+	}
+
+	return vmx_setup_l1d_flush(l1tf);
+}
+
 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 {
-	return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
+	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
 
 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
 	.set = vmentry_l1d_flush_set,
 	.get = vmentry_l1d_flush_get,
 };
-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
 
 struct kvm_vmx {
 	struct kvm kvm;
@@ -9608,7 +9661,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	 * it. The flush bit gets set again either from vcpu_run() or from
 	 * one of the unsafe VMEXIT handlers.
 	 */
-	always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+	always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
 	vcpu->arch.l1tf_flush_l1d = always;
 
 	vcpu->stat.l1d_flush++;
@@ -13197,34 +13250,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.enable_smi_window = enable_smi_window,
 };
 
-#define L1D_CACHE_ORDER 4
-static void *vmx_l1d_flush_pages;
-
-static int __init vmx_setup_l1d_flush(void)
-{
-	struct page *page;
-
-	if (!boot_cpu_has_bug(X86_BUG_L1TF))
-		return 0;
-
-	if (!enable_ept) {
-		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
-		return 0;
-	}
-
-	l1tf_vmx_mitigation = vmentry_l1d_flush;
-	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
-		return 0;
-
-	if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-		if (!page)
-			return -ENOMEM;
-		vmx_l1d_flush_pages = page_address(page);
-	}
-
-	static_branch_enable(&vmx_l1d_should_flush);
-	return 0;
-}
-
 static void vmx_cleanup_l1d_flush(void)
 {
 	if (vmx_l1d_flush_pages) {
@@ -13309,13 +13334,19 @@ static int __init vmx_init(void)
 		return r;
 
 	/*
-	 * Must be called after kvm_init() so enable_ept is properly set up
+	 * Must be called after kvm_init() so enable_ept is properly set
+	 * up. Hand the parameter mitigation value in which was stored in
+	 * the pre module init parser. If no parameter was given, it will
+	 * contain 'auto' which will be turned into the default 'cond'
+	 * mitigation mode.
 	 */
-	r = vmx_setup_l1d_flush();
-	if (r) {
-		vmx_exit();
-		return r;
+	if (boot_cpu_has(X86_BUG_L1TF)) {
+		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+		if (r) {
+			vmx_exit();
+			return r;
+		}
 	}
 
 #ifdef CONFIG_KEXEC_CORE
 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
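
Two design details in the diff are easy to miss. module_param_cb() now
passes NULL as the variable argument because the get/set ops operate on
l1tf_vmx_mitigation and vmentry_l1d_flush_param directly, so no backing
variable is required. And l1tf_vmx_mitigation doubles as a "has vmx_init()
run?" flag: it keeps its initial VMENTER_L1D_FLUSH_AUTO value until
vmx_setup_l1d_flush() stores a real mode, which is exactly what
vmentry_l1d_flush_set() tests to tell pre-init parsing from a run-time
write. Note that the parameter is still exported S_IRUGO (read-only) at
this point; the run-time path prepared here becomes reachable from sysfs
once write permission is granted later in the series.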