Commit 4d8d3dbe authored by Konrad Rzeszutek Wilk, committed by Juerg Haefliger

UBUNTU: SAUCE: x86/bugs, KVM: Support the combination of guest and host IBRS

CVE-2018-3639 (x86)

A guest may modify the SPEC_CTRL MSR away from the value used by the
kernel. Since the kernel doesn't use IBRS, the value that needs to be
restored on the host side is zero.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved, so the kernel should respect the boot-time
SPEC_CTRL value and use that.

This allows us to deal with future extensions to the SPEC_CTRL interface,
if any at all.
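
As a minimal sketch of that VMENTER/VMEXIT call pattern (run_guest() and
guest_spec_ctrl are illustrative placeholders, not part of this patch; the
real call sites are the svm_vcpu_run() and vmx_vcpu_run() hunks below):

	/* Declarations come from the nospec-branch.h hunk below. */
	static void vcpu_run_sketch(u64 guest_spec_ctrl)
	{
		/* Load the guest's view of SPEC_CTRL just before VMENTER ... */
		x86_spec_ctrl_set_guest(guest_spec_ctrl);

		run_guest();		/* VMENTER ... VMEXIT (placeholder) */

		/* ... and restore the host's boot-time value right after VMEXIT. */
		x86_spec_ctrl_restore_host(guest_spec_ctrl);
	}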

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference, as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.
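
A sketch only of the point made in the note above (the real helpers are added
in the x86_spec_ctrl_get_default() hunk below): both forms end up executing
the same WRMSR once paravirt has patched the wrmsrl() call site.

	#include <asm/msr.h>

	static void spec_ctrl_write_sketch(u64 val)
	{
		/* Paravirt-aware wrapper, as used by the helpers in this patch. */
		wrmsrl(MSR_IA32_SPEC_CTRL, val);

		/* Direct WRMSR, bypassing paravirt; behaves the same here:
		 * native_wrmsrl(MSR_IA32_SPEC_CTRL, val);
		 */
	}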
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>

[juergh:
 - Context adjustments.
 - Evaluate ibrs_inuse (which exists in Xenial but not in upstream)
   instead of boot_cpu_has(X86_FEATURE_IBRS).]
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
parent 3c2482b4
@@ -205,6 +205,16 @@ enum spectre_v2_mitigation {
 extern void x86_spec_ctrl_set(u64);
 extern u64 x86_spec_ctrl_get_default(void);
 
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
...
@@ -123,6 +123,24 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+	if (!ibrs_inuse)
+		return;
+
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+	if (!ibrs_inuse)
+		return;
+
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
...
@@ -3858,8 +3858,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	local_irq_enable();
 
-	if (ibrs_inuse && (svm->spec_ctrl != SPEC_CTRL_IBRS))
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -3934,15 +3933,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
 
-	if (ibrs_inuse) {
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-		if (svm->spec_ctrl != SPEC_CTRL_IBRS)
-			wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
-	}
-
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
...
@@ -8617,12 +8617,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	atomic_switch_perf_msrs(vmx);
 
-	if (ibrs_inuse)
-		add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL,
-				      vcpu->arch.spec_ctrl, SPEC_CTRL_IBRS);
-
 	debugctlmsr = get_debugctlmsr();
 
+	x86_spec_ctrl_set_guest(vcpu->arch.spec_ctrl);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
@@ -8729,6 +8727,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	x86_spec_ctrl_restore_host(vcpu->arch.spec_ctrl);
+
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
...