Commit cb2db5bb authored by Josh Poimboeuf, committed by Ingo Molnar

x86/bugs: Cache the value of MSR_IA32_ARCH_CAPABILITIES

There's no need to keep reading MSR_IA32_ARCH_CAPABILITIES over and
over.  It's even read in the BHI sysfs function, which is a big no-no.
Just read it once and cache it.

Fixes: ec9404e4 ("x86/bhi: Add BHI mitigation knob")
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
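For readers less familiar with the pattern the patch applies, here is a minimal, self-contained C sketch of reading a capability register once at init and caching it for later consumers. The names (read_hw_caps(), hw_caps, hw_caps_init(), report_status()) and the bit values are illustrative assumptions for this sketch, not the kernel's identifiers; in bugs.c the cached value is ia32_cap, filled from x86_read_arch_cap_msr() in cpu_select_mitigations().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the expensive hardware read (an MSR read in the kernel). */
static uint64_t read_hw_caps(void)
{
	return 0x9; /* pretend bits 0 and 3 are set */
}

/*
 * Read once during init, then treated as read-only
 * (the kernel marks the real variable __ro_after_init).
 */
static uint64_t hw_caps;

static void hw_caps_init(void)
{
	hw_caps = read_hw_caps();
}

/*
 * Later consumers, including reporting paths that may run often,
 * test the cached bits instead of re-reading the register.
 */
static void report_status(void)
{
	if (hw_caps & (1ULL << 3))
		puts("capability bit 3: present");
	else
		puts("capability bit 3: absent");
}

int main(void)
{
	hw_caps_init();
	report_status();
	return 0;
}

The same shape appears in the diff below: the read moves into cpu_select_mitigations(), and every later check, including the sysfs reporting path in spectre_bhi_state(), tests the cached ia32_cap instead of issuing another MSR read.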
parent dfe64890
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
+static u64 __ro_after_init ia32_cap;
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
 	}
 
+	ia32_cap = x86_read_arch_cap_msr();
+
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
 
 static void __init taa_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
 		taa_mitigation = TAA_MITIGATION_OFF;
 		return;
@@ -341,7 +343,6 @@ static void __init taa_select_mitigation(void)
 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
 	 * update is required.
 	 */
-	ia32_cap = x86_read_arch_cap_msr();
 	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
 	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
 
 static void __init mmio_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
 	     cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
 		return;
 
-	ia32_cap = x86_read_arch_cap_msr();
-
 	/*
 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
 	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
 		return;
 
-	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 	else
 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
 
 static void __init srbds_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 		return;
 
@@ -669,7 +664,6 @@ static void __init srbds_select_mitigation(void)
 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
 	 * by Processor MMIO Stale Data vulnerability.
 	 */
-	ia32_cap = x86_read_arch_cap_msr();
 	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
 	/* Will verify below that mitigation _can_ be disabled */
 
 	/* No microcode */
-	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
 			/*
 			 * This only needs to be done on the boot CPU so do it
@@ -1908,8 +1902,6 @@ static void update_indir_branch_cond(void)
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
-
 	/*
 	 * Enable the idle clearing if SMT is active on CPUs which are
 	 * affected only by MSBDS and not any other MDS variant.
@@ -2818,7 +2810,7 @@ static const char *spectre_bhi_state(void)
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 		return "; BHI: SW loop, KVM: SW loop";
 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+		 !(ia32_cap & ARCH_CAP_RRSBA))
 		return "; BHI: Retpoline";
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 		return "; BHI: Syscall hardening, KVM: SW loop";