Commit 13e1ad2b authored by Linus Torvalds

Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 pti updates from Thomas Gleixner:
 "No point in speculating what's in this parcel:

   - Drop the swap storage limit when L1TF is disabled so the full space
     is available

   - Add support for the new AMD STIBP always on mitigation mode

   - Fix a bunch of STIPB typos"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Add support for STIBP always-on preferred mode
  x86/speculation/l1tf: Drop the swap storage limit restriction when l1tf=off
  x86/speculation: Change misspelled STIPB to STIBP
parents 9f687ddd 20c3a2c3
@@ -2099,6 +2099,9 @@
 			off		Disables hypervisor mitigations and doesn't
 					emit any warnings.
+					It also drops the swap size and available
+					RAM limit restriction on both hypervisor and
+					bare metal.
 
 			Default is 'flush'.
......
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
   off		Disables hypervisor mitigations and doesn't emit any
 		warnings.
+		It also drops the swap size and available RAM limit restrictions
+		on both hypervisor and bare metal.
+
   ============ =============================================================
 
 The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@ Default mitigations
 The kernel default mitigations for vulnerable processors are:
 
   - PTE inversion to protect against malicious user space. This is done
-    unconditionally and cannot be controlled.
+    unconditionally and cannot be controlled. The swap storage is limited
+    to ~16TB.
 
   - L1D conditional flushing on VMENTER when EPT is enabled for
     a guest.
......
@@ -284,6 +284,7 @@
 #define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON	(13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
 #define X86_FEATURE_AMD_SSBD		(13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
......
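The new feature bit sits in word 13 of the cpufeature table, which for these AMD definitions mirrors CPUID leaf 0x80000008 EBX (bit 15 = STIBP, bit 17 = STIBP always-on preferred, per the definitions above). As an illustration only, not part of this series, a user-space probe for the raw CPUID bit could look like the following sketch (leaf/bit mapping assumed from the hunk above):

```c
/*
 * Illustration only (not from this series): query CPUID 0x80000008 EBX,
 * which word 13 of cpufeatures.h is assumed to mirror on AMD parts, and
 * test the STIBP (bit 15) and STIBP-always-on (bit 17) bits directly.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0x80000008 not available");
                return 1;
        }
        printf("AMD_STIBP           (bit 15): %s\n", (ebx & (1u << 15)) ? "yes" : "no");
        printf("AMD_STIBP_ALWAYS_ON (bit 17): %s\n", (ebx & (1u << 17)) ? "yes" : "no");
        return 0;
}
```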
@@ -232,6 +232,7 @@ enum spectre_v2_mitigation {
 enum spectre_v2_user_mitigation {
 	SPECTRE_V2_USER_NONE,
 	SPECTRE_V2_USER_STRICT,
+	SPECTRE_V2_USER_STRICT_PREFERRED,
 	SPECTRE_V2_USER_PRCTL,
 	SPECTRE_V2_USER_SECCOMP,
 };
......
@@ -54,7 +54,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
-/* Control conditional STIPB in switch_to() */
+/* Control conditional STIBP in switch_to() */
 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 
 /* Control conditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
@@ -262,10 +262,11 @@ enum spectre_v2_user_cmd {
 };
 
 static const char * const spectre_v2_user_strings[] = {
 	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
 	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
+	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
 	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
 	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
 };
 
 static const struct {
@@ -355,6 +356,15 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 		break;
 	}
 
+	/*
+	 * At this point, an STIBP mode other than "off" has been set.
+	 * If STIBP support is not being forced, check if STIBP always-on
+	 * is preferred.
+	 */
+	if (mode != SPECTRE_V2_USER_STRICT &&
+	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
 	/* Initialize Indirect Branch Prediction Barrier */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@@ -379,12 +389,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 			"always-on" : "conditional");
 	}
 
-	/* If enhanced IBRS is enabled no STIPB required */
+	/* If enhanced IBRS is enabled no STIBP required */
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 		return;
 
 	/*
-	 * If SMT is not possible or STIBP is not available clear the STIPB
+	 * If SMT is not possible or STIBP is not available clear the STIBP
 	 * mode.
 	 */
 	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
@@ -610,6 +620,7 @@ void arch_smt_update(void)
 	case SPECTRE_V2_USER_NONE:
 		break;
 	case SPECTRE_V2_USER_STRICT:
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
 		update_stibp_strict();
 		break;
 	case SPECTRE_V2_USER_PRCTL:
@@ -812,7 +823,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 		 * Indirect branch speculation is always disabled in strict
 		 * mode.
 		 */
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
 			return -EPERM;
 		task_clear_spec_ib_disable(task);
 		task_update_spec_tif(task);
@@ -825,7 +837,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 		 */
 		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
 			return -EPERM;
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
 			return 0;
 		task_set_spec_ib_disable(task);
 		if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -896,6 +909,7 @@ static int ib_prctl_get(struct task_struct *task)
 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
 	case SPECTRE_V2_USER_STRICT:
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
 		return PR_SPEC_DISABLE;
 	default:
 		return PR_SPEC_NOT_AFFECTED;
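From user space, the ib_prctl_set()/ib_prctl_get() paths touched above are reached through prctl(PR_SET/GET_SPECULATION_CTRL) with PR_SPEC_INDIRECT_BRANCH. A minimal sketch, not part of this series and assuming uapi headers new enough (4.20+) to define the PR_SPEC_* constants:

```c
/*
 * Illustration only (not from this series): per-task indirect branch
 * speculation control backed by ib_prctl_set()/ib_prctl_get() above.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

        if (state < 0) {
                perror("PR_GET_SPECULATION_CTRL");
                return 1;
        }

        /* PR_SPEC_PRCTL is only reported in the prctl/seccomp modes; in the
         * strict and the new strict-preferred modes the mode is fixed. */
        printf("per-task control: %s\n",
               (state & PR_SPEC_PRCTL) ? "available" : "fixed by boot mode");

        /* Asking to (re-)enable IB speculation is rejected with EPERM when the
         * boot-selected mode is strict or strict-preferred, per ib_prctl_set(). */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0) < 0)
                printf("PR_SPEC_ENABLE rejected: %s\n", strerror(errno));
        else
                printf("PR_SPEC_ENABLE accepted\n");
        return 0;
}
```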
@@ -1002,7 +1016,8 @@ static void __init l1tf_select_mitigation(void)
 #endif
 
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 			half_pa);
@@ -1088,6 +1103,8 @@ static char *stibp_state(void)
 		return ", STIBP: disabled";
 	case SPECTRE_V2_USER_STRICT:
 		return ", STIBP: forced";
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
+		return ", STIBP: always-on";
 	case SPECTRE_V2_USER_PRCTL:
 	case SPECTRE_V2_USER_SECCOMP:
 		if (static_key_enabled(&switch_to_cond_stibp))
......
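The new mode is visible through the standard spectre_v2 reporting file in sysfs; the "STIBP: always-on" fragment comes from the stibp_state() hunk above. A small sketch (illustration only, not part of this series) that simply dumps that file:

```c
/*
 * Illustration only (not from this series): dump the spectre_v2 reporting
 * file; with the new mode selected it contains the ", STIBP: always-on"
 * fragment added to stibp_state() above.
 */
#include <stdio.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

        if (!f) {
                perror("spectre_v2");
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
```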
@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev,
 	if (IS_ENABLED(CONFIG_SMP)) {
 		/*
 		 * Avoid __switch_to_xtra() invocation when conditional
-		 * STIPB is disabled and the only different bit is
+		 * STIBP is disabled and the only different bit is
 		 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
 		 * in the TIF_WORK_CTXSW masks.
 		 */
......
@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
 	pages = generic_max_swapfile_size();
 
-	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
 		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
......