Commit c2876207 authored by Will Deacon

arm64: Rewrite Spectre-v4 mitigation code

Rewrite the Spectre-v4 mitigation handling code to follow the same
approach as that taken by Spectre-v2.

For now, report to KVM that the system is vulnerable (by forcing
'ssbd_state' to ARM64_SSBD_UNKNOWN), as this will be cleared up in
subsequent steps.
Signed-off-by: Will Deacon <will@kernel.org>
parent 9e78b659
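For context, the per-task control that this rewrite keeps backing is the generic speculation-control prctl(). As an illustration only (not part of this patch), a minimal userspace sketch that queries and then opts into the Speculative Store Bypass mitigation, using the standard PR_SPEC_* constants from <linux/prctl.h>, might look like this:

#include <stdio.h>
#include <sys/prctl.h>	/* pulls in the PR_SPEC_* definitions on glibc */

int main(void)
{
	/* Report how the current task is affected (backed by ssbd_prctl_get()). */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		perror("PR_GET_SPECULATION_CTRL");
	else
		printf("spec_store_bypass prctl state: 0x%x\n", state);

	/* Opt this task into the mitigation (backed by ssbd_prctl_set()). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	return 0;
}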
@@ -198,25 +198,12 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
	regs->pmr_save = GIC_PRIO_IRQON;
}
static inline void set_ssbs_bit(struct pt_regs *regs)
{
regs->pstate |= PSR_SSBS_BIT;
}
static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
regs->pstate |= PSR_AA32_SSBS_BIT;
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
spectre_v4_enable_task_mitigation(current);
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
set_ssbs_bit(regs);
	regs->sp = sp;
}
@@ -233,9 +220,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
	regs->pstate |= PSR_AA32_E_BIT;
#endif
	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		set_compat_ssbs_bit(regs);
	spectre_v4_enable_task_mitigation(current);
	regs->compat_sp = sp;
}
#endif
...
@@ -24,4 +24,9 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
#endif	/* __ASM_SPECTRE_H */
@@ -106,62 +106,7 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;
static const struct ssbd_options {
const char *str;
int state;
} ssbd_options[] = {
{ "force-on", ARM64_SSBD_FORCE_ENABLE, },
{ "force-off", ARM64_SSBD_FORCE_DISABLE, },
{ "kernel", ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
int i;
if (!buf || !buf[0])
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
int len = strlen(ssbd_options[i].str);
if (strncmp(buf, ssbd_options[i].str, len))
continue;
ssbd_state = ssbd_options[i].state;
return 0;
}
return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr,
int nr_inst)
{
u32 insn;
BUG_ON(nr_inst != 1);
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
insn = aarch64_insn_get_hvc_value();
break;
case SMCCC_CONDUIT_SMC:
insn = aarch64_insn_get_smc_value();
break;
default:
return;
}
*updptr = cpu_to_le32(insn);
}
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
@@ -177,144 +122,6 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
	*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
void arm64_set_ssbd_mitigation(bool state)
{
int conduit;
if (this_cpu_has_cap(ARM64_SSBS)) {
if (state)
asm volatile(SET_PSTATE_SSBS(0));
else
asm volatile(SET_PSTATE_SSBS(1));
return;
}
conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
NULL);
WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
int scope)
{
struct arm_smccc_res res;
bool required = true;
s32 val;
bool this_cpu_safe = false;
int conduit;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
if (cpu_mitigations_off())
ssbd_state = ARM64_SSBD_FORCE_DISABLE;
/* delay setting __ssb_safe until we get a firmware response */
if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
this_cpu_safe = true;
if (this_cpu_has_cap(ARM64_SSBS)) {
if (!this_cpu_safe)
__ssb_safe = false;
required = false;
goto out_printmsg;
}
conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
if (conduit == SMCCC_CONDUIT_NONE) {
ssbd_state = ARM64_SSBD_UNKNOWN;
if (!this_cpu_safe)
__ssb_safe = false;
return false;
}
val = (s32)res.a0;
switch (val) {
case SMCCC_RET_NOT_SUPPORTED:
ssbd_state = ARM64_SSBD_UNKNOWN;
if (!this_cpu_safe)
__ssb_safe = false;
return false;
/* machines with mixed mitigation requirements must not return this */
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
return false;
case SMCCC_RET_SUCCESS:
__ssb_safe = false;
required = true;
break;
case 1: /* Mitigation not required on this CPU */
required = false;
break;
default:
WARN_ON(1);
if (!this_cpu_safe)
__ssb_safe = false;
return false;
}
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
arm64_set_ssbd_mitigation(false);
required = false;
break;
case ARM64_SSBD_KERNEL:
if (required) {
__this_cpu_write(arm64_ssbd_callback_required, 1);
arm64_set_ssbd_mitigation(true);
}
break;
case ARM64_SSBD_FORCE_ENABLE:
arm64_set_ssbd_mitigation(true);
required = true;
break;
default:
WARN_ON(1);
break;
}
out_printmsg:
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
pr_info_once("%s disabled from command-line\n", entry->desc);
break;
case ARM64_SSBD_FORCE_ENABLE:
pr_info_once("%s forced from command-line\n", entry->desc);
break;
}
return required;
}
static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
{
if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
cap->matches(cap, SCOPE_LOCAL_CPU);
}
/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -674,12 +481,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.matches = has_spectre_v4,
		.cpu_enable = cpu_enable_ssbd_mitigation,
		.cpu_enable = spectre_v4_enable_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
@@ -732,18 +538,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
	{
	}
};
ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (__ssb_safe)
return sprintf(buf, "Not affected\n");
switch (ssbd_state) {
case ARM64_SSBD_KERNEL:
case ARM64_SSBD_FORCE_ENABLE:
return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
}
return sprintf(buf, "Vulnerable\n");
}
@@ -1583,46 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
	WARN_ON(val & (7 << 27 | 7 << 21));
}
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
if (user_mode(regs))
return 1;
if (instr & BIT(PSTATE_Imm_shift))
regs->pstate |= PSR_SSBS_BIT;
else
regs->pstate &= ~PSR_SSBS_BIT;
arm64_skip_faulting_instruction(regs, 4);
return 0;
}
static struct undef_hook ssbs_emulation_hook = {
.instr_mask = ~(1U << PSTATE_Imm_shift),
.instr_val = 0xd500401f | PSTATE_SSBS,
.fn = ssbs_emulation_handler,
};
static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
{
static bool undef_hook_registered = false;
static DEFINE_RAW_SPINLOCK(hook_lock);
raw_spin_lock(&hook_lock);
if (!undef_hook_registered) {
register_undef_hook(&ssbs_emulation_hook);
undef_hook_registered = true;
}
raw_spin_unlock(&hook_lock);
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
arm64_set_ssbd_mitigation(false);
} else {
arm64_set_ssbd_mitigation(true);
}
}
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -1983,7 +1943,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
.cpu_enable = cpu_enable_ssbs,
	},
#ifdef CONFIG_ARM64_CNP
	{
...
@@ -132,8 +132,8 @@ alternative_else_nop_endif
 * them if required.
 */
	.macro apply_ssbd, state, tmp1, tmp2
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
@@ -141,7 +141,7 @@ alternative_cb_end
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
alternative_cb	spectre_v4_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
...
@@ -332,11 +332,7 @@ int swsusp_arch_suspend(void)
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		switch (arm64_get_ssbd_state()) {
		case ARM64_SSBD_FORCE_ENABLE:
		case ARM64_SSBD_KERNEL:
			arm64_set_ssbd_mitigation(true);
		}
		spectre_v4_enable_mitigation(NULL);
	}
	local_daif_restore(flags);
...
@@ -421,8 +421,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
			set_ssbs_bit(childregs);
		spectre_v4_enable_task_mitigation(p);
		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -472,8 +471,6 @@ void uao_thread_switch(struct task_struct *next)
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
@@ -485,18 +482,10 @@ static void ssbs_thread_switch(struct task_struct *next)
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpu_have_feature(cpu_feature(SSBS)))
		return;
	/* If the mitigation is enabled, then we leave SSBS clear. */
	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;
	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
	if (cpus_have_const_cap(ARM64_SSBS))
		return;
	spectre_v4_enable_task_mitigation(next);
}
/*
...
@@ -320,79 +320,394 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
	update_mitigation_state(&spectre_v2_state, state);
}
/* Spectre v4 prctl */
static void ssbd_ssbs_enable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
		  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
	task_pt_regs(task)->pstate |= val;
}
static void ssbd_ssbs_disable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
		  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
	task_pt_regs(task)->pstate &= ~val;
}
/*
 * Spectre v4.
*
* If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
* either:
*
* - Mitigated in hardware and listed in our "safe list".
* - Mitigated in hardware via PSTATE.SSBS.
* - Mitigated in software by firmware (sometimes referred to as SSBD).
*
* Wait, that doesn't sound so bad, does it? Keep reading...
*
* A major source of headaches is that the software mitigation is enabled both
* on a per-task basis, but can also be forced on for the kernel, necessitating
* both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
* allow EL0 to toggle SSBS directly, which can end up with the prctl() state
* being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
* so you can have systems that have both firmware and SSBS mitigations. This
* means we actually have to reject late onlining of CPUs with mitigations if
* all of the currently onlined CPUs are safelisted, as the mitigation tends to
* be opt-in for userspace. Yes, really, the cure is worse than the disease.
*
* The only good part is that if the firmware mitigation is present, then it is
* present for all CPUs, meaning we don't have to worry about late onlining of a
* vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
*
* Give me a VAX-11/780 any day of the week...
*/
static enum mitigation_state spectre_v4_state;
/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
enum spectre_v4_policy {
SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
SPECTRE_V4_POLICY_MITIGATION_ENABLED,
SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};
static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
static const struct spectre_v4_param {
const char *str;
enum spectre_v4_policy policy;
} spectre_v4_params[] = {
{ "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
{ "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
{ "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;
	if (!str || !str[0])
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];
		if (strncmp(str, param->str, strlen(param->str)))
			continue;
		__spectre_v4_policy = param->policy;
		return 0;
	}
	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
/*
 * prctl interface for SSBD
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	int state = arm64_get_ssbd_state();
	/* Unsupported */
	if (state == ARM64_SSBD_UNKNOWN)
		return -ENODEV;
	/* Treat the unaffected/mitigated state separately */
	if (state == ARM64_SSBD_MITIGATED) {
		switch (ctrl) {
		case PR_SPEC_ENABLE:
			return -EPERM;
		case PR_SPEC_DISABLE:
		case PR_SPEC_FORCE_DISABLE:
			return 0;
		}
	}
/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
	return ret;
}
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}
static bool spectre_v4_mitigations_on(void)
{
return !spectre_v4_mitigations_off() &&
__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf)
{
switch (spectre_v4_state) {
case SPECTRE_UNAFFECTED:
return sprintf(buf, "Not affected\n");
case SPECTRE_MITIGATED:
return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
case SPECTRE_VULNERABLE:
fallthrough;
default:
return sprintf(buf, "Vulnerable\n");
	}
}
enum mitigation_state arm64_get_spectre_v4_state(void)
{
return spectre_v4_state;
}
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
static const struct midr_range spectre_v4_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ },
};
if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
return SPECTRE_UNAFFECTED;
/* CPU features are detected first */
if (this_cpu_has_cap(ARM64_SSBS))
return SPECTRE_MITIGATED;
return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
int ret;
struct arm_smccc_res res;
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
ret = res.a0;
switch (ret) {
case SMCCC_RET_SUCCESS:
return SPECTRE_MITIGATED;
case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
fallthrough;
case SMCCC_RET_NOT_REQUIRED:
return SPECTRE_UNAFFECTED;
default:
fallthrough;
case SMCCC_RET_NOT_SUPPORTED:
return SPECTRE_VULNERABLE;
}
}
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
enum mitigation_state state;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
state = spectre_v4_get_cpu_hw_mitigation_state();
if (state == SPECTRE_VULNERABLE)
state = spectre_v4_get_cpu_fw_mitigation_state();
return state != SPECTRE_UNAFFECTED;
}
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
if (user_mode(regs))
return 1;
if (instr & BIT(PSTATE_Imm_shift))
regs->pstate |= PSR_SSBS_BIT;
else
regs->pstate &= ~PSR_SSBS_BIT;
arm64_skip_faulting_instruction(regs, 4);
return 0;
}
static struct undef_hook ssbs_emulation_hook = {
.instr_mask = ~(1U << PSTATE_Imm_shift),
.instr_val = 0xd500401f | PSTATE_SSBS,
.fn = ssbs_emulation_handler,
};
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
static bool undef_hook_registered = false;
static DEFINE_RAW_SPINLOCK(hook_lock);
enum mitigation_state state;
	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
state = spectre_v4_get_cpu_hw_mitigation_state();
if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
return state;
raw_spin_lock(&hook_lock);
if (!undef_hook_registered) {
register_undef_hook(&ssbs_emulation_hook);
undef_hook_registered = true;
}
raw_spin_unlock(&hook_lock);
if (spectre_v4_mitigations_off()) {
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
asm volatile(SET_PSTATE_SSBS(1));
return SPECTRE_VULNERABLE;
}
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
asm volatile(SET_PSTATE_SSBS(0));
return SPECTRE_MITIGATED;
}
/*
* Patch a branch over the Spectre-v4 mitigation code with a NOP so that
* we fallthrough and check whether firmware needs to be called on this CPU.
*/
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1); /* Branch -> NOP */
if (spectre_v4_mitigations_off())
return;
if (cpus_have_final_cap(ARM64_SSBS))
return;
if (spectre_v4_mitigations_dynamic())
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
/*
* Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
* to call into firmware to adjust the mitigation state.
*/
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
u32 insn;
BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
insn = aarch64_insn_get_hvc_value();
break;
case SMCCC_CONDUIT_SMC:
insn = aarch64_insn_get_smc_value();
break;
default:
return;
}
*updptr = cpu_to_le32(insn);
}
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
enum mitigation_state state;
state = spectre_v4_get_cpu_fw_mitigation_state();
if (state != SPECTRE_MITIGATED)
return state;
if (spectre_v4_mitigations_off()) {
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
return SPECTRE_VULNERABLE;
}
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
if (spectre_v4_mitigations_dynamic())
__this_cpu_write(arm64_ssbd_callback_required, 1);
return SPECTRE_MITIGATED;
}
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
enum mitigation_state state;
WARN_ON(preemptible());
state = spectre_v4_enable_hw_mitigation();
if (state == SPECTRE_VULNERABLE)
state = spectre_v4_enable_fw_mitigation();
update_mitigation_state(&spectre_v4_state, state);
}
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
if (state)
regs->pstate |= bit;
else
regs->pstate &= ~bit;
}
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
struct pt_regs *regs = task_pt_regs(tsk);
bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
if (spectre_v4_mitigations_off())
ssbs = true;
else if (spectre_v4_mitigations_dynamic() && !kthread)
ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
__update_pstate_ssbs(regs, ssbs);
}
/*
* The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
* This is interesting because the "speculation disabled" behaviour can be
* configured so that it is preserved across exec(), which means that the
* prctl() may be necessary even when PSTATE.SSBS can be toggled directly
* from userspace.
*/
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;
		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;
		task_set_spec_ssb_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
	/*
	 * Things are a bit backward here: the arm64 internal API
	 * *enables the mitigation* when the userspace API *disables
	 * speculation*. So much fun.
	 */
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (state == ARM64_SSBD_FORCE_ENABLE ||
		    task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		clear_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_enable(task);
		break;
	case PR_SPEC_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
@@ -409,22 +724,32 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_UNKNOWN:
		return -ENODEV;
	case ARM64_SSBD_FORCE_ENABLE:
		return PR_SPEC_DISABLE;
	case ARM64_SSBD_KERNEL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case ARM64_SSBD_FORCE_DISABLE:
		return PR_SPEC_ENABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;
		if (spectre_v4_mitigations_dynamic())
			break;
		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}
	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
...
@@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
		arm64_set_ssbd_mitigation(false);
	spectre_v4_enable_mitigation(NULL);
}
/*
...
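For completeness, the mitigation status computed by cpu_show_spec_store_bypass() above is what userspace sees in sysfs. A small, illustrative C snippet (not part of the patch) that reads it back:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Prints "Not affected", "Vulnerable" or the "Mitigation: ..." string. */
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}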