Commit cbdf8a18 authored by Marc Zyngier, committed by Will Deacon

arm64: Force SSBS on context switch

On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.
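
[Editorial note: to see the failure mode concretely, here is a small user-space toy model. It is a sketch, not kernel code: run_on_cpu() is a made-up stand-in for the hardware behaviour, and only the two bit values mirror the arm64 ptrace headers.]

#include <stdio.h>
#include <stdint.h>

/* Bit positions as defined in the arm64 ptrace headers. */
#define PSR_SSBS_BIT      0x00001000UL	/* PSTATE[12], AArch64 tasks */
#define PSR_AA32_SSBS_BIT 0x00800000UL	/* SPSR[23], AArch32 tasks   */

/*
 * Toy model of the hardware: a CPU without SSBS treats PSTATE[12] as
 * RES0, so the bit reads back as zero once the task has run there.
 */
static uint64_t run_on_cpu(uint64_t pstate, int cpu_has_ssbs)
{
	return cpu_has_ssbs ? pstate : (pstate & ~PSR_SSBS_BIT);
}

int main(void)
{
	uint64_t pstate = PSR_SSBS_BIT;	/* task starts with SSBS set */

	pstate = run_on_cpu(pstate, 0);	/* migrate to a CPU lacking SSBS */
	printf("after migration:    SSBS=%d\n", !!(pstate & PSR_SSBS_BIT));

	pstate |= PSR_SSBS_BIT;		/* what forcing SSBS on switch restores */
	printf("after forcing SSBS: SSBS=%d\n", !!(pstate & PSR_SSBS_BIT));
	return 0;
}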

Fixes: 8f04e8e6 ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
[will: inverted logic and added comments]
Signed-off-by: Will Deacon <will@kernel.org>
parent 4574b0b9
arch/arm64/include/asm/processor.h

@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 	regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
arch/arm64/kernel/process.c

@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		if (system_uses_irq_prio_masking())
 			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
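
[Editorial note: for context, the arm64_get_ssbd_state() checks in the patch compare against the mitigation states from arch/arm64/include/asm/cpufeature.h of that era. Below is a minimal stand-alone sketch of the decision, where want_ssbs_set() is a hypothetical helper for illustration, not a kernel function.]

#include <stdbool.h>

/* Mitigation states, mirroring asm/cpufeature.h at the time of this patch. */
#define ARM64_SSBD_UNKNOWN		-1
#define ARM64_SSBD_FORCE_DISABLE	0
#define ARM64_SSBD_KERNEL		1
#define ARM64_SSBD_FORCE_ENABLE		2
#define ARM64_SSBD_MITIGATED		3

/*
 * Hypothetical helper: true when the context-switch path should force
 * SSBS back on, i.e. when Speculative Store Bypass is mitigated neither
 * globally (FORCE_ENABLE) nor per task (TIF_SSBD); this is the inverse
 * of the early-return condition in ssbs_thread_switch() above.
 */
static bool want_ssbs_set(int ssbd_state, bool tif_ssbd)
{
	return ssbd_state != ARM64_SSBD_FORCE_ENABLE && !tif_ssbd;
}

Note that the polarity follows the architecture: SSBS set means speculative store bypass is permitted (mitigation off, faster), SSBS clear means it is restricted (mitigation on).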