Commit 5ab6876c authored by Josh Poimboeuf, committed by Peter Zijlstra

arm64/cpu: Mark cpu_park_loop() and friends __noreturn

In preparation for marking panic_smp_self_stop() __noreturn across the
kernel, first mark the arm64 implementation of cpu_park_loop() and
related functions __noreturn.
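
As a rough illustration of what the annotation buys (a standalone sketch with hypothetical names, not kernel code): once a parking loop is declared noreturn, the compiler treats every call to it as the end of control flow, so callers need no trailing unreachable() and can themselves be marked noreturn without the compiler complaining that they might return.

    #include <stdnoreturn.h>

    /* Stand-in for cpu_park_loop(): spins forever, so it never returns. */
    static noreturn void park_loop(void)
    {
            for (;;)
                    ;
    }

    /* Stand-in for cpu_panic_kernel(): because park_loop() is noreturn,
     * the compiler knows control never reaches the end of this function,
     * so no trailing unreachable() or return is needed. */
    noreturn void panic_and_park(void)
    {
            /* record the failure here ... */
            park_loop();
    }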
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/55787d3193ea3e295ccbb097abfab0a10ae49d45.1681342859.git.jpoimboe@kernel.org
parent 4208d2d7
@@ -31,7 +31,7 @@ static inline unsigned long disr_to_esr(u64 disr)
 	return esr;
 }
-asmlinkage void handle_bad_stack(struct pt_regs *regs);
+asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs);
 asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
 asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
@@ -80,5 +80,5 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
 void do_serror(struct pt_regs *regs, unsigned long esr);
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
-void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
+void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
 #endif /* __ASM_EXCEPTION_H */
@@ -101,9 +101,9 @@ extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void __noreturn cpu_die(void);
-extern void cpu_die_early(void);
+extern void __noreturn cpu_die_early(void);
-static inline void cpu_park_loop(void)
+static inline void __noreturn cpu_park_loop(void)
 {
 	for (;;) {
 		wfe();
@@ -123,7 +123,7 @@ static inline void update_cpu_boot_status(int val)
  * which calls for a kernel panic. Update the boot status and park the calling
  * CPU.
  */
-static inline void cpu_panic_kernel(void)
+static inline void __noreturn cpu_panic_kernel(void)
 {
 	update_cpu_boot_status(CPU_PANIC_KERNEL);
 	cpu_park_loop();
...
@@ -840,7 +840,7 @@ UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_COMPAT */
 #ifdef CONFIG_VMAP_STACK
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 	unsigned long far = read_sysreg(far_el1);
...
@@ -398,7 +398,7 @@ static void __cpu_try_die(int cpu)
  * Kill the calling secondary CPU, early in bringup before it is turned
  * online.
  */
-void cpu_die_early(void)
+void __noreturn cpu_die_early(void)
 {
 	int cpu = smp_processor_id();
@@ -816,7 +816,7 @@ void arch_irq_work_raise(void)
 }
 #endif
-static void local_cpu_stop(void)
+static void __noreturn local_cpu_stop(void)
 {
 	set_cpu_online(smp_processor_id(), false);
@@ -839,7 +839,7 @@ void panic_smp_self_stop(void)
 static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
 #endif
-static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 {
 #ifdef CONFIG_KEXEC_CORE
 	crash_save_cpu(regs, cpu);
@@ -854,6 +854,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 	/* just in case */
 	cpu_park_loop();
+#else
+	BUG();
 #endif
 }
...
@@ -863,7 +863,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 	__aligned(16);
-void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
+void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
 {
 	unsigned long tsk_stk = (unsigned long)current->stack;
 	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@@ -905,7 +905,6 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
 	nmi_panic(regs, "Asynchronous SError Interrupt");
 	cpu_park_loop();
-	unreachable();
 }
 bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
...
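
Two of the hunks above change function bodies rather than just declarations: with ipi_cpu_crash_stop() now declared __noreturn, a !CONFIG_KEXEC_CORE build needs the added #else/BUG() so the function cannot fall through and return, and arm64_serror_panic() can drop its explicit unreachable() because the compiler already knows a call to the __noreturn cpu_park_loop() never comes back. A standalone sketch of the first point (crash_stop_sketch, crash_park and HAVE_CRASH_PATH are illustrative names; abort() plays the role of BUG()):

    #include <stdlib.h>
    #include <stdnoreturn.h>

    extern noreturn void crash_park(void);   /* stand-in for cpu_park_loop() */

    noreturn void crash_stop_sketch(void)    /* stand-in for ipi_cpu_crash_stop() */
    {
    #ifdef HAVE_CRASH_PATH
            crash_park();                    /* never returns */
    #else
            abort();                         /* plays the role of the added BUG() */
    #endif
            /* Without the #else branch, a build lacking HAVE_CRASH_PATH would
             * fall off the end of a noreturn function and the compiler warns. */
    }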