Commit 421dd6fa authored by Chris Metcalf, committed by Will Deacon

arm64: factor work_pending state machine to C

Currently ret_fast_syscall, work_pending, and ret_to_user form an ad-hoc
state machine that can be difficult to reason about due to duplicated
code and a large number of branch targets.

This patch factors the common logic out into the existing
do_notify_resume function, converting the code to C in the process,
making the code more legible.
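
For orientation only, here is a standalone C sketch of the control-flow shape that replaces the assembly state machine. The flag values, the mock handlers, and notify_resume_mock() below are simplified stand-ins invented for illustration; the real implementation is in the signal.c hunk further down.

#include <stdio.h>

/* Simplified stand-ins for the kernel's thread-info work flags. */
#define TIF_NEED_RESCHED   (1u << 0)
#define TIF_SIGPENDING     (1u << 1)
#define TIF_WORK_MASK      (TIF_NEED_RESCHED | TIF_SIGPENDING)

static unsigned int pending_flags = TIF_NEED_RESCHED | TIF_SIGPENDING;

/* Mock work handlers: each one clears the flag it services. */
static void schedule_mock(void)  { pending_flags &= ~TIF_NEED_RESCHED; puts("schedule()"); }
static void do_signal_mock(void) { pending_flags &= ~TIF_SIGPENDING;   puts("do_signal()"); }

/*
 * The shape of the factored-out loop: keep handling work until no
 * work-mask bits remain, re-reading the flags on every iteration.
 */
static void notify_resume_mock(unsigned int thread_flags)
{
	do {
		if (thread_flags & TIF_NEED_RESCHED)
			schedule_mock();
		else if (thread_flags & TIF_SIGPENDING)
			do_signal_mock();

		thread_flags = pending_flags;	/* re-check before returning */
	} while (thread_flags & TIF_WORK_MASK);
}

int main(void)
{
	notify_resume_mock(pending_flags);
	return 0;
}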

This patch tries to closely mirror the existing behaviour while using
the usual C control flow primitives. As local_irq_{disable,enable} may
be instrumented, we balance exception entry (where we will almost certainly
enable IRQs) with a call to trace_hardirqs_on just before the
return to userspace.
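
As a rough, self-contained model of that balancing (the stub tracer state and the mock local_irq_{enable,disable} below are illustrative only, not the kernel's instrumented versions): entry masks IRQs without informing the tracer, do_notify_resume() reports the true state with trace_hardirqs_off() before doing any work, and the return path reports IRQs as enabled again just before dropping to userspace.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative tracer state: what the IRQ-tracing code *believes*. */
static bool traced_irqs_on = true;	/* userspace runs with IRQs enabled */
static bool hw_irqs_on = true;

static void trace_hardirqs_on(void)  { traced_irqs_on = true; }
static void trace_hardirqs_off(void) { traced_irqs_on = false; }

/* Mock local_irq_{enable,disable}: the real ones may be instrumented. */
static void local_irq_enable(void)  { hw_irqs_on = true;  trace_hardirqs_on();  }
static void local_irq_disable(void) { hw_irqs_on = false; trace_hardirqs_off(); }

int main(void)
{
	/* Exception entry: hardware masks IRQs, tracer not yet informed. */
	hw_irqs_on = false;

	/* C work loop: first tell the tracer IRQs are really off... */
	trace_hardirqs_off();
	/* ...then enable/disable around the work itself. */
	local_irq_enable();
	puts("handle pending work");
	local_irq_disable();

	/* Return path: report IRQs on just before the return to userspace. */
	trace_hardirqs_on();
	hw_irqs_on = true;		/* exception return restores user state */

	assert(traced_irqs_on == hw_irqs_on);	/* tracer and hardware agree */
	return 0;
}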
Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0a7d87a7
arch/arm64/kernel/entry.S
@@ -707,18 +707,13 @@ ret_fast_syscall_trace:
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
 work_pending:
-	tbnz	x1, #TIF_NEED_RESCHED, work_resched
-	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
 	mov	x0, sp				// 'regs'
-	enable_irq				// enable interrupts for do_notify_resume()
 	bl	do_notify_resume
-	b	ret_to_user
-work_resched:
 #ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off		// the IRQs are off here, inform the tracing code
+	bl	trace_hardirqs_on		// enabled while in userspace
 #endif
-	bl	schedule
+	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
+	b	finish_ret_to_user
 /*
  * "slow" syscall return path.
  */
@@ -727,6 +722,7 @@ ret_to_user:
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
+finish_ret_to_user:
 	enable_step_tsk x1, x2
 	kernel_exit 0
 ENDPROC(ret_to_user)

arch/arm64/kernel/signal.c
@@ -402,6 +402,18 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned int thread_flags)
 {
+	/*
+	 * The assembly code enters us with IRQs off, but it hasn't
+	 * informed the tracing code of that for efficiency reasons.
+	 * Update the trace code with the current status.
+	 */
+	trace_hardirqs_off();
+
+	do {
+		if (thread_flags & _TIF_NEED_RESCHED) {
+			schedule();
+		} else {
+			local_irq_enable();
 	if (thread_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -412,5 +424,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 	if (thread_flags & _TIF_FOREIGN_FPSTATE)
 		fpsimd_restore_current_state();
+		}
+
+		local_irq_disable();
+		thread_flags = READ_ONCE(current_thread_info()->flags);
+	} while (thread_flags & _TIF_WORK_MASK);
 }