Commit c28ac96b authored by Russell King's avatar Russell King

[ARM] Optimise kernel->userspace exit code a little.

This improves interrupts and exceptions returning to user space
by a few cycles.  We change the requirements for
*_restore_user_regs slightly so we can remove a few instructions.
We also re-shuffle the code to handle pending work.
parent 2dc559c8
...@@ -725,7 +725,7 @@ __dabt_svc: sub sp, sp, #S_FRAME_SIZE ...@@ -725,7 +725,7 @@ __dabt_svc: sub sp, sp, #S_FRAME_SIZE
msr cpsr_c, r9 msr cpsr_c, r9
mov r2, sp mov r2, sp
bl do_DataAbort bl do_DataAbort
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC disable_irq r0
ldr r0, [sp, #S_PSR] ldr r0, [sp, #S_PSR]
msr spsr, r0 msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
...@@ -776,9 +776,9 @@ svc_preempt: teq r9, #0 @ was preempt count = 0 ...@@ -776,9 +776,9 @@ svc_preempt: teq r9, #0 @ was preempt count = 0
movne pc, lr movne pc, lr
mov r7, #PREEMPT_ACTIVE mov r7, #PREEMPT_ACTIVE
str r7, [r8, #TI_PREEMPT] @ set PREEMPT_ACTIVE str r7, [r8, #TI_PREEMPT] @ set PREEMPT_ACTIVE
1: set_cpsr_c r2, #MODE_SVC @ enable IRQs 1: enable_irq r2 @ enable IRQs
bl schedule bl schedule
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC @ disable IRQs disable_irq r0 @ disable IRQs
ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED tst r0, #_TIF_NEED_RESCHED
beq preempt_return @ go again beq preempt_return @ go again
...@@ -801,7 +801,7 @@ __und_svc: sub sp, sp, #S_FRAME_SIZE ...@@ -801,7 +801,7 @@ __und_svc: sub sp, sp, #S_FRAME_SIZE
mov r0, sp @ struct pt_regs *regs mov r0, sp @ struct pt_regs *regs
bl do_undefinstr bl do_undefinstr
1: set_cpsr_c r0, #PSR_I_BIT | MODE_SVC 1: disable_irq r0
ldr lr, [sp, #S_PSR] @ Get SVC cpsr ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr, lr msr spsr, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers ldmia sp, {r0 - pc}^ @ Restore SVC registers
...@@ -822,7 +822,7 @@ __pabt_svc: sub sp, sp, #S_FRAME_SIZE ...@@ -822,7 +822,7 @@ __pabt_svc: sub sp, sp, #S_FRAME_SIZE
mov r0, r2 @ address (pc) mov r0, r2 @ address (pc)
mov r1, sp @ regs mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler bl do_PrefetchAbort @ call abort handler
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC disable_irq r0
ldr r0, [sp, #S_PSR] ldr r0, [sp, #S_PSR]
msr spsr, r0 msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
...@@ -861,7 +861,7 @@ __dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go ...@@ -861,7 +861,7 @@ __dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
#else #else
bl CPU_ABORT_HANDLER bl CPU_ABORT_HANDLER
#endif #endif
set_cpsr_c r2, #MODE_SVC @ Enable interrupts enable_irq r2 @ Enable interrupts
mov r2, sp mov r2, sp
adrsvc al, lr, ret_from_exception adrsvc al, lr, ret_from_exception
b do_DataAbort b do_DataAbort
...@@ -916,7 +916,7 @@ __und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go ...@@ -916,7 +916,7 @@ __und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
adrsvc al, r9, ret_from_exception @ r9 = normal FP return adrsvc al, r9, ret_from_exception @ r9 = normal FP return
adrsvc al, lr, fpundefinstr @ lr = undefined instr return adrsvc al, lr, fpundefinstr @ lr = undefined instr return
call_fpe: set_cpsr_c r0, #MODE_SVC @ Enable interrupts call_fpe: enable_irq r0 @ Enable interrupts
get_thread_info r10 @ get current thread get_thread_info r10 @ get current thread
ldr r4, [r10, #TI_TASK] @ get current task ldr r4, [r10, #TI_TASK] @ get current task
mov r8, #1 mov r8, #1
...@@ -939,7 +939,7 @@ __pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go ...@@ -939,7 +939,7 @@ __pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr
alignment_trap r4, r7, __temp_abt alignment_trap r4, r7, __temp_abt
zero_fp zero_fp
set_cpsr_c r0, #MODE_SVC @ Enable interrupts enable_irq r0 @ Enable interrupts
mov r0, r5 @ address (pc) mov r0, r5 @ address (pc)
mov r1, sp @ regs mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler bl do_PrefetchAbort @ call abort handler
......
...@@ -35,18 +35,27 @@ ENTRY(__do_softirq) ...@@ -35,18 +35,27 @@ ENTRY(__do_softirq)
* stack. * stack.
*/ */
ret_fast_syscall: ret_fast_syscall:
set_cpsr_c r1, #PSR_I_BIT | MODE_SVC @ disable interrupts disable_irq r1 @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK tst r1, #_TIF_WORK_MASK
bne ret_fast_work bne fast_work_pending
fast_restore_user_regs fast_restore_user_regs
/* /*
* Ok, we need to do extra processing, enter the slow path. * Ok, we need to do extra processing, enter the slow path.
*/ */
ret_fast_work: fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0 str r0, [sp, #S_R0+S_OFF]! @ returned r0
b work_pending work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_notify_resume
disable_irq r1 @ disable interrupts
b no_work_pending
work_resched: work_resched:
bl schedule bl schedule
...@@ -55,22 +64,12 @@ work_resched: ...@@ -55,22 +64,12 @@ work_resched:
*/ */
ENTRY(ret_to_user) ENTRY(ret_to_user)
ret_slow_syscall: ret_slow_syscall:
set_cpsr_c r1, #PSR_I_BIT | MODE_SVC @ disable interrupts disable_irq r1 @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK tst r1, #_TIF_WORK_MASK
beq no_work_pending bne work_pending
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
blne __do_notify_resume
no_work_pending: no_work_pending:
restore_user_regs slow_restore_user_regs
__do_notify_resume:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
b do_notify_resume @ note the bl above sets lr
/* /*
* This is how we return from a fork. * This is how we return from a fork.
...@@ -80,9 +79,9 @@ ENTRY(ret_from_fork) ...@@ -80,9 +79,9 @@ ENTRY(ret_from_fork)
bl schedule_tail bl schedule_tail
#endif #endif
get_thread_info tsk get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1 mov why, #1
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
beq ret_slow_syscall beq ret_slow_syscall
mov r1, sp mov r1, sp
mov r0, #1 @ trace exit [IP = 1] mov r0, #1 @ trace exit [IP = 1]
...@@ -134,7 +133,7 @@ ENTRY(vector_swi) ...@@ -134,7 +133,7 @@ ENTRY(vector_swi)
ldr ip, [ip] ldr ip, [ip]
mcr p15, 0, ip, c1, c0 @ update control register mcr p15, 0, ip, c1, c0 @ update control register
#endif #endif
enable_irqs ip enable_irq ip
str r4, [sp, #-S_OFF]! @ push fifth arg str r4, [sp, #-S_OFF]! @ push fifth arg
......
...@@ -69,6 +69,25 @@ ...@@ -69,6 +69,25 @@
#define S_OFF 8 #define S_OFF 8
#ifdef CONFIG_CPU_32 #ifdef CONFIG_CPU_32
.macro set_cpsr_c, reg, mode
#if 1
/* broken binutils */
mov \reg, \mode
msr cpsr_c, \reg
#else
msr cpsr_c, \mode
#endif
.endm
.macro disable_irq, temp
set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
.endm
.macro enable_irq, temp
set_cpsr_c \temp, #MODE_SVC
.endm
.macro save_user_regs .macro save_user_regs
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12 stmia sp, {r0 - r12} @ Calling r0 - r12
...@@ -81,20 +100,20 @@ ...@@ -81,20 +100,20 @@
.endm .endm
.macro restore_user_regs .macro restore_user_regs
ldr r0, [sp, #S_PSR] @ Get calling cpsr ldr r1, [sp, #S_PSR] @ Get calling cpsr
mov ip, #PSR_I_BIT | MODE_SVC disable_irq ip @ disable IRQs
msr cpsr_c, ip @ disable IRQs ldr lr, [sp, #S_PC]! @ Get PC
msr spsr, r0 @ save in spsr_svc msr spsr, r1 @ save in spsr_svc
ldr lr, [sp, #S_PC] @ Get PC ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0 mov r0, r0
add sp, sp, #S_FRAME_SIZE add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
.endm .endm
/*
* Must be called with IRQs already disabled.
*/
.macro fast_restore_user_regs .macro fast_restore_user_regs
mov ip, #PSR_I_BIT | MODE_SVC
msr cpsr_c, ip @ disable IRQs
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc ldr lr, [sp, #S_OFF + S_PC]! @ get pc
msr spsr, r1 @ save in spsr_svc msr spsr, r1 @ save in spsr_svc
...@@ -104,12 +123,20 @@ ...@@ -104,12 +123,20 @@
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
.endm .endm
.macro mask_pc, rd, rm /*
* Must be called with IRQs already disabled.
*/
.macro slow_restore_user_regs
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr
.endm .endm
.macro enable_irqs, temp .macro mask_pc, rd, rm
mov \temp, #MODE_SVC
msr cpsr_c, \temp
.endm .endm
.macro get_thread_info, rd .macro get_thread_info, rd
...@@ -117,7 +144,7 @@ ...@@ -117,7 +144,7 @@
mov \rd, \rd, lsl #13 mov \rd, \rd, lsl #13
.endm .endm
/* /*
* Like adr, but force SVC mode (if required) * Like adr, but force SVC mode (if required)
*/ */
.macro adrsvc, cond, reg, label .macro adrsvc, cond, reg, label
...@@ -215,13 +242,3 @@ tsk .req r9 @ current thread_info ...@@ -215,13 +242,3 @@ tsk .req r9 @ current thread_info
ldr scno, [lr, #-4] @ get SWI instruction ldr scno, [lr, #-4] @ get SWI instruction
#endif #endif
.endm .endm
.macro set_cpsr_c, reg, mode
#if 1
mov \reg, \mode
msr cpsr_c, \reg
#else
msr cpsr_c, \mode
#endif
.endm
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment