Commit 0b537257 authored by Max Filippov

xtensa: implement call0 ABI support in assembly

Replace hardcoded register and opcode names with ABI-agnostic macros.
Add register save/restore code where necessary. Conditionalize windowed
only or call0 only code. Add stack initialization matching _switch_to
epilogue to copy_thread.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 5cce39b6
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <asm/regs.h> #include <asm/regs.h>
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/cacheasm.h> #include <asm/cacheasm.h>
#include <asm/processor.h>
/* /*
* RB-Data: RedBoot data/bss * RB-Data: RedBoot data/bss
* P: Boot-Parameters * P: Boot-Parameters
...@@ -36,7 +37,7 @@ ...@@ -36,7 +37,7 @@
.globl __start .globl __start
/* this must be the first byte of the loader! */ /* this must be the first byte of the loader! */
__start: __start:
entry sp, 32 # we do not intend to return abi_entry(32) # we do not intend to return
_call0 _start _call0 _start
__start_a0: __start_a0:
.align 4 .align 4
...@@ -62,10 +63,12 @@ _start: ...@@ -62,10 +63,12 @@ _start:
wsr a4, windowstart wsr a4, windowstart
rsync rsync
movi a4, 0x00040000 movi a4, KERNEL_PS_WOE_MASK
wsr a4, ps wsr a4, ps
rsync rsync
KABI_C0 mov abi_saved0, abi_arg0
/* copy the loader to its address /* copy the loader to its address
* Note: The loader itself is a very small piece, so we assume we * Note: The loader itself is a very small piece, so we assume we
* don't partially overlap. We also assume (even more important) * don't partially overlap. We also assume (even more important)
...@@ -168,52 +171,52 @@ _reloc: ...@@ -168,52 +171,52 @@ _reloc:
movi a3, __image_load movi a3, __image_load
sub a4, a3, a4 sub a4, a3, a4
add a8, a0, a4 add abi_arg2, a0, a4
# a1 Stack # a1 Stack
# a8(a4) Load address of the image # a8(a4) Load address of the image
movi a6, _image_start movi abi_arg0, _image_start
movi a10, _image_end movi abi_arg4, _image_end
movi a7, 0x1000000 movi abi_arg1, 0x1000000
sub a11, a10, a6 sub abi_tmp0, abi_arg4, abi_arg0
movi a9, complen movi abi_arg3, complen
s32i a11, a9, 0 s32i abi_tmp0, abi_arg3, 0
movi a0, 0 movi a0, 0
# a6 destination # abi_arg0 destination
# a7 maximum size of destination # abi_arg1 maximum size of destination
# a8 source # abi_arg2 source
# a9 ptr to length # abi_arg3 ptr to length
.extern gunzip .extern gunzip
movi a4, gunzip movi abi_tmp0, gunzip
beqz a4, 1f beqz abi_tmp0, 1f
callx4 a4 abi_callx abi_tmp0
j 2f j 2f
# a6 destination start # abi_arg0 destination start
# a7 maximum size of destination # abi_arg1 maximum size of destination
# a8 source start # abi_arg2 source start
# a9 ptr to length # abi_arg3 ptr to length
# a10 destination end # abi_arg4 destination end
1: 1:
l32i a9, a8, 0 l32i abi_tmp0, abi_arg2, 0
l32i a11, a8, 4 l32i abi_tmp1, abi_arg2, 4
s32i a9, a6, 0 s32i abi_tmp0, abi_arg0, 0
s32i a11, a6, 4 s32i abi_tmp1, abi_arg0, 4
l32i a9, a8, 8 l32i abi_tmp0, abi_arg2, 8
l32i a11, a8, 12 l32i abi_tmp1, abi_arg2, 12
s32i a9, a6, 8 s32i abi_tmp0, abi_arg0, 8
s32i a11, a6, 12 s32i abi_tmp1, abi_arg0, 12
addi a6, a6, 16 addi abi_arg0, abi_arg0, 16
addi a8, a8, 16 addi abi_arg2, abi_arg2, 16
blt a6, a10, 1b blt abi_arg0, abi_arg4, 1b
/* jump to the kernel */ /* jump to the kernel */
...@@ -230,6 +233,7 @@ _reloc: ...@@ -230,6 +233,7 @@ _reloc:
# a2 Boot parameter list # a2 Boot parameter list
KABI_C0 mov abi_arg0, abi_saved0
movi a0, _image_start movi a0, _image_start
jx a0 jx a0
......
...@@ -158,6 +158,7 @@ _user_exception: ...@@ -158,6 +158,7 @@ _user_exception:
/* Rotate ws so that the current windowbase is at bit0. */ /* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
#if defined(USER_SUPPORT_WINDOWED)
rsr a2, windowbase rsr a2, windowbase
rsr a3, windowstart rsr a3, windowstart
ssr a2 ssr a2
...@@ -167,24 +168,33 @@ _user_exception: ...@@ -167,24 +168,33 @@ _user_exception:
src a2, a3, a2 src a2, a3, a2
srli a2, a2, 32-WSBITS srli a2, a2, 32-WSBITS
s32i a2, a1, PT_WMASK # needed for restoring registers s32i a2, a1, PT_WMASK # needed for restoring registers
#else
movi a2, 0
movi a3, 1
s32i a2, a1, PT_WINDOWBASE
s32i a3, a1, PT_WINDOWSTART
s32i a3, a1, PT_WMASK
#endif
/* Save only live registers. */ /* Save only live registers. */
_bbsi.l a2, 1, 1f UABI_W _bbsi.l a2, 1, 1f
s32i a4, a1, PT_AREG4 s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5 s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6 s32i a6, a1, PT_AREG6
s32i a7, a1, PT_AREG7 s32i a7, a1, PT_AREG7
_bbsi.l a2, 2, 1f UABI_W _bbsi.l a2, 2, 1f
s32i a8, a1, PT_AREG8 s32i a8, a1, PT_AREG8
s32i a9, a1, PT_AREG9 s32i a9, a1, PT_AREG9
s32i a10, a1, PT_AREG10 s32i a10, a1, PT_AREG10
s32i a11, a1, PT_AREG11 s32i a11, a1, PT_AREG11
_bbsi.l a2, 3, 1f UABI_W _bbsi.l a2, 3, 1f
s32i a12, a1, PT_AREG12 s32i a12, a1, PT_AREG12
s32i a13, a1, PT_AREG13 s32i a13, a1, PT_AREG13
s32i a14, a1, PT_AREG14 s32i a14, a1, PT_AREG14
s32i a15, a1, PT_AREG15 s32i a15, a1, PT_AREG15
#if defined(USER_SUPPORT_WINDOWED)
_bnei a2, 1, 1f # only one valid frame? _bnei a2, 1, 1f # only one valid frame?
/* Only one valid frame, skip saving regs. */ /* Only one valid frame, skip saving regs. */
...@@ -239,7 +249,7 @@ _user_exception: ...@@ -239,7 +249,7 @@ _user_exception:
rsync rsync
/* We are back to the original stack pointer (a1) */ /* We are back to the original stack pointer (a1) */
#endif
2: /* Now, jump to the common exception handler. */ 2: /* Now, jump to the common exception handler. */
j common_exception j common_exception
...@@ -295,6 +305,7 @@ _kernel_exception: ...@@ -295,6 +305,7 @@ _kernel_exception:
s32i a3, a1, PT_SAR s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL s32i a2, a1, PT_ICOUNTLEVEL
#if defined(__XTENSA_WINDOWED_ABI__)
/* Rotate ws so that the current windowbase is at bit0. */ /* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
...@@ -305,27 +316,28 @@ _kernel_exception: ...@@ -305,27 +316,28 @@ _kernel_exception:
src a2, a3, a2 src a2, a3, a2
srli a2, a2, 32-WSBITS srli a2, a2, 32-WSBITS
s32i a2, a1, PT_WMASK # needed for kernel_exception_exit s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
#endif
/* Save only the live window-frame */ /* Save only the live window-frame */
_bbsi.l a2, 1, 1f KABI_W _bbsi.l a2, 1, 1f
s32i a4, a1, PT_AREG4 s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5 s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6 s32i a6, a1, PT_AREG6
s32i a7, a1, PT_AREG7 s32i a7, a1, PT_AREG7
_bbsi.l a2, 2, 1f KABI_W _bbsi.l a2, 2, 1f
s32i a8, a1, PT_AREG8 s32i a8, a1, PT_AREG8
s32i a9, a1, PT_AREG9 s32i a9, a1, PT_AREG9
s32i a10, a1, PT_AREG10 s32i a10, a1, PT_AREG10
s32i a11, a1, PT_AREG11 s32i a11, a1, PT_AREG11
_bbsi.l a2, 3, 1f KABI_W _bbsi.l a2, 3, 1f
s32i a12, a1, PT_AREG12 s32i a12, a1, PT_AREG12
s32i a13, a1, PT_AREG13 s32i a13, a1, PT_AREG13
s32i a14, a1, PT_AREG14 s32i a14, a1, PT_AREG14
s32i a15, a1, PT_AREG15 s32i a15, a1, PT_AREG15
#ifdef __XTENSA_WINDOWED_ABI__
_bnei a2, 1, 1f _bnei a2, 1, 1f
/* Copy spill slots of a0 and a1 to imitate movsp /* Copy spill slots of a0 and a1 to imitate movsp
* in order to keep exception stack continuous * in order to keep exception stack continuous
*/ */
...@@ -333,6 +345,7 @@ _kernel_exception: ...@@ -333,6 +345,7 @@ _kernel_exception:
l32i a0, a1, PT_SIZE + 4 l32i a0, a1, PT_SIZE + 4
s32e a3, a1, -16 s32e a3, a1, -16
s32e a0, a1, -12 s32e a0, a1, -12
#endif
1: 1:
l32i a0, a1, PT_AREG0 # restore saved a0 l32i a0, a1, PT_AREG0 # restore saved a0
wsr a0, depc wsr a0, depc
...@@ -419,16 +432,16 @@ common_exception: ...@@ -419,16 +432,16 @@ common_exception:
movi a3, LOCKLEVEL movi a3, LOCKLEVEL
.Lexception: .Lexception:
movi a0, PS_WOE_MASK KABI_W movi a0, PS_WOE_MASK
or a3, a3, a0 KABI_W or a3, a3, a0
#else #else
addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
movi a0, LOCKLEVEL movi a0, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL # a3 = PS.INTLEVEL
moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
movi a2, PS_WOE_MASK KABI_W movi a2, PS_WOE_MASK
or a3, a3, a2 KABI_W or a3, a3, a2
rsr a2, exccause rsr a2, exccause
#endif #endif
...@@ -461,14 +474,14 @@ common_exception: ...@@ -461,14 +474,14 @@ common_exception:
*/ */
rsr a4, excsave1 rsr a4, excsave1
mov a6, a1 # pass stack frame
mov a7, a2 # pass EXCCAUSE
addx4 a4, a2, a4 addx4 a4, a2, a4
l32i a4, a4, EXC_TABLE_DEFAULT # load handler l32i a4, a4, EXC_TABLE_DEFAULT # load handler
mov abi_arg1, a2 # pass EXCCAUSE
mov abi_arg0, a1 # pass stack frame
/* Call the second-level handler */ /* Call the second-level handler */
callx4 a4 abi_callx a4
/* Jump here for exception exit */ /* Jump here for exception exit */
.global common_exception_return .global common_exception_return
...@@ -482,15 +495,15 @@ common_exception_return: ...@@ -482,15 +495,15 @@ common_exception_return:
1: 1:
irq_save a2, a3 irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
call4 trace_hardirqs_off abi_call trace_hardirqs_off
#endif #endif
/* Jump if we are returning from kernel exceptions. */ /* Jump if we are returning from kernel exceptions. */
l32i a3, a1, PT_PS l32i abi_saved1, a1, PT_PS
GET_THREAD_INFO(a2, a1) GET_THREAD_INFO(a2, a1)
l32i a4, a2, TI_FLAGS l32i a4, a2, TI_FLAGS
_bbci.l a3, PS_UM_BIT, 6f _bbci.l abi_saved1, PS_UM_BIT, 6f
/* Specific to a user exception exit: /* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling, * We need to check some flags for signal handling and rescheduling,
...@@ -509,20 +522,20 @@ common_exception_return: ...@@ -509,20 +522,20 @@ common_exception_return:
/* Call do_signal() */ /* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
call4 trace_hardirqs_on abi_call trace_hardirqs_on
#endif #endif
rsil a2, 0 rsil a2, 0
mov a6, a1 mov abi_arg0, a1
call4 do_notify_resume # int do_notify_resume(struct pt_regs*) abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
j 1b j 1b
3: /* Reschedule */ 3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
call4 trace_hardirqs_on abi_call trace_hardirqs_on
#endif #endif
rsil a2, 0 rsil a2, 0
call4 schedule # void schedule (void) abi_call schedule # void schedule (void)
j 1b j 1b
#ifdef CONFIG_PREEMPTION #ifdef CONFIG_PREEMPTION
...@@ -533,33 +546,33 @@ common_exception_return: ...@@ -533,33 +546,33 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f bnez a4, 4f
call4 preempt_schedule_irq abi_call preempt_schedule_irq
j 4f j 4f
#endif #endif
#if XTENSA_FAKE_NMI #if XTENSA_FAKE_NMI
.LNMIexit: .LNMIexit:
l32i a3, a1, PT_PS l32i abi_saved1, a1, PT_PS
_bbci.l a3, PS_UM_BIT, 4f _bbci.l abi_saved1, PS_UM_BIT, 4f
#endif #endif
5: 5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
_bbci.l a4, TIF_DB_DISABLED, 7f _bbci.l a4, TIF_DB_DISABLED, 7f
call4 restore_dbreak abi_call restore_dbreak
7: 7:
#endif #endif
#ifdef CONFIG_DEBUG_TLB_SANITY #ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
call4 check_tlb_sanity abi_call check_tlb_sanity
#endif #endif
6: 6:
4: 4:
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f bgei a4, LOCKLEVEL, 1f
call4 trace_hardirqs_on abi_call trace_hardirqs_on
1: 1:
#endif #endif
/* Restore optional registers. */ /* Restore optional registers. */
...@@ -572,14 +585,15 @@ common_exception_return: ...@@ -572,14 +585,15 @@ common_exception_return:
l32i a2, a1, PT_SCOMPARE1 l32i a2, a1, PT_SCOMPARE1
wsr a2, scompare1 wsr a2, scompare1
#endif #endif
wsr a3, ps /* disable interrupts */ wsr abi_saved1, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
user_exception_exit: user_exception_exit:
/* Restore the state of the task and return from the exception. */ /* Restore the state of the task and return from the exception. */
#if defined(USER_SUPPORT_WINDOWED)
/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
l32i a2, a1, PT_WINDOWBASE l32i a2, a1, PT_WINDOWBASE
...@@ -634,8 +648,10 @@ user_exception_exit: ...@@ -634,8 +648,10 @@ user_exception_exit:
* frame where we had loaded a2), or at least the lower 4 bits * frame where we had loaded a2), or at least the lower 4 bits
* (if we have restored WSBITS-1 frames). * (if we have restored WSBITS-1 frames).
*/ */
2: 2:
#else
movi a2, 1
#endif
#if XCHAL_HAVE_THREADPTR #if XCHAL_HAVE_THREADPTR
l32i a3, a1, PT_THREADPTR l32i a3, a1, PT_THREADPTR
wur a3, threadptr wur a3, threadptr
...@@ -650,6 +666,7 @@ user_exception_exit: ...@@ -650,6 +666,7 @@ user_exception_exit:
kernel_exception_exit: kernel_exception_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
/* Check if we have to do a movsp. /* Check if we have to do a movsp.
* *
* We only have to do a movsp if the previous window-frame has * We only have to do a movsp if the previous window-frame has
...@@ -702,6 +719,9 @@ kernel_exception_exit: ...@@ -702,6 +719,9 @@ kernel_exception_exit:
* *
* Note: We expect a2 to hold PT_WMASK * Note: We expect a2 to hold PT_WMASK
*/ */
#else
movi a2, 1
#endif
common_exception_exit: common_exception_exit:
...@@ -927,7 +947,7 @@ ENTRY(unrecoverable_exception) ...@@ -927,7 +947,7 @@ ENTRY(unrecoverable_exception)
wsr a1, windowbase wsr a1, windowbase
rsync rsync
movi a1, PS_WOE_MASK | LOCKLEVEL movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
wsr a1, ps wsr a1, ps
rsync rsync
...@@ -935,8 +955,8 @@ ENTRY(unrecoverable_exception) ...@@ -935,8 +955,8 @@ ENTRY(unrecoverable_exception)
movi a0, 0 movi a0, 0
addi a1, a1, PT_REGS_OFFSET addi a1, a1, PT_REGS_OFFSET
movi a6, unrecoverable_text movi abi_arg0, unrecoverable_text
call4 panic abi_call panic
1: j 1b 1: j 1b
...@@ -1403,12 +1423,12 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1403,12 +1423,12 @@ ENTRY(fast_syscall_spill_registers)
rsr a3, excsave1 rsr a3, excsave1
l32i a1, a3, EXC_TABLE_KSTK l32i a1, a3, EXC_TABLE_KSTK
movi a4, PS_WOE_MASK | LOCKLEVEL movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
wsr a4, ps wsr a4, ps
rsync rsync
movi a6, SIGSEGV movi abi_arg0, SIGSEGV
call4 do_exit abi_call do_exit
/* shouldn't return, so panic */ /* shouldn't return, so panic */
...@@ -1887,57 +1907,77 @@ ENDPROC(fast_store_prohibited) ...@@ -1887,57 +1907,77 @@ ENDPROC(fast_store_prohibited)
ENTRY(system_call) ENTRY(system_call)
#if defined(__XTENSA_WINDOWED_ABI__)
abi_entry_default abi_entry_default
#elif defined(__XTENSA_CALL0_ABI__)
abi_entry(12)
s32i a0, sp, 0
s32i abi_saved0, sp, 4
s32i abi_saved1, sp, 8
mov abi_saved0, a2
#else
#error Unsupported Xtensa ABI
#endif
/* regs->syscall = regs->areg[2] */ /* regs->syscall = regs->areg[2] */
l32i a7, a2, PT_AREG2 l32i a7, abi_saved0, PT_AREG2
s32i a7, a2, PT_SYSCALL s32i a7, abi_saved0, PT_SYSCALL
GET_THREAD_INFO(a4, a1) GET_THREAD_INFO(a4, a1)
l32i a3, a4, TI_FLAGS l32i abi_saved1, a4, TI_FLAGS
movi a4, _TIF_WORK_MASK movi a4, _TIF_WORK_MASK
and a3, a3, a4 and abi_saved1, abi_saved1, a4
beqz a3, 1f beqz abi_saved1, 1f
mov a6, a2 mov abi_arg0, abi_saved0
call4 do_syscall_trace_enter abi_call do_syscall_trace_enter
beqz a6, .Lsyscall_exit beqz abi_rv, .Lsyscall_exit
l32i a7, a2, PT_SYSCALL l32i a7, abi_saved0, PT_SYSCALL
1: 1:
/* syscall = sys_call_table[syscall_nr] */ /* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table movi a4, sys_call_table
movi a5, __NR_syscalls movi a5, __NR_syscalls
movi a6, -ENOSYS movi abi_rv, -ENOSYS
bgeu a7, a5, 1f bgeu a7, a5, 1f
addx4 a4, a7, a4 addx4 a4, a7, a4
l32i a4, a4, 0 l32i abi_tmp0, a4, 0
/* Load args: arg0 - arg5 are passed via regs. */ /* Load args: arg0 - arg5 are passed via regs. */
l32i a6, a2, PT_AREG6 l32i abi_arg0, abi_saved0, PT_AREG6
l32i a7, a2, PT_AREG3 l32i abi_arg1, abi_saved0, PT_AREG3
l32i a8, a2, PT_AREG4 l32i abi_arg2, abi_saved0, PT_AREG4
l32i a9, a2, PT_AREG5 l32i abi_arg3, abi_saved0, PT_AREG5
l32i a10, a2, PT_AREG8 l32i abi_arg4, abi_saved0, PT_AREG8
l32i a11, a2, PT_AREG9 l32i abi_arg5, abi_saved0, PT_AREG9
callx4 a4 abi_callx abi_tmp0
1: /* regs->areg[2] = return_value */ 1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2 s32i abi_rv, abi_saved0, PT_AREG2
bnez a3, 1f bnez abi_saved1, 1f
.Lsyscall_exit: .Lsyscall_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
abi_ret_default abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
l32i a0, sp, 0
l32i abi_saved0, sp, 4
l32i abi_saved1, sp, 8
abi_ret(12)
#else
#error Unsupported Xtensa ABI
#endif
1: 1:
mov a6, a2 mov abi_arg0, abi_saved0
call4 do_syscall_trace_leave abi_call do_syscall_trace_leave
abi_ret_default j .Lsyscall_exit
ENDPROC(system_call) ENDPROC(system_call)
...@@ -1988,8 +2028,18 @@ ENDPROC(system_call) ...@@ -1988,8 +2028,18 @@ ENDPROC(system_call)
ENTRY(_switch_to) ENTRY(_switch_to)
#if defined(__XTENSA_WINDOWED_ABI__)
abi_entry(XTENSA_SPILL_STACK_RESERVE) abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
abi_entry(16)
s32i a12, sp, 0
s32i a13, sp, 4
s32i a14, sp, 8
s32i a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
mov a11, a3 # and 'next' (a3) mov a11, a3 # and 'next' (a3)
l32i a4, a2, TASK_THREAD_INFO l32i a4, a2, TASK_THREAD_INFO
...@@ -2033,7 +2083,9 @@ ENTRY(_switch_to) ...@@ -2033,7 +2083,9 @@ ENTRY(_switch_to)
/* Flush register file. */ /* Flush register file. */
#if defined(__XTENSA_WINDOWED_ABI__)
spill_registers_kernel spill_registers_kernel
#endif
/* Set kernel stack (and leave critical section) /* Set kernel stack (and leave critical section)
* Note: It's save to set it here. The stack will not be overwritten * Note: It's save to set it here. The stack will not be overwritten
...@@ -2055,34 +2107,43 @@ ENTRY(_switch_to) ...@@ -2055,34 +2107,43 @@ ENTRY(_switch_to)
wsr a14, ps wsr a14, ps
rsync rsync
#if defined(__XTENSA_WINDOWED_ABI__)
abi_ret(XTENSA_SPILL_STACK_RESERVE) abi_ret(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
l32i a12, sp, 0
l32i a13, sp, 4
l32i a14, sp, 8
l32i a15, sp, 12
abi_ret(16)
#else
#error Unsupported Xtensa ABI
#endif
ENDPROC(_switch_to) ENDPROC(_switch_to)
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
/* void schedule_tail (struct task_struct *prev) /* void schedule_tail (struct task_struct *prev)
* Note: prev is still in a6 (return value from fake call4 frame) * Note: prev is still in abi_arg0 (return value from fake call frame)
*/ */
call4 schedule_tail abi_call schedule_tail
mov a6, a1
call4 do_syscall_trace_leave
mov abi_arg0, a1
abi_call do_syscall_trace_leave
j common_exception_return j common_exception_return
ENDPROC(ret_from_fork) ENDPROC(ret_from_fork)
/* /*
* Kernel thread creation helper * Kernel thread creation helper
* On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg * On entry, set up by copy_thread: abi_saved0 = thread_fn,
* left from _switch_to: a6 = prev * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
*/ */
ENTRY(ret_from_kernel_thread) ENTRY(ret_from_kernel_thread)
call4 schedule_tail abi_call schedule_tail
mov a6, a3 mov abi_arg0, abi_saved1
callx4 a2 abi_callx abi_saved0
j common_exception_return j common_exception_return
ENDPROC(ret_from_kernel_thread) ENDPROC(ret_from_kernel_thread)
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
* Kevin Chea * Kevin Chea
*/ */
#include <asm/asmmacro.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheasm.h> #include <asm/cacheasm.h>
...@@ -193,9 +194,10 @@ ENTRY(_startup) ...@@ -193,9 +194,10 @@ ENTRY(_startup)
movi a1, start_info movi a1, start_info
l32i a1, a1, 0 l32i a1, a1, 0
movi a2, PS_WOE_MASK | LOCKLEVEL /* Disable interrupts. */
# WOE=1, INTLEVEL=LOCKLEVEL, UM=0 /* Enable window exceptions if kernel is built with windowed ABI. */
wsr a2, ps # (enable reg-windows; progmode stack) movi a2, KERNEL_PS_WOE_MASK | LOCKLEVEL
wsr a2, ps
rsync rsync
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -267,13 +269,13 @@ ENTRY(_startup) ...@@ -267,13 +269,13 @@ ENTRY(_startup)
l32i a1, a1, 0 l32i a1, a1, 0
#endif #endif
movi a6, 0 movi abi_arg0, 0
xsr a6, excsave1 xsr abi_arg0, excsave1
/* init_arch kick-starts the linux kernel */ /* init_arch kick-starts the linux kernel */
call4 init_arch abi_call init_arch
call4 start_kernel abi_call start_kernel
should_never_return: should_never_return:
j should_never_return j should_never_return
...@@ -297,10 +299,10 @@ should_never_return: ...@@ -297,10 +299,10 @@ should_never_return:
s32i a3, a2, 0 s32i a3, a2, 0
memw memw
movi a6, 0 movi abi_arg0, 0
wsr a6, excsave1 wsr abi_arg0, excsave1
call4 secondary_start_kernel abi_call secondary_start_kernel
j should_never_return j should_never_return
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
...@@ -17,11 +17,16 @@ ...@@ -17,11 +17,16 @@
/* /*
* Entry condition: * Entry condition:
* *
* a2: a0 of the caller * a2: a0 of the caller in windowed ABI
* a10: a0 of the caller in call0 ABI
*
* In call0 ABI the function _mcount is called with the special ABI:
* its argument is in a10 and all the usual argument registers (a2 - a7)
* must be preserved in addition to callee-saved a12 - a15.
*/ */
ENTRY(_mcount) ENTRY(_mcount)
#if defined(__XTENSA_WINDOWED_ABI__)
abi_entry_default abi_entry_default
movi a4, ftrace_trace_function movi a4, ftrace_trace_function
...@@ -42,7 +47,36 @@ ENTRY(_mcount) ...@@ -42,7 +47,36 @@ ENTRY(_mcount)
callx4 a4 callx4 a4
abi_ret_default abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
abi_entry_default
movi a9, ftrace_trace_function
l32i a9, a9, 0
movi a11, ftrace_stub
bne a9, a11, 1f
abi_ret_default
1: abi_entry(28)
s32i a0, sp, 0
s32i a2, sp, 4
s32i a3, sp, 8
s32i a4, sp, 12
s32i a5, sp, 16
s32i a6, sp, 20
s32i a7, sp, 24
addi a2, a10, -MCOUNT_INSN_SIZE
callx0 a9
l32i a0, sp, 0
l32i a2, sp, 4
l32i a3, sp, 8
l32i a4, sp, 12
l32i a5, sp, 16
l32i a6, sp, 20
l32i a7, sp, 24
abi_ret(28)
#else
#error Unsupported Xtensa ABI
#endif
ENDPROC(_mcount) ENDPROC(_mcount)
ENTRY(ftrace_stub) ENTRY(ftrace_stub)
......
...@@ -211,11 +211,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, ...@@ -211,11 +211,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
struct thread_info *ti; struct thread_info *ti;
#endif #endif
#if defined(__XTENSA_WINDOWED_ABI__)
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
SPILL_SLOT(childregs, 1) = (unsigned long)childregs; SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
SPILL_SLOT(childregs, 0) = 0; SPILL_SLOT(childregs, 0) = 0;
p->thread.sp = (unsigned long)childregs; p->thread.sp = (unsigned long)childregs;
#elif defined(__XTENSA_CALL0_ABI__)
/* Reserve 16 bytes for the _switch_to stack frame. */
p->thread.sp = (unsigned long)childregs - 16;
#else
#error Unsupported Xtensa ABI
#endif
if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
...@@ -272,11 +279,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, ...@@ -272,11 +279,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
p->thread.ra = MAKE_RA_FOR_CALL( p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_kernel_thread, 1); (unsigned long)ret_from_kernel_thread, 1);
/* pass parameters to ret_from_kernel_thread: /* pass parameters to ret_from_kernel_thread: */
* a2 = thread_fn, a3 = thread_fn arg #if defined(__XTENSA_WINDOWED_ABI__)
/*
* a2 = thread_fn, a3 = thread_fn arg.
* Window underflow will load registers from the
* spill slots on the stack on return from _switch_to.
*/ */
SPILL_SLOT(childregs, 3) = thread_fn_arg;
SPILL_SLOT(childregs, 2) = usp_thread_fn; SPILL_SLOT(childregs, 2) = usp_thread_fn;
SPILL_SLOT(childregs, 3) = thread_fn_arg;
#elif defined(__XTENSA_CALL0_ABI__)
/*
* a12 = thread_fn, a13 = thread_fn arg.
* _switch_to epilogue will load registers from the stack.
*/
((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
#else
#error Unsupported Xtensa ABI
#endif
/* Childregs are only used when we're going to userspace /* Childregs are only used when we're going to userspace
* in which case start_thread will set them up. * in which case start_thread will set them up.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment