Commit d1538c46 authored by Chris Zankel

xtensa: provide proper assembler function boundaries with ENDPROC()

Use ENDPROC() to mark the end of assembler functions.
Signed-off-by: Chris Zankel <chris@zankel.net>
parent c0226e34
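
Note: ENTRY(), END(), and ENDPROC() are the generic linkage macros from
include/linux/linkage.h; xtensa uses the generic definitions. As a reference
sketch (quoted from memory of the generic header of this era; exact layout
may differ), they expand to:

	#ifndef ENTRY
	#define ENTRY(name) \
		.globl name; \
		ALIGN; \
		name:
	#endif

	#ifndef END
	#define END(name) \
		.size name, .-name
	#endif

	#ifndef ENDPROC
	#define ENDPROC(name) \
		.type name, @function; \
		END(name)
	#endif

ENDPROC() marks the symbol as a function and records its size, so tools such
as objdump and kallsyms see proper function extents; END() is used below for
data objects (e.g. coprocessor_owner, empty_zero_page) where a function type
would be wrong.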
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -450,6 +450,7 @@ ENTRY(fast_unaligned)
 1:	movi	a0, _user_exception
 	jx	a0
 
+ENDPROC(fast_unaligned)
 
 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,10 +43,13 @@
 /* IO protection is currently unsupported. */
 
 ENTRY(fast_io_protect)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_io_protect)
+
 #if XTENSA_HAVE_COPROCESSORS
 
 /*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
  */
 
 ENTRY(coprocessor_save)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_save)
+
 ENTRY(coprocessor_load)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lload_cp_regs_jump_table
@@ -162,6 +169,8 @@ ENTRY(coprocessor_load)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_load)
+
 /*
  * coprocessor_flush(struct task_info*, index)
  *                             a2         a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
 ENTRY(coprocessor_flush)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_flush)
+
 ENTRY(coprocessor_restore)
 	entry	a1, 32
 	s32i	a0, a1, 0
@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_restore)
+
 /*
  * Entry condition:
  *
@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
  */
 
 ENTRY(fast_coprocessor_double)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_coprocessor_double)
 
 ENTRY(fast_coprocessor)
@@ -327,9 +343,15 @@ ENTRY(fast_coprocessor)
 	rfe
 
+ENDPROC(fast_coprocessor)
 
 	.data
 
 ENTRY(coprocessor_owner)
+
 	.fill	XCHAL_CP_MAX, 4, 0
 
+END(coprocessor_owner)
 
 #endif /* XTENSA_HAVE_COPROCESSORS */
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -219,6 +219,7 @@ _user_exception:
 	j	common_exception
 
+ENDPROC(user_exception)
 
 /*
  * First-level exit handler for kernel exceptions
@@ -641,6 +642,8 @@ common_exception_exit:
 	l32i	a1, a1, PT_AREG1
 	rfde
 
+ENDPROC(kernel_exception)
+
 /*
  * Debug exception handler.
  *
@@ -701,6 +704,7 @@ ENTRY(debug_exception)
 	/* Debug exception while in exception mode. */
 1:	j	1b	// FIXME!!
 
+ENDPROC(debug_exception)
 
 /*
  * We get here in case of an unrecoverable exception.
@@ -751,6 +755,7 @@ ENTRY(unrecoverable_exception)
 1:	j	1b
 
+ENDPROC(unrecoverable_exception)
 
 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
@@ -929,6 +934,7 @@ ENTRY(fast_alloca)
 	l32i	a2, a2, PT_AREG2
 	rfe
 
+ENDPROC(fast_alloca)
 
 /*
  * fast system calls.
@@ -966,6 +972,8 @@ ENTRY(fast_syscall_kernel)
 	j	kernel_exception
 
+ENDPROC(fast_syscall_kernel)
+
 ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
@@ -983,6 +991,8 @@ ENTRY(fast_syscall_user)
 	j	user_exception
 
+ENDPROC(fast_syscall_user)
+
 ENTRY(fast_syscall_unrecoverable)
 
 	/* Restore all states. */
@@ -995,7 +1005,7 @@ ENTRY(fast_syscall_unrecoverable)
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
-
+ENDPROC(fast_syscall_unrecoverable)
 
 /*
  * sysxtensa syscall handler
@@ -1101,7 +1111,7 @@ CATCH
 	movi	a2, -EINVAL
 	rfe
 
-
+ENDPROC(fast_syscall_xtensa)
 
 /* fast_syscall_spill_registers.
@@ -1160,6 +1170,8 @@ ENTRY(fast_syscall_spill_registers)
 	movi	a2, 0
 	rfe
 
+ENDPROC(fast_syscall_spill_registers)
+
 /* Fixup handler.
  *
  * We get here if the spill routine causes an exception, e.g. tlb miss.
@@ -1464,6 +1476,8 @@ ENTRY(_spill_registers)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(_spill_registers)
+
 #ifdef CONFIG_MMU
 
 /*
  * We should never get here. Bail out!
@@ -1475,6 +1489,8 @@ ENTRY(fast_second_level_miss_double_kernel)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(fast_second_level_miss_double_kernel)
+
 /* First-level entry handler for user, kernel, and double 2nd-level
  * TLB miss exceptions.  Note that for now, user and kernel miss
  * exceptions share the same entry point and are handled identically.
@@ -1682,6 +1698,7 @@ ENTRY(fast_second_level_miss)
 	j	_kernel_exception
 1:	j	_user_exception
 
+ENDPROC(fast_second_level_miss)
 
 /*
  * StoreProhibitedException
@@ -1777,6 +1794,9 @@ ENTRY(fast_store_prohibited)
 	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
 
+ENDPROC(fast_store_prohibited)
+
 #endif /* CONFIG_MMU */
 
 /*
@@ -1787,6 +1807,7 @@ ENTRY(fast_store_prohibited)
  */
 
 ENTRY(system_call)
+
 	entry	a1, 32
 
 	/* regs->syscall = regs->areg[2] */
@@ -1831,6 +1852,8 @@ ENTRY(system_call)
 	callx4	a4
 	retw
 
+ENDPROC(system_call)
+
 /*
  * Task switch.
@@ -1899,6 +1922,7 @@ ENTRY(_switch_to)
 	retw
 
+ENDPROC(_switch_to)
 
 ENTRY(ret_from_fork)
@@ -1914,6 +1938,8 @@ ENTRY(ret_from_fork)
 	j	common_exception_return
 
+ENDPROC(ret_from_fork)
+
 /*
  * Kernel thread creation helper
  * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -47,16 +47,19 @@
  */
 
 	__HEAD
-	.globl _start
-_start:	_j	2f
+ENTRY(_start)
+
+	_j	2f
 
 	.align	4
 1:	.word	_startup
 2:	l32r	a0, 1b
 	jx	a0
 
+ENDPROC(_start)
 
 	.section .init.text, "ax"
-	.align 4
-_startup:
+
+ENTRY(_startup)
 
 	/* Disable interrupts and exceptions. */
@@ -230,6 +233,7 @@ _startup:
 should_never_return:
 	j	should_never_return
 
+ENDPROC(_startup)
 
 /*
  * BSS section
@@ -239,6 +243,8 @@ __PAGE_ALIGNED_BSS
 #ifdef CONFIG_MMU
 ENTRY(swapper_pg_dir)
 	.fill	PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
 #endif
 ENTRY(empty_zero_page)
 	.fill	PAGE_SIZE, 1, 0
+END(empty_zero_page)
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
 	jx	a0
 
+ENDPROC(_UserExceptionVector)
+
 /*
  * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
  *
@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
 	jx	a0
 
+ENDPROC(_KernelExceptionVector)
 
 /*
  * Double exception vector (Exceptions with PS.EXCM == 1)
@@ -344,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
 	.end literal_prefix
 
+ENDPROC(_DoubleExceptionVector)
 
 /*
  * Debug interrupt vector
@@ -355,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
 	.section .DebugInterruptVector.text, "ax"
 
 ENTRY(_DebugInterruptVector)
+
 	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 	jx	a0
 
+ENDPROC(_DebugInterruptVector)
 
 /* Window overflow and underflow handlers.
@@ -369,38 +375,43 @@ ENTRY(_DebugInterruptVector)
  * we try to access any page that would cause a page fault early.
  */
 
+#define ENTRY_ALIGN64(name)	\
+	.globl name;		\
+	.align 64;		\
+	name:
+
 	.section		.WindowVectors.text, "ax"
 
 /* 4-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
 	s32e	a0, a5, -16
 	s32e	a1, a5, -12
 	s32e	a2, a5, -8
 	s32e	a3, a5, -4
 	rfwo
 
+ENDPROC(_WindowOverflow4)
 
 /* 4-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
 	l32e	a0, a5, -16
 	l32e	a1, a5, -12
 	l32e	a2, a5, -8
 	l32e	a3, a5, -4
 	rfwu
 
+ENDPROC(_WindowUnderflow4)
 
 /* 8-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
 	s32e	a0, a9, -16
 	l32e	a0, a1, -12
 	s32e	a2, a9, -8
@@ -412,11 +423,12 @@ _WindowOverflow8:
 	s32e	a7, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow8)
 
 /* 8-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
 	l32e	a1, a9, -12
 	l32e	a0, a9, -16
 	l32e	a7, a1, -12
@@ -428,12 +440,12 @@ _WindowUnderflow8:
 	l32e	a7, a7, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow8)
 
 /* 12-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
 	s32e	a0, a13, -16
 	l32e	a0, a1, -12
 	s32e	a1, a13, -12
@@ -449,11 +461,12 @@ _WindowOverflow12:
 	s32e	a11, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow12)
 
 /* 12-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
 	l32e	a1, a13, -12
 	l32e	a0, a13, -16
 	l32e	a11, a1, -12
@@ -469,6 +482,8 @@ _WindowUnderflow12:
 	l32e	a11, a11, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow12)
 
 	.text
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -170,7 +170,7 @@ ENTRY(csum_partial)
 3:
 	j	5b		/* branch to handle the remaining byte */
 
-
+ENDPROC(csum_partial)
 
 /*
  * Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +211,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
  */
 
 ENTRY(csum_partial_copy_generic)
+
 	entry	sp, 32
 	mov	a12, a3
 	mov	a11, a4
@@ -367,6 +368,8 @@ DST(	s8i	a8, a3, 1	)
 6:
 	j	4b		/* process the possible trailing odd byte */
 
+ENDPROC(csum_partial_copy_generic)
+
 # Exception handler:
 .section .fixup, "ax"
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
  */
 
 ENTRY(clear_page)
+
 	entry	a1, 16
 
 	movi	a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
 	retw
 
+ENDPROC(clear_page)
+
 /*
  * copy_page and copy_user_page are the same for non-cache-aliased configs.
  *
@@ -53,6 +56,7 @@ ENTRY(clear_page)
  */
 
 ENTRY(copy_page)
+
 	entry	a1, 16
 
 	__loopi	a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
 	retw
 
+ENDPROC(copy_page)
+
 #ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
  */
 
 ENTRY(clear_user_page)
+
 	entry	a1, 32
 
 	/* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
 	retw
 
+ENDPROC(clear_user_page)
+
 /*
  * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
  *                    a2         a3            a4                  a5
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
 	retw
 
+ENDPROC(copy_user_page)
+
 #endif
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
  */
 
 ENTRY(__flush_invalidate_dcache_page_alias)
+
 	entry	sp, 16
 
 	movi	a7, 0			# required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page_alias)
 
 #endif
 
 ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
 #if (ICACHE_WAY_SIZE > PAGE_SIZE)
 
 ENTRY(__invalidate_icache_page_alias)
+
 	entry	sp, 16
 
 	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
 	isync
 	retw
 
+ENDPROC(__invalidate_icache_page_alias)
 
 #endif
 
 /* End of special treatment in tlb miss exception */
 
 ENTRY(__tlbtemp_mapping_end)
 
 #endif /* CONFIG_MMU
 
 /*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
  */
 
 ENTRY(__invalidate_icache_page)
+
 	entry	sp, 16
 
 	___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
 	retw
 
+ENDPROC(__invalidate_icache_page)
+
 /*
  * void __invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
 	retw
 
+ENDPROC(__invalidate_dcache_page)
+
 /*
  * void __flush_invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__flush_invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
 	dsync
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page)
+
 /*
  * void __flush_dcache_page(ulong start)
  */
 
 ENTRY(__flush_dcache_page)
+
 	entry	sp, 16
 
 	___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
 	dsync
 	retw
 
+ENDPROC(__flush_dcache_page)
+
 /*
  * void __invalidate_icache_range(ulong start, ulong size)
 */
 
 ENTRY(__invalidate_icache_range)
+
 	entry	sp, 16
 
 	___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
 	retw
 
+ENDPROC(__invalidate_icache_range)
+
 /*
  * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */
 
 ENTRY(__flush_invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
 	retw
 
+ENDPROC(__flush_invalidate_dcache_range)
+
 /*
  * void _flush_dcache_range(ulong start, ulong size)
 */
 
 ENTRY(__flush_dcache_range)
+
 	entry	sp, 16
 
 	___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
 	retw
 
+ENDPROC(__flush_dcache_range)
+
 /*
  * void _invalidate_dcache_range(ulong start, ulong size)
 */
 
 ENTRY(__invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___invalidate_dcache_range a2 a3 a4
 
 	retw
 
+ENDPROC(__invalidate_dcache_range)
+
 /*
  * void _invalidate_icache_all(void)
 */
 
 ENTRY(__invalidate_icache_all)
+
 	entry	sp, 16
 
 	___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
 	retw
 
+ENDPROC(__invalidate_icache_all)
+
 /*
  * void _flush_invalidate_dcache_all(void)
 */
 
 ENTRY(__flush_invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
 	retw
 
+ENDPROC(__flush_invalidate_dcache_all)
+
 /*
  * void _invalidate_dcache_all(void)
 */
 
 ENTRY(__invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
 	retw
 
+ENDPROC(__invalidate_dcache_all)
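
Net effect, as a minimal sketch (assuming the generic macro expansions quoted
above; ALIGN stands for the arch-defined alignment directive): a routine such
as clear_page now presents complete symbol information to the toolchain:

	.globl clear_page
	ALIGN
clear_page:
	entry	a1, 16
	...
	retw
	.type	clear_page, @function
	.size	clear_page, . - clear_page

Without the trailing .type/.size annotations, tools such as objdump -t and
the kallsyms table see these routines only as bare labels with zero size.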