Commit d8381a1d authored by David S. Miller

[SPARC64]: Add .type and .size directives to some asm files.

Signed-off-by: David S. Miller <davem@redhat.com>
parent 1914ba52
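
For context on the one-line additions below: ".type sym,#function" marks a symbol as an ELF function symbol (on SPARC the assembler uses "#" as the type prefix), and ".size sym, .-sym" records the symbol's length as the distance from its label to the current location counter, which lets tools such as objdump and profilers attribute an address range to the right routine. A minimal sketch of the idiom applied to each entry point in this patch; the label example_func is hypothetical and only illustrates the pattern, it is not part of the patch:

	.text
	.align	32
	.globl	example_func
	.type	example_func,#function	/* symbol type: ELF function (STT_FUNC) */
example_func:				/* hypothetical leaf routine, for illustration only */
	retl				/* return to caller */
	 nop				/* branch delay slot */
	.size	example_func, .-example_func	/* size = current location minus the label */

Without the .size directive the symbol is typically emitted with a zero st_size, so address-to-symbol lookups can spill into whatever code follows the label.
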
@@ -117,6 +117,7 @@
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE
mov %o0, %g5
@@ -550,3 +551,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o1, 1, %o1
retl
mov EX_RETVAL(%g5), %o0
.size FUNC_NAME, .-FUNC_NAME
@@ -78,6 +78,7 @@
*/
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE
mov %o0, %g5
@@ -412,3 +413,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o1, 1, %o1
retl
mov EX_RETVAL(%g5), %o0
.size FUNC_NAME, .-FUNC_NAME
@@ -22,9 +22,11 @@
flush %g2;
.globl cheetah_patch_copyops
.type cheetah_patch_copyops,#function
cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy)
ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
retl
nop
.size cheetah_patch_copyops,.-cheetah_patch_copyops
@@ -10,6 +10,7 @@
.align 64
.globl __atomic_add
.type __atomic_add,#function
__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5
add %g5, %o0, %g7
@@ -19,8 +20,10 @@ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore
retl
add %g7, %o0, %o0
.size __atomic_add, .-__atomic_add
.globl __atomic_sub
.type __atomic_sub,#function
__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5
sub %g5, %o0, %g7
@@ -30,8 +33,10 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore
retl
sub %g7, %o0, %o0
.size __atomic_sub, .-__atomic_sub
.globl __atomic64_add
.type __atomic64_add,#function
__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
ldx [%o1], %g5
add %g5, %o0, %g7
@@ -41,8 +46,10 @@ __atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore
retl
add %g7, %o0, %o0
.size __atomic64_add, .-__atomic64_add
.globl __atomic64_sub
.type __atomic64_sub,#function
__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
ldx [%o1], %g5
sub %g5, %o0, %g7
@@ -52,4 +59,4 @@ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore
retl
sub %g7, %o0, %o0
.size __atomic64_sub, .-__atomic64_sub
@@ -9,6 +9,7 @@
.text
.align 64
.globl ___test_and_set_bit
.type ___test_and_set_bit,#function
___test_and_set_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1
mov 1, %g5
@@ -26,8 +27,10 @@ ___test_and_set_bit: /* %o0=nr, %o1=addr */
ldx [%o1], %g7
2: retl
membar #StoreLoad | #StoreStore
.size ___test_and_set_bit, .-___test_and_set_bit
.globl ___test_and_clear_bit
.type ___test_and_clear_bit,#function
___test_and_clear_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1
mov 1, %g5
@@ -45,8 +48,10 @@ ___test_and_clear_bit: /* %o0=nr, %o1=addr */
ldx [%o1], %g7
2: retl
membar #StoreLoad | #StoreStore
.size ___test_and_clear_bit, .-___test_and_clear_bit
.globl ___test_and_change_bit
.type ___test_and_change_bit,#function
___test_and_change_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1
mov 1, %g5
@@ -64,3 +69,4 @@ ___test_and_change_bit: /* %o0=nr, %o1=addr */
2: retl
membar #StoreLoad | #StoreStore
nop
.size ___test_and_change_bit, .-___test_and_change_bit
@@ -31,7 +31,8 @@
* to copy register windows around during thread cloning.
*/
.globl ___copy_in_user
.type ___copy_in_user,#function
___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
@@ -99,11 +100,14 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
retl
clr %o0
.size ___copy_in_user, .-___copy_in_user
/* Act like copy_{to,in}_user(), ie. return zero instead
* of original destination pointer. This is invoked when
* copy_{to,in}_user() finds that %asi is kernel space.
*/
.globl memcpy_user_stub
.type memcpy_user_stub,#function
memcpy_user_stub:
save %sp, -192, %sp
mov %i0, %o0
@@ -112,3 +116,4 @@ memcpy_user_stub:
mov %i2, %o2
ret
restore %g0, %g0, %o0
.size memcpy_user_stub, .-memcpy_user_stub
@@ -45,6 +45,7 @@
.align 32
.globl copy_user_page
.type copy_user_page,#function
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4
sethi %uhi(PAGE_OFFSET), %g2
@@ -237,3 +238,5 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
retl
stw %o4, [%g6 + TI_PRE_COUNT]
.size copy_user_page, .-copy_user_page
.text
.align 32
.globl ip_fast_csum
.type ip_fast_csum,#function
ip_fast_csum: /* %o0 = iph, %o1 = ihl */
sub %o1, 4, %g7
lduw [%o0 + 0x00], %o2
@@ -30,3 +31,4 @@ ip_fast_csum: /* %o0 = iph, %o1 = ihl */
set 0xffff, %o1
retl
and %o2, %o1, %o0
.size ip_fast_csum, .-ip_fast_csum
@@ -5,8 +5,9 @@
*/
.text
.align 32
.globl memmove
.type memmove,#function
memmove:
mov %o0, %g1
cmp %o0, %o1
@@ -29,3 +30,4 @@ memmove:
retl
mov %g1, %o0
.size memmove, .-memmove
@@ -7,6 +7,7 @@
.align 64
.globl _raw_spin_lock
.type _raw_spin_lock,#function
_raw_spin_lock: /* %o0 = lock_ptr */
1: ldstub [%o0], %g7
brnz,pn %g7, 2f
@@ -17,8 +18,10 @@ _raw_spin_lock: /* %o0 = lock_ptr */
brnz,pt %g7, 2b
membar #LoadLoad
ba,a,pt %xcc, 1b
.size _raw_spin_lock, .-_raw_spin_lock
.globl _raw_spin_lock_flags
.type _raw_spin_lock_flags,#function
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7
brnz,pn %g7, 2f
@@ -33,3 +36,4 @@ _raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
membar #LoadLoad
ba,pt %xcc, 1b ! Retry lock acquire
wrpr %g2, %pil ! Restore PIL
.size _raw_spin_lock_flags, .-_raw_spin_lock_flags
@@ -9,7 +9,8 @@
#define HI_MAGIC 0x80808080
.align 32
.global strlen
.globl strlen
.type strlen,#function
strlen:
mov %o0, %o1
andcc %o0, 3, %g0
@@ -75,3 +76,5 @@ strlen:
13:
retl
mov 2, %o0
.size strlen, .-strlen
@@ -7,8 +7,9 @@
#include <asm/asi.h>
.text
.align 4
.global strncmp
.align 32
.globl strncmp
.type strncmp,#function
strncmp:
brlez,pn %o2, 3f
lduba [%o0] (ASI_PNF), %o3
@@ -28,3 +29,4 @@ strncmp:
3:
retl
clr %o0
.size strncmp, .-strncmp
@@ -12,7 +12,7 @@
0: .xword 0x0101010101010101
.text
.align 4
.align 32
/* Must return:
*
@@ -31,6 +31,7 @@
*/
.globl __strncpy_from_user
.type __strncpy_from_user,#function
__strncpy_from_user:
/* %o0=dest, %o1=src, %o2=count */
sethi %hi(0b), %o5 ! IEU0 Group
@@ -122,6 +123,7 @@ __strncpy_from_user:
mov %o2, %o0
2: retl
add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
.section .fixup,#alloc,#execinstr
.align 4