Commit ae984d72 authored by David S. Miller

sparc: Unify strlen assembler.

Use the new asm/asm.h header to help commonize the
strlen assembler between 32-bit and 64-bit.

While we're here, use proper linux/linkage.h macros
instead of by-hand stuff.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5c03d590
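The unified strlen.S below leans on three macros from the new asm/asm.h: BRANCH32(), BRANCH32_ANNUL() and BRANCH_REG_ZERO(). The header itself is not part of this diff, so the following is only a sketch of the shape it plausibly has, inferred from how the macros are used on each side of the diff; the include-guard name and the exact expansions are assumptions.

/* Sketch only: asm/asm.h is not shown in this commit and its real contents
 * may differ.  The expansions are inferred from the usage in strlen.S.
 */
#ifndef ___ASM_SPARC_ASM_H	/* assumed guard name */
#define ___ASM_SPARC_ASM_H

#ifdef CONFIG_SPARC64
/* sparc64: V9 conditional branches with prediction hints on %icc. */
#define BRANCH32(TYPE, PREDICT, DEST) \
	TYPE,PREDICT	%icc, DEST
#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
	TYPE,a,PREDICT	%icc, DEST
/* sparc64: branch directly on a register being zero. */
#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
	brz,PREDICT	REG, DEST
#else
/* sparc32: V8 has no prediction hints, so drop them. */
#define BRANCH32(TYPE, PREDICT, DEST) \
	TYPE	DEST
#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
	TYPE,a	DEST
/* sparc32: V8 has no brz, so compare against zero first. */
#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
	cmp	REG, 0; \
	be	DEST
#endif

#endif /* ___ASM_SPARC_ASM_H */

With definitions along these lines, BRANCH32(be, pt, 9f) assembles to the predicted V9 form be,pt %icc, 9f on sparc64 and to a plain be 9f on sparc32, which matches the before/after pairing visible in the diff hunks below.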
@@ -6,7 +6,7 @@ ccflags-y := -Werror
 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
-lib-y += strlen_$(BITS).o
+lib-y += strlen.o
 lib-y += checksum_$(BITS).o
 lib-$(CONFIG_SPARC32) += blockops.o
 lib-y += memscan_$(BITS).o memcmp_$(BITS).o strncmp_$(BITS).o
-/* strlen.S: Sparc64 optimized strlen code
+/* strlen.S: Sparc optimized strlen code
  * Hand optimized from GNU libc's strlen
  * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
 #define LO_MAGIC 0x01010101
 #define HI_MAGIC 0x80808080
 
-	.align	32
-	.globl	strlen
-	.type	strlen,#function
-strlen:
+	.text
+ENTRY(strlen)
 	mov	%o0, %o1
 	andcc	%o0, 3, %g0
-	be,pt	%icc, 9f
+	BRANCH32(be, pt, 9f)
 	sethi	%hi(HI_MAGIC), %o4
 	ldub	[%o0], %o5
-	brz,pn	%o5, 11f
+	BRANCH_REG_ZERO(pn, %o5, 11f)
 	add	%o0, 1, %o0
 	andcc	%o0, 3, %g0
-	be,pn	%icc, 4f
+	BRANCH32(be, pn, 4f)
 	or	%o4, %lo(HI_MAGIC), %o3
 	ldub	[%o0], %o5
-	brz,pn	%o5, 12f
+	BRANCH_REG_ZERO(pn, %o5, 12f)
 	add	%o0, 1, %o0
 	andcc	%o0, 3, %g0
-	be,pt	%icc, 5f
+	BRANCH32(be, pt, 5f)
 	sethi	%hi(LO_MAGIC), %o4
 	ldub	[%o0], %o5
-	brz,pn	%o5, 13f
+	BRANCH_REG_ZERO(pn, %o5, 13f)
 	add	%o0, 1, %o0
-	ba,pt	%icc, 8f
+	BRANCH32(ba, pt, 8f)
 	or	%o4, %lo(LO_MAGIC), %o2
 9:
 	or	%o4, %lo(HI_MAGIC), %o3
@@ -44,24 +45,24 @@ strlen:
 2:
 	sub	%o5, %o2, %o4
 	andcc	%o4, %o3, %g0
-	be,pt	%icc, 8b
+	BRANCH32(be, pt, 8b)
 	add	%o0, 4, %o0
 
 	/* Check every byte. */
 	srl	%o5, 24, %g7
 	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
+	BRANCH32(be, pn, 1f)
 	add	%o0, -4, %o4
 	srl	%o5, 16, %g7
 	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
+	BRANCH32(be, pn, 1f)
 	add	%o4, 1, %o4
 	srl	%o5, 8, %g7
 	andcc	%g7, 0xff, %g0
-	be,pn	%icc, 1f
+	BRANCH32(be, pn, 1f)
 	add	%o4, 1, %o4
 	andcc	%o5, 0xff, %g0
-	bne,a,pt %icc, 2b
+	BRANCH32_ANNUL(bne, pt, 2b)
 	ld	[%o0], %o5
 	add	%o4, 1, %o4
 1:
@@ -76,5 +77,4 @@ strlen:
 13:
 	retl
 	mov	2, %o0
-
-	.size	strlen, .-strlen
+ENDPROC(strlen)
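ENTRY() and ENDPROC() in the new file come from linux/linkage.h and replace the hand-written .globl/.type/.size directives that the old file carried. As a rough guide only, the generic fallbacks in that header behave along the lines of the simplified sketch below; the real macros use an arch-overridable ALIGN rather than a hard-coded .align and define further variants.

/* Simplified sketch of the generic linux/linkage.h fallbacks;
 * not copied from the real header.
 */
#define ENTRY(name) \
	.globl name; \
	.align 4; \
	name:

#define END(name) \
	.size name, .-name

#define ENDPROC(name) \
	.type name, @function; \
	END(name)

The block that follows appears to be the old 32-bit strlen implementation that this unification removes; it is the hand-coded counterpart of the unified file above.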
-/* strlen.S: Sparc optimized strlen code
- * Hand optimized from GNU libc's strlen
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-0:
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	1f
-	add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be	4f
-	or	%o4, %lo(HI_MAGIC), %o3
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	2f
-	add	%o0, 1, %o0
-	andcc	%o0, 3, %g0
-	be	5f
-	sethi	%hi(LO_MAGIC), %o4
-	ldub	[%o0], %o5
-	cmp	%o5, 0
-	be	3f
-	add	%o0, 1, %o0
-	b	8f
-	or	%o4, %lo(LO_MAGIC), %o2
-1:
-	retl
-	mov	0, %o0
-2:
-	retl
-	mov	1, %o0
-3:
-	retl
-	mov	2, %o0
-	.align	4
-	.global	strlen
-strlen:
-	mov	%o0, %o1
-	andcc	%o0, 3, %g0
-	bne	0b
-	sethi	%hi(HI_MAGIC), %o4
-	or	%o4, %lo(HI_MAGIC), %o3
-4:
-	sethi	%hi(LO_MAGIC), %o4
-5:
-	or	%o4, %lo(LO_MAGIC), %o2
-8:
-	ld	[%o0], %o5
-2:
-	sub	%o5, %o2, %o4
-	andcc	%o4, %o3, %g0
-	be	8b
-	add	%o0, 4, %o0
-	/* Check every byte. */
-	srl	%o5, 24, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	add	%o0, -4, %o4
-	srl	%o5, 16, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	add	%o4, 1, %o4
-	srl	%o5, 8, %g5
-	andcc	%g5, 0xff, %g0
-	be	1f
-	add	%o4, 1, %o4
-	andcc	%o5, 0xff, %g0
-	bne,a	2b
-	ld	[%o0], %o5
-	add	%o4, 1, %o4
-1:
-	retl
-	sub	%o4, %o1, %o0
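Both the unified file and the removed 32-bit one rely on the same LO_MAGIC/HI_MAGIC word-at-a-time trick: subtracting 0x01010101 from an aligned 32-bit word and masking with 0x80808080 yields a nonzero result whenever some byte of the word is zero (or has its top bit set, a false positive that the per-byte check then resolves). The sketch below is a hypothetical C rendering of that loop for illustration only; strlen_c is not kernel code.

#include <stddef.h>
#include <stdint.h>

#define LO_MAGIC 0x01010101u
#define HI_MAGIC 0x80808080u

/* Hypothetical C rendering of the assembler's algorithm; not kernel code. */
static size_t strlen_c(const char *s)
{
	const char *start = s;

	/* Walk byte by byte until s is 4-byte aligned. */
	while ((uintptr_t)s & 3) {
		if (*s == '\0')
			return s - start;
		s++;
	}

	for (;;) {
		/* Aligned 4-byte load, as the "ld [%o0], %o5" does. */
		uint32_t w = *(const uint32_t *)(const void *)s;

		/* Nonzero iff some byte of w is zero or has its top bit set;
		 * the byte-wise scan below filters out the false positives.
		 */
		if ((w - LO_MAGIC) & HI_MAGIC) {
			const char *p;

			for (p = s; p < s + 4; p++)
				if (*p == '\0')
					return p - start;
			/* False positive (a byte >= 0x80): keep scanning. */
		}
		s += 4;
	}
}

The assembler performs the per-byte check by shifting the loaded word right by 24, 16 and 8 bits, which visits the bytes in increasing address order on big-endian SPARC.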