nexedi / linux

Commit 94fd582e
authored Feb 18, 2003 by Richard Henderson
[ALPHA] Use more compiler builtins instead of inline assembly.
parent 7ae4323c
Showing 3 changed files with 53 additions and 46 deletions (+53 -46):

  include/asm-alpha/bitops.h      +14 -23
  include/asm-alpha/byteorder.h    +3  -5
  include/asm-alpha/compiler.h    +36 -18
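The pattern applied throughout the patch is mechanical: each open-coded __asm__ statement is replaced by the matching __kernel_* helper from <asm/compiler.h>, and that helper now expands either to a GCC builtin (on GCC 3.4 or newer) or to the same inline assembly as before. As a rough, non-Alpha illustration of the idea (a sketch only; my_cttz is a hypothetical stand-in for __kernel_cttz, not kernel code):

/* Illustrative sketch only: models how a __kernel_* helper resolves either to
 * a compiler builtin or to an explicit fallback, depending on GCC version.
 * my_cttz is a hypothetical stand-in; the real macros live in
 * include/asm-alpha/compiler.h.  Counts trailing zeros of a 64-bit value. */
#include <stdio.h>

#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define my_cttz(x) __builtin_ctzl(x)          /* compiler knows the semantics */
#else
static inline unsigned long my_cttz(unsigned long x)
{                                              /* portable fallback loop */
	unsigned long n = 0;
	while (!(x & 1)) { x >>= 1; n++; }
	return n;
}
#endif

int main(void)
{
	printf("cttz(0x50) = %lu\n", (unsigned long) my_cttz(0x50UL)); /* -> 4 */
	return 0;
}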
include/asm-alpha/bitops.h (view file @ 94fd582e)
@@ -264,13 +264,11 @@ static inline unsigned long ffz(unsigned long word)
 {
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 	/* Whee. EV67 can calculate it directly. */
-	unsigned long result;
-	__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
-	return result;
+	return __kernel_cttz(~word);
 #else
 	unsigned long bits, qofs, bofs;
-	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
+	bits = __kernel_cmpbge(word, ~0UL);
 	qofs = ffz_b(bits);
 	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(bits);
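For readers without the Alpha architecture handbook at hand: cmpbge performs a byte-wise unsigned greater-or-equal compare and yields an 8-bit mask, and extbl extracts a single byte, so the pre-EV67 ffz() narrows the search to one byte before looking inside it. A rough portable model of that data flow (plain C stand-ins for the hardware operations; ffz_b_model is a simplified per-byte search, not the kernel's ffz_b):

/* Portable model of the non-EV67 ffz() path: find the first 0 bit in a
 * 64-bit word.  cmpbge_model(a, b) sets bit i when byte i of a is >= byte i
 * of b, mirroring the Alpha cmpbge instruction; extbl_model pulls out byte n. */
#include <assert.h>
#include <stdint.h>

static unsigned cmpbge_model(uint64_t a, uint64_t b)
{
	unsigned mask = 0;
	for (int i = 0; i < 8; i++)
		if (((a >> (8 * i)) & 0xff) >= ((b >> (8 * i)) & 0xff))
			mask |= 1u << i;
	return mask;
}

static uint64_t extbl_model(uint64_t val, unsigned n)
{
	return (val >> (8 * n)) & 0xff;
}

static unsigned ffz_b_model(uint64_t x)	/* first zero bit in a small value */
{
	unsigned n = 0;
	while (x & 1) { x >>= 1; n++; }
	return n;
}

static unsigned long ffz_model(uint64_t word)
{
	/* Bytes that are all-ones compare >= 0xff; the first clear bit in the
	 * mask marks the first byte that still contains a zero bit. */
	unsigned bits = cmpbge_model(word, ~(uint64_t)0);
	unsigned qofs = ffz_b_model(bits);
	unsigned bofs = ffz_b_model(extbl_model(word, qofs));
	return qofs * 8 + bofs;
}

int main(void)
{
	assert(ffz_model(0x00000000000000ffULL) == 8);
	assert(ffz_model(0xffffffffffffff7fULL) == 7);
	return 0;
}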
@@ -286,13 +284,11 @@ static inline unsigned long __ffs(unsigned long word)
 {
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 	/* Whee. EV67 can calculate it directly. */
-	unsigned long result;
-	__asm__("cttz %1,%0" : "=r"(result) : "r"(word));
-	return result;
+	return __kernel_cttz(word);
 #else
 	unsigned long bits, qofs, bofs;
-	__asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
+	bits = __kernel_cmpbge(word, 0);
 	qofs = ffz_b(bits);
 	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(~bits);
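__ffs() mirrors ffz(): it finds the first set bit rather than the first clear bit, which is why the EV67 path passes word instead of ~word to cttz and why the byte mask is built by comparing against zero. Put differently, for suitable inputs __ffs(x) behaves like a count of trailing zeros and ffz(x) like a count of trailing ones. A minimal user-space check of that relationship with GCC builtins (illustrative only, not kernel code):

/* Sanity check of the ffz/__ffs relationship using GCC builtins:
 * __ffs(x) corresponds to ctz(x) for nonzero x, and ffz(x) to ctz(~x)
 * whenever some bit of x is clear. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t x = 0xf0f0f0f0f0f0f0f0ULL;

	assert(__builtin_ctzll(x) == 4);	/* first set bit, i.e. __ffs(x) */
	assert(__builtin_ctzll(~x) == 0);	/* first clear bit, i.e. ffz(x) */

	x = 0x00000000000000ffULL;
	assert(__builtin_ctzll(~x) == 8);	/* matches ffz(0xff) == 8 */
	return 0;
}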
@@ -311,8 +307,8 @@ static inline unsigned long __ffs(unsigned long word)
 static inline int ffs(int word)
 {
-	int result = __ffs(word);
-	return word ? result + 1 : 0;
+	int result = __ffs(word) + 1;
+	return word ? result : 0;
 }

 /*
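The only non-mechanical change in this hunk is hoisting the +1 out of the conditional: with result = __ffs(word) + 1 computed up front, the return is a plain select between result and 0, presumably easier for the compiler to turn into a conditional move. The two forms agree for every input, as a quick portable check shows (ffs_old/ffs_new are hypothetical names, with __builtin_ctz standing in for __ffs):

/* The two formulations of ffs() return the same value; the second keeps the
 * final selection independent of the addition.  ffs_old/ffs_new are
 * illustrative stand-ins only. */
#include <assert.h>

static int ffs_old(int word)
{
	int result = word ? __builtin_ctz(word) : 0;	/* guard: ctz(0) is undefined */
	return word ? result + 1 : 0;
}

static int ffs_new(int word)
{
	int result = (word ? __builtin_ctz(word) : 0) + 1;
	return word ? result : 0;
}

int main(void)
{
	int samples[] = { 0, 1, 2, 3, 8, 0x80, -1 };
	for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
		assert(ffs_old(samples[i]) == ffs_new(samples[i]));
	return 0;
}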
@@ -321,9 +317,7 @@ static inline int ffs(int word)
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 static inline int fls(int word)
 {
-	long result;
-	__asm__("ctlz %1,%0" : "=r"(result) : "r"(word & 0xffffffff));
-	return 64 - result;
+	return 64 - __kernel_ctlz(word & 0xffffffff);
 }
 #else
 #define fls	generic_fls
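fls() is the 32-bit find-last-set: the argument is masked to 32 bits, zero-extended in a 64-bit register, and ctlz counts leading zeros of that 64-bit value, so 64 - ctlz gives the 1-based index of the highest set bit (and 0 for an input of 0, since the hardware ctlz of 0 is 64). A portable model of the arithmetic (the zero case is guarded explicitly because the GCC builtin is undefined there):

/* fls(x): 1-based index of the most significant set bit of a 32-bit value,
 * 0 if x == 0.  Models the 64 - ctlz(x & 0xffffffff) computation. */
#include <assert.h>
#include <stdint.h>

static int fls_model(int word)
{
	uint64_t x = (uint32_t) word;	/* word & 0xffffffff, zero-extended */
	if (x == 0)
		return 0;		/* hardware ctlz(0) == 64 gives this for free */
	return 64 - __builtin_clzll(x);
}

int main(void)
{
	assert(fls_model(0) == 0);
	assert(fls_model(1) == 1);
	assert(fls_model(0x8000) == 16);
	assert(fls_model(-1) == 32);	/* 0xffffffff: highest set bit is bit 31 */
	return 0;
}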
@@ -332,11 +326,10 @@ static inline int fls(int word)
 /* Compute powers of two for the given integer. */
 static inline int floor_log2(unsigned long word)
 {
-	long bit;
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
-	__asm__("ctlz %1,%0" : "=r"(bit) : "r"(word));
-	return 63 - bit;
+	return 63 - __kernel_ctlz(word);
 #else
+	long bit;
 	for (bit = -1; word; bit++)
 		word >>= 1;
 	return bit;
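floor_log2() on EV67 is 63 - ctlz(word), i.e. the index of the most significant set bit of a 64-bit value; the generic fallback counts how often the word can be shifted right before reaching zero, which produces the same index (and -1 for 0). A small equivalence check in portable C (sketch only):

/* floor_log2(word): index of the most significant set bit.  Compares the
 * 63 - clz form against the shift-count fallback from the #else branch. */
#include <assert.h>
#include <stdint.h>

static long floor_log2_clz(uint64_t word)
{
	return 63 - __builtin_clzll(word);	/* caller must ensure word != 0 */
}

static long floor_log2_loop(uint64_t word)
{
	long bit;
	for (bit = -1; word; bit++)
		word >>= 1;
	return bit;
}

int main(void)
{
	for (uint64_t w = 1; w; w <<= 1)	/* every single-bit value */
		assert(floor_log2_clz(w) == floor_log2_loop(w));
	assert(floor_log2_loop(0) == -1);	/* only the loop form handles 0 */
	assert(floor_log2_clz(1000) == 9 && floor_log2_loop(1000) == 9);
	return 0;
}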
@@ -358,9 +351,7 @@ static inline int ceil_log2(unsigned int word)
 /* Whee. EV67 can calculate it directly. */
 static inline unsigned long hweight64(unsigned long w)
 {
-	unsigned long result;
-	__asm__ ("ctpop %1,%0" : "=r"(result) : "r"(w));
-	return result;
+	return __kernel_ctpop(w);
 }
 #define hweight32(x) hweight64((x) & 0xfffffffful)
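hweight64() is a population count: the number of set bits in a 64-bit word, with the narrower hweight variants derived by masking. A user-space cross-check of the builtin against the classic bit-clearing loop (illustrative, not kernel code):

/* hweight64(w): number of set bits ("Hamming weight") of a 64-bit word.
 * Cross-checks the popcount builtin against the w &= w - 1 loop, which
 * clears the lowest set bit on each iteration. */
#include <assert.h>
#include <stdint.h>

static unsigned long hweight64_loop(uint64_t w)
{
	unsigned long n = 0;
	while (w) {
		w &= w - 1;
		n++;
	}
	return n;
}

int main(void)
{
	assert(__builtin_popcountll(0) == 0 && hweight64_loop(0) == 0);
	assert(__builtin_popcountll(0xffULL) == 8);
	assert((unsigned long) __builtin_popcountll(0xf0f0f0f0f0f0f0f0ULL)
	       == hweight64_loop(0xf0f0f0f0f0f0f0f0ULL));	/* both 32 */
	return 0;
}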
@@ -415,11 +406,11 @@ find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
 	if (!size)
 		return result;
 	tmp = *p;
-found_first:
+ found_first:
 	tmp |= ~0UL << size;
 	if (tmp == ~0UL)	/* Are any bits zero? */
 		return result + size; /* Nope. */
-found_middle:
+ found_middle:
 	return result + ffz(tmp);
 }
@@ -456,11 +447,11 @@ find_next_bit(void * addr, unsigned long size, unsigned long offset)
 	if (!size)
 		return result;
 	tmp = *p;
-found_first:
+ found_first:
 	tmp &= ~0UL >> (64 - size);
 	if (!tmp)
 		return result + size;
-found_middle:
+ found_middle:
 	return result + __ffs(tmp);
 }

...
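The two find_next_* hunks touch only the placement of the goto labels; the tail logic is unchanged: mask the partial last word, fall through to found_first, and let ffz()/__ffs() locate the bit within the word at found_middle. A typical caller walks a bitmap by calling the function repeatedly until it returns size, as in this portable model (find_next_bit_model is a hypothetical stand-in for the kernel function):

/* Walking the set bits of a small bitmap: repeatedly ask for the next set
 * bit at or after 'offset' until the returned index reaches 'size'.
 * Uses 64-bit words, like the Alpha implementation. */
#include <stdio.h>
#include <stdint.h>

static unsigned long
find_next_bit_model(const uint64_t *addr, unsigned long size, unsigned long offset)
{
	for (unsigned long i = offset; i < size; i++)
		if (addr[i / 64] & (1ULL << (i % 64)))
			return i;
	return size;				/* no set bit found */
}

int main(void)
{
	uint64_t map[2] = { 0x0000000000000011ULL, 0x8000000000000000ULL };
	unsigned long size = 128;

	for (unsigned long bit = find_next_bit_model(map, size, 0);
	     bit < size;
	     bit = find_next_bit_model(map, size, bit + 1))
		printf("bit %lu is set\n", bit);	/* prints 0, 4, 127 */
	return 0;
}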
include/asm-alpha/byteorder.h (view file @ 94fd582e)
@@ -2,6 +2,7 @@
 #define _ALPHA_BYTEORDER_H

 #include <asm/types.h>
+#include <asm/compiler.h>

 #ifdef __GNUC__
@@ -23,11 +24,8 @@ static __inline __u32 __attribute__((__const)) __arch__swab32(__u32 x)
 	__u64 t0, t1, t2, t3;

-	__asm__("inslh %1, 7, %0"	/* t0 : 0000000000AABBCC */
-		: "=r"(t0) : "r"(x));
-	__asm__("inswl %1, 3, %0"	/* t1 : 000000CCDD000000 */
-		: "=r"(t1) : "r"(x));
+	t0 = __kernel_inslh(x, 7);	/* t0 : 0000000000AABBCC */
+	t1 = __kernel_inswl(x, 3);	/* t1 : 000000CCDD000000 */
 	t1 |= t0;			/* t1 : 000000CCDDAABBCC */
 	t2 = t1 >> 16;			/* t2 : 0000000000CCDDAA */
 	t0 = t1 & 0xFF00FF00;		/* t0 : 00000000DD00BB00 */

...
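For reference, the inslh/inswl shuffle above is a byte swap of a 32-bit value carried out in a 64-bit register: inslh contributes the upper bytes, inswl the lower ones, and the masks and shifts that follow interleave them into DDCCBBAA order. The same data flow in portable C, with the intermediate values traced as in the kernel's comments (a sketch; the steps past the hunk shown above are filled in only for this illustration):

/* Portable trace of the __arch__swab32 data flow for x = 0xAABBCCDD.
 * For a zero-extended 32-bit x, inslh(x, 7) behaves like x >> 8 and
 * inswl(x, 3) like (x & 0xffff) << 24; the remaining steps are plain C. */
#include <assert.h>
#include <stdint.h>

static uint32_t swab32_model(uint32_t x)
{
	uint64_t t0, t1, t2, t3;

	t0 = (uint64_t) x >> 8;			/* t0 : 0000000000AABBCC */
	t1 = (uint64_t)(x & 0xffff) << 24;	/* t1 : 000000CCDD000000 */
	t1 |= t0;				/* t1 : 000000CCDDAABBCC */
	t2 = t1 >> 16;				/* t2 : 0000000000CCDDAA */
	t0 = t1 & 0xFF00FF00;			/* t0 : 00000000DD00BB00 */
	t3 = t2 & 0x00FF00FF;			/* t3 : 0000000000CC00AA */
	t1 = t0 + t3;				/* t1 : 00000000DDCCBBAA */

	return (uint32_t) t1;
}

int main(void)
{
	assert(swab32_model(0xAABBCCDDu) == 0xDDCCBBAAu);
	assert(swab32_model(0x12345678u) == 0x78563412u);
	return 0;
}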
include/asm-alpha/compiler.h (view file @ 94fd582e)
@@ -9,40 +9,58 @@
  * these tests and macros.
  */

-#if 0
-#define __kernel_insbl(val, shift) \
-  (((unsigned long)(val) & 0xfful) << ((shift) * 8))
-#define __kernel_inswl(val, shift) \
-  (((unsigned long)(val) & 0xfffful) << ((shift) * 8))
-#define __kernel_insql(val, shift) \
-  ((unsigned long)(val) << ((shift) * 8))
+#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_insbl(val, shift)  __builtin_alpha_insbl(val, shift)
+# define __kernel_inswl(val, shift)  __builtin_alpha_inswl(val, shift)
+# define __kernel_insql(val, shift)  __builtin_alpha_insql(val, shift)
+# define __kernel_inslh(val, shift)  __builtin_alpha_inslh(val, shift)
+# define __kernel_extbl(val, shift)  __builtin_alpha_extbl(val, shift)
+# define __kernel_extwl(val, shift)  __builtin_alpha_extwl(val, shift)
+# define __kernel_cmpbge(a, b)       __builtin_alpha_cmpbge(a, b)
+# define __kernel_cttz(x)            __builtin_ctz(x)
+# define __kernel_ctlz(x)            __builtin_clz(x)
+# define __kernel_ctpop(x)           __builtin_popcount(x)
 #else
-#define __kernel_insbl(val, shift) \
+# define __kernel_insbl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_inswl(val, shift) \
+# define __kernel_inswl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_insql(val, shift) \
+# define __kernel_insql(val, shift) \
   ({ unsigned long __kir; \
      __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#endif
-
-#if 0 && (__GNUC__ > 2 || __GNUC_MINOR__ >= 92)
-#define __kernel_extbl(val, shift) (((val) >> (((shift) & 7) * 8)) & 0xfful)
-#define __kernel_extwl(val, shift) (((val) >> (((shift) & 7) * 8)) & 0xfffful)
-#else
-#define __kernel_extbl(val, shift) \
+# define __kernel_inslh(val, shift) \
+  ({ unsigned long __kir; \
+     __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+     __kir; })
+# define __kernel_extbl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_extwl(val, shift) \
+# define __kernel_extwl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
+# define __kernel_cmpbge(a, b) \
+  ({ unsigned long __kir; \
+     __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(val)); \
+     __kir; })
+# define __kernel_cttz(x) \
+  ({ unsigned long __kir; \
+     __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
+# define __kernel_ctlz(x) \
+  ({ unsigned long __kir; \
+     __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
+# define __kernel_ctpop(x) \
+  ({ unsigned long __kir; \
+     __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
 #endif

...
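The version check is the crux of the patch: GCC 3.4 gained __builtin_alpha_* intrinsics for the byte-manipulation instructions, and the generic __builtin_ctz/__builtin_clz/__builtin_popcount cover cttz/ctlz/ctpop, so each __kernel_* helper can collapse to a builtin on new compilers while keeping the inline-asm fallback otherwise. The deleted #if 0 blocks still document the pure-C semantics of the insert/extract helpers; a compilable restatement of those semantics (portable C, the *_model names are illustrative only):

/* Portable restatement of the byte insert/extract semantics from the old
 * "#if 0" blocks: insbl_model places the low byte of val at byte position
 * 'shift'; extbl_model extracts the byte at that position. */
#include <assert.h>
#include <stdint.h>

static uint64_t insbl_model(uint64_t val, unsigned shift)
{
	return (val & 0xff) << (shift * 8);
}

static uint64_t extbl_model(uint64_t val, unsigned shift)
{
	return (val >> ((shift & 7) * 8)) & 0xff;
}

int main(void)
{
	uint64_t q = 0;

	/* Build a quadword byte by byte, then read the bytes back out. */
	for (unsigned i = 0; i < 8; i++)
		q |= insbl_model(0x10 + i, i);
	for (unsigned i = 0; i < 8; i++)
		assert(extbl_model(q, i) == 0x10 + i);
	return 0;
}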