Commit 85c9d14a authored by Matt Mackall, committed by Linus Torvalds

[PATCH] random: kill redundant rotate_left definitions

We've got three definitions of rotate_left.  Remove the x86 asm version
and the duplicate C definition.  The remaining definition is fixed up so
that recent gcc will generate rol instructions on x86, at least.

A later patch will move this to bitops and clean up the other tree users.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent ae334d0f
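For context, the rotate idiom the patch standardizes on is sketched below. This is a hypothetical userspace rendering (uint32_t instead of the kernel's __u32, and a demo name rather than the real rol32), meant only to illustrate the point in the commit message: the plain shift-or form is enough for a recent gcc to emit a single rol instruction on x86, so the i386 asm variant is unnecessary.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Same shape as the patch's rol32(): rotate 'word' left by 'shift'
	 * bits, with shift in 1..31 (a shift of 0 would make the 32 - shift
	 * term undefined, the same caveat as the kernel helper of this era).
	 * Recent gcc recognizes this shift-or pattern and emits a single
	 * rol instruction on x86.
	 */
	static inline uint32_t rol32_demo(uint32_t word, int shift)
	{
		return (word << shift) | (word >> (32 - shift));
	}

	int main(void)
	{
		/* 0x80000001 rotated left by 1 gives 0x00000003. */
		printf("%08x\n", rol32_demo(0x80000001u, 1));
		return 0;
	}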
@@ -401,26 +401,10 @@ static void sysctl_init_random(struct entropy_store *random_state);
  * purposes
  *
  *****************************************************************/
-/*
- * Unfortunately, while the GCC optimizer for the i386 understands how
- * to optimize a static rotate left of x bits, it doesn't know how to
- * deal with a variable rotate of x bits.  So we use a bit of asm magic.
- */
-#if (!defined (__i386__))
-static inline __u32 rotate_left(int i, __u32 word)
+static inline __u32 rol32(__u32 word, int shift)
 {
-	return (word << i) | (word >> (32 - i));
-}
-#else
-static inline __u32 rotate_left(int i, __u32 word)
-{
-	__asm__("roll %%cl,%0"
-		:"=r" (word)
-		:"0" (word),"c" (i));
-	return word;
+	return (word << shift) | (word >> (32 - shift));
 }
-#endif
 /*
  * More asm magic....
@@ -572,7 +556,7 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
 	add_ptr = r->add_ptr;
 	while (nwords--) {
-		w = rotate_left(input_rotate, next_w);
+		w = rol32(next_w, input_rotate);
 		if (nwords > 0)
 			next_w = *in++;
 		i = add_ptr = (add_ptr - 1) & wordmask;
@@ -941,10 +925,8 @@ EXPORT_SYMBOL(add_disk_randomness);
 #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
 #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
-#define ROTL(n,X) (((X) << n ) | ((X) >> (32 - n)))
 #define subRound(a, b, c, d, e, f, k, data) \
-	(e += ROTL(5, a) + f(b, c, d) + k + data, b = ROTL(30, b))
+	(e += rol32(a, 5) + f(b, c, d) + k + data, b = rol32(b, 30))
 static void SHATransform(__u32 digest[85], __u32 const data[16])
 {
@@ -962,7 +944,7 @@ static void SHATransform(__u32 digest[85], __u32 const data[16])
 	memcpy(W, data, 16*sizeof(__u32));
 	for (i = 0; i < 64; i++) {
 		TEMP = W[i] ^ W[i+2] ^ W[i+8] ^ W[i+13];
-		W[i+16] = ROTL(1, TEMP);
+		W[i+16] = rol32(TEMP, 1);
 	}
 	/* Set up first buffer and local data buffer */
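The SHA hunks that follow replace the file-local ROTL(n, X) macro with rol32(X, n); the rotate count moves from the first argument to the second. A small hypothetical check of that equivalence, written outside the kernel with local stand-ins for both forms (not the patch's own code):

	#include <assert.h>
	#include <stdint.h>

	/* Local stand-ins for the two forms being exchanged in the diff. */
	#define ROTL(n, X) (((X) << (n)) | ((X) >> (32 - (n))))

	static inline uint32_t rol32(uint32_t word, int shift)
	{
		return (word << shift) | (word >> (32 - shift));
	}

	int main(void)
	{
		uint32_t a = 0x67452301u; /* sample value; rotate counts stay in 1..31 */

		/* ROTL(5, a) and rol32(a, 5) are the same rotation; only the
		 * argument order differs, which is the pattern of every SHA
		 * conversion in the hunks below. */
		assert(ROTL(5, a) == rol32(a, 5));
		assert(ROTL(30, a) == rol32(a, 30));
		return 0;
	}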
@@ -990,25 +972,25 @@ static void SHATransform(__u32 digest[85], __u32 const data[16])
 			else
 				TEMP = f4(B, C, D) + K4;
 		}
-		TEMP += ROTL(5, A) + E + W[i];
-		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
+		TEMP += rol32(A, 5) + E + W[i];
+		E = D; D = C; C = rol32(B, 30); B = A; A = TEMP;
 	}
 #elif SHA_CODE_SIZE == 1
 	for (i = 0; i < 20; i++) {
-		TEMP = f1(B, C, D) + K1 + ROTL(5, A) + E + W[i];
-		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
+		TEMP = f1(B, C, D) + K1 + rol32(A, 5) + E + W[i];
+		E = D; D = C; C = rol32(B, 30); B = A; A = TEMP;
 	}
 	for (; i < 40; i++) {
-		TEMP = f2(B, C, D) + K2 + ROTL(5, A) + E + W[i];
-		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
+		TEMP = f2(B, C, D) + K2 + rol32(A, 5) + E + W[i];
+		E = D; D = C; C = rol32(B, 30); B = A; A = TEMP;
 	}
 	for (; i < 60; i++) {
-		TEMP = f3(B, C, D) + K3 + ROTL(5, A) + E + W[i];
-		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
+		TEMP = f3(B, C, D) + K3 + rol32(A, 5) + E + W[i];
+		E = D; D = C; C = rol32(B, 30); B = A; A = TEMP;
 	}
 	for (; i < 80; i++) {
-		TEMP = f4(B, C, D) + K4 + ROTL(5, A) + E + W[i];
-		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
+		TEMP = f4(B, C, D) + K4 + rol32(A, 5) + E + W[i];
+		E = D; D = C; C = rol32(B, 30); B = A; A = TEMP;
 	}
 #elif SHA_CODE_SIZE == 2
 	for (i = 0; i < 20; i += 5) {
@@ -1138,7 +1120,6 @@ static void SHATransform(__u32 digest[85], __u32 const data[16])
 #undef W
 }
-#undef ROTL
 #undef f1
 #undef f2
 #undef f3