Commit 41419a28 authored by Kees Cook, committed by Herbert Xu

crypto: x86/sha - Eliminate casts on asm implementations

In order to avoid CFI function prototype mismatches, this removes the
casts on assembly implementations of sha1/256/512 accelerators. The
safety checks from BUILD_BUG_ON() remain.

Additionally, this renames various arguments for clarity, as suggested
by Eric Biggers.
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e0437dc6
...@@ -62,11 +62,11 @@ ...@@ -62,11 +62,11 @@
*Visit http://software.intel.com/en-us/articles/ *Visit http://software.intel.com/en-us/articles/
*and refer to improving-the-performance-of-the-secure-hash-algorithm-1/ *and refer to improving-the-performance-of-the-secure-hash-algorithm-1/
* *
*Updates 20-byte SHA-1 record in 'hash' for even number of *Updates 20-byte SHA-1 record at start of 'state', from 'input', for
*'num_blocks' consecutive 64-byte blocks *even number of 'blocks' consecutive 64-byte blocks.
* *
*extern "C" void sha1_transform_avx2( *extern "C" void sha1_transform_avx2(
* int *hash, const char* input, size_t num_blocks ); * struct sha1_state *state, const u8* input, int blocks );
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
......
...@@ -457,9 +457,13 @@ W_PRECALC_SSSE3 ...@@ -457,9 +457,13 @@ W_PRECALC_SSSE3
movdqu \a,\b movdqu \a,\b
.endm .endm
/* SSSE3 optimized implementation: /*
* extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws, * SSSE3 optimized implementation:
* unsigned int rounds); *
* extern "C" void sha1_transform_ssse3(struct sha1_state *state,
* const u8 *data, int blocks);
*
* Note that struct sha1_state is assumed to begin with u32 state[5].
*/ */
SHA1_VECTOR_ASM sha1_transform_ssse3 SHA1_VECTOR_ASM sha1_transform_ssse3
...@@ -545,8 +549,8 @@ W_PRECALC_AVX ...@@ -545,8 +549,8 @@ W_PRECALC_AVX
/* AVX optimized implementation: /* AVX optimized implementation:
* extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws, * extern "C" void sha1_transform_avx(struct sha1_state *state,
* unsigned int rounds); * const u8 *data, int blocks);
*/ */
SHA1_VECTOR_ASM sha1_transform_avx SHA1_VECTOR_ASM sha1_transform_avx
......
...@@ -27,11 +27,8 @@ ...@@ -27,11 +27,8 @@
#include <crypto/sha1_base.h> #include <crypto/sha1_base.h>
#include <asm/simd.h> #include <asm/simd.h>
typedef void (sha1_transform_fn)(u32 *digest, const char *data,
unsigned int rounds);
static int sha1_update(struct shash_desc *desc, const u8 *data, static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha1_transform_fn *sha1_xform) unsigned int len, sha1_block_fn *sha1_xform)
{ {
struct sha1_state *sctx = shash_desc_ctx(desc); struct sha1_state *sctx = shash_desc_ctx(desc);
...@@ -39,48 +36,47 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, ...@@ -39,48 +36,47 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len); return crypto_sha1_update(desc, data, len);
/* make sure casting to sha1_block_fn() is safe */ /*
* Make sure struct sha1_state begins directly with the SHA1
* 160-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
kernel_fpu_begin(); kernel_fpu_begin();
sha1_base_do_update(desc, data, len, sha1_base_do_update(desc, data, len, sha1_xform);
(sha1_block_fn *)sha1_xform);
kernel_fpu_end(); kernel_fpu_end();
return 0; return 0;
} }
static int sha1_finup(struct shash_desc *desc, const u8 *data, static int sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha1_transform_fn *sha1_xform) unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
{ {
if (!crypto_simd_usable()) if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out); return crypto_sha1_finup(desc, data, len, out);
kernel_fpu_begin(); kernel_fpu_begin();
if (len) if (len)
sha1_base_do_update(desc, data, len, sha1_base_do_update(desc, data, len, sha1_xform);
(sha1_block_fn *)sha1_xform); sha1_base_do_finalize(desc, sha1_xform);
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform);
kernel_fpu_end(); kernel_fpu_end();
return sha1_base_finish(desc, out); return sha1_base_finish(desc, out);
} }
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
unsigned int rounds); const u8 *data, int blocks);
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
return sha1_update(desc, data, len, return sha1_update(desc, data, len, sha1_transform_ssse3);
(sha1_transform_fn *) sha1_transform_ssse3);
} }
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data, static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out) unsigned int len, u8 *out)
{ {
return sha1_finup(desc, data, len, out, return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
(sha1_transform_fn *) sha1_transform_ssse3);
} }
/* Add padding and return the message digest. */ /* Add padding and return the message digest. */
...@@ -119,21 +115,19 @@ static void unregister_sha1_ssse3(void) ...@@ -119,21 +115,19 @@ static void unregister_sha1_ssse3(void)
} }
#ifdef CONFIG_AS_AVX #ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data, asmlinkage void sha1_transform_avx(struct sha1_state *state,
unsigned int rounds); const u8 *data, int blocks);
static int sha1_avx_update(struct shash_desc *desc, const u8 *data, static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
return sha1_update(desc, data, len, return sha1_update(desc, data, len, sha1_transform_avx);
(sha1_transform_fn *) sha1_transform_avx);
} }
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data, static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out) unsigned int len, u8 *out)
{ {
return sha1_finup(desc, data, len, out, return sha1_finup(desc, data, len, out, sha1_transform_avx);
(sha1_transform_fn *) sha1_transform_avx);
} }
static int sha1_avx_final(struct shash_desc *desc, u8 *out) static int sha1_avx_final(struct shash_desc *desc, u8 *out)
...@@ -190,8 +184,8 @@ static inline void unregister_sha1_avx(void) { } ...@@ -190,8 +184,8 @@ static inline void unregister_sha1_avx(void) { }
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, asmlinkage void sha1_transform_avx2(struct sha1_state *state,
unsigned int rounds); const u8 *data, int blocks);
static bool avx2_usable(void) static bool avx2_usable(void)
{ {
...@@ -203,28 +197,26 @@ static bool avx2_usable(void) ...@@ -203,28 +197,26 @@ static bool avx2_usable(void)
return false; return false;
} }
static void sha1_apply_transform_avx2(u32 *digest, const char *data, static void sha1_apply_transform_avx2(struct sha1_state *state,
unsigned int rounds) const u8 *data, int blocks)
{ {
/* Select the optimal transform based on data block size */ /* Select the optimal transform based on data block size */
if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE) if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
sha1_transform_avx2(digest, data, rounds); sha1_transform_avx2(state, data, blocks);
else else
sha1_transform_avx(digest, data, rounds); sha1_transform_avx(state, data, blocks);
} }
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data, static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
return sha1_update(desc, data, len, return sha1_update(desc, data, len, sha1_apply_transform_avx2);
(sha1_transform_fn *) sha1_apply_transform_avx2);
} }
static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data, static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out) unsigned int len, u8 *out)
{ {
return sha1_finup(desc, data, len, out, return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
(sha1_transform_fn *) sha1_apply_transform_avx2);
} }
static int sha1_avx2_final(struct shash_desc *desc, u8 *out) static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
...@@ -267,21 +259,19 @@ static inline void unregister_sha1_avx2(void) { } ...@@ -267,21 +259,19 @@ static inline void unregister_sha1_avx2(void) { }
#endif #endif
#ifdef CONFIG_AS_SHA1_NI #ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(u32 *digest, const char *data, asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
unsigned int rounds); int rounds);
static int sha1_ni_update(struct shash_desc *desc, const u8 *data, static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
return sha1_update(desc, data, len, return sha1_update(desc, data, len, sha1_ni_transform);
(sha1_transform_fn *) sha1_ni_transform);
} }
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data, static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out) unsigned int len, u8 *out)
{ {
return sha1_finup(desc, data, len, out, return sha1_finup(desc, data, len, out, sha1_ni_transform);
(sha1_transform_fn *) sha1_ni_transform);
} }
static int sha1_ni_final(struct shash_desc *desc, u8 *out) static int sha1_ni_final(struct shash_desc *desc, u8 *out)
......
...@@ -341,8 +341,8 @@ a = TMP_ ...@@ -341,8 +341,8 @@ a = TMP_
.endm .endm
######################################################################## ########################################################################
## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) ## void sha256_transform_avx(struct sha256_state *state, const u8 *data, int blocks)
## arg 1 : pointer to digest ## arg 1 : pointer to state
## arg 2 : pointer to input data ## arg 2 : pointer to input data
## arg 3 : Num blocks ## arg 3 : Num blocks
######################################################################## ########################################################################
......
...@@ -520,8 +520,8 @@ STACK_SIZE = _RSP + _RSP_SIZE ...@@ -520,8 +520,8 @@ STACK_SIZE = _RSP + _RSP_SIZE
.endm .endm
######################################################################## ########################################################################
## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks)
## arg 1 : pointer to digest ## arg 1 : pointer to state
## arg 2 : pointer to input data ## arg 2 : pointer to input data
## arg 3 : Num blocks ## arg 3 : Num blocks
######################################################################## ########################################################################
......
...@@ -347,8 +347,10 @@ a = TMP_ ...@@ -347,8 +347,10 @@ a = TMP_
.endm .endm
######################################################################## ########################################################################
## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks) ## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data,
## arg 1 : pointer to digest ## int blocks);
## arg 1 : pointer to state
## (struct sha256_state is assumed to begin with u32 state[8])
## arg 2 : pointer to input data ## arg 2 : pointer to input data
## arg 3 : Num blocks ## arg 3 : Num blocks
######################################################################## ########################################################################
......
...@@ -41,12 +41,11 @@ ...@@ -41,12 +41,11 @@
#include <linux/string.h> #include <linux/string.h>
#include <asm/simd.h> #include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data, asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
u64 rounds); const u8 *data, int blocks);
typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds);
static int _sha256_update(struct shash_desc *desc, const u8 *data, static int _sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha256_transform_fn *sha256_xform) unsigned int len, sha256_block_fn *sha256_xform)
{ {
struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
...@@ -54,28 +53,29 @@ static int _sha256_update(struct shash_desc *desc, const u8 *data, ...@@ -54,28 +53,29 @@ static int _sha256_update(struct shash_desc *desc, const u8 *data,
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len); return crypto_sha256_update(desc, data, len);
/* make sure casting to sha256_block_fn() is safe */ /*
* Make sure struct sha256_state begins directly with the SHA256
* 256-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
kernel_fpu_begin(); kernel_fpu_begin();
sha256_base_do_update(desc, data, len, sha256_base_do_update(desc, data, len, sha256_xform);
(sha256_block_fn *)sha256_xform);
kernel_fpu_end(); kernel_fpu_end();
return 0; return 0;
} }
static int sha256_finup(struct shash_desc *desc, const u8 *data, static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha256_transform_fn *sha256_xform) unsigned int len, u8 *out, sha256_block_fn *sha256_xform)
{ {
if (!crypto_simd_usable()) if (!crypto_simd_usable())
return crypto_sha256_finup(desc, data, len, out); return crypto_sha256_finup(desc, data, len, out);
kernel_fpu_begin(); kernel_fpu_begin();
if (len) if (len)
sha256_base_do_update(desc, data, len, sha256_base_do_update(desc, data, len, sha256_xform);
(sha256_block_fn *)sha256_xform); sha256_base_do_finalize(desc, sha256_xform);
sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform);
kernel_fpu_end(); kernel_fpu_end();
return sha256_base_finish(desc, out); return sha256_base_finish(desc, out);
...@@ -145,8 +145,8 @@ static void unregister_sha256_ssse3(void) ...@@ -145,8 +145,8 @@ static void unregister_sha256_ssse3(void)
} }
#ifdef CONFIG_AS_AVX #ifdef CONFIG_AS_AVX
asmlinkage void sha256_transform_avx(u32 *digest, const char *data, asmlinkage void sha256_transform_avx(struct sha256_state *state,
u64 rounds); const u8 *data, int blocks);
static int sha256_avx_update(struct shash_desc *desc, const u8 *data, static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
...@@ -227,8 +227,8 @@ static inline void unregister_sha256_avx(void) { } ...@@ -227,8 +227,8 @@ static inline void unregister_sha256_avx(void) { }
#endif #endif
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
asmlinkage void sha256_transform_rorx(u32 *digest, const char *data, asmlinkage void sha256_transform_rorx(struct sha256_state *state,
u64 rounds); const u8 *data, int blocks);
static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
...@@ -307,8 +307,8 @@ static inline void unregister_sha256_avx2(void) { } ...@@ -307,8 +307,8 @@ static inline void unregister_sha256_avx2(void) { }
#endif #endif
#ifdef CONFIG_AS_SHA256_NI #ifdef CONFIG_AS_SHA256_NI
asmlinkage void sha256_ni_transform(u32 *digest, const char *data, asmlinkage void sha256_ni_transform(struct sha256_state *digest,
u64 rounds); /*unsigned int rounds);*/ const u8 *data, int rounds);
static int sha256_ni_update(struct shash_desc *desc, const u8 *data, static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
......
...@@ -271,11 +271,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE ...@@ -271,11 +271,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm .endm
######################################################################## ########################################################################
# void sha512_transform_avx(void* D, const void* M, u64 L) # void sha512_transform_avx(sha512_state *state, const u8 *data, int blocks)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M. # Purpose: Updates the SHA512 digest stored at "state" with the message
# The size of the message pointed to by M must be an integer multiple of SHA512 # stored in "data".
# message blocks. # The size of the message pointed to by "data" must be an integer multiple
# L is the message length in SHA512 blocks # of SHA512 message blocks.
# "blocks" is the message length in SHA512 blocks
######################################################################## ########################################################################
SYM_FUNC_START(sha512_transform_avx) SYM_FUNC_START(sha512_transform_avx)
cmp $0, msglen cmp $0, msglen
......
...@@ -563,11 +563,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE ...@@ -563,11 +563,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm .endm
######################################################################## ########################################################################
# void sha512_transform_rorx(void* D, const void* M, uint64_t L)# # void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M. # Purpose: Updates the SHA512 digest stored at "state" with the message
# The size of the message pointed to by M must be an integer multiple of SHA512 # stored in "data".
# message blocks. # The size of the message pointed to by "data" must be an integer multiple
# L is the message length in SHA512 blocks # of SHA512 message blocks.
# "blocks" is the message length in SHA512 blocks
######################################################################## ########################################################################
SYM_FUNC_START(sha512_transform_rorx) SYM_FUNC_START(sha512_transform_rorx)
# Allocate Stack Space # Allocate Stack Space
......
...@@ -269,11 +269,14 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE ...@@ -269,11 +269,14 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm .endm
######################################################################## ########################################################################
# void sha512_transform_ssse3(void* D, const void* M, u64 L)# ## void sha512_transform_ssse3(struct sha512_state *state, const u8 *data,
# Purpose: Updates the SHA512 digest stored at D with the message stored in M. ## int blocks);
# The size of the message pointed to by M must be an integer multiple of SHA512 # (struct sha512_state is assumed to begin with u64 state[8])
# message blocks. # Purpose: Updates the SHA512 digest stored at "state" with the message
# L is the message length in SHA512 blocks. # stored in "data".
# The size of the message pointed to by "data" must be an integer multiple
# of SHA512 message blocks.
# "blocks" is the message length in SHA512 blocks.
######################################################################## ########################################################################
SYM_FUNC_START(sha512_transform_ssse3) SYM_FUNC_START(sha512_transform_ssse3)
......
...@@ -39,13 +39,11 @@ ...@@ -39,13 +39,11 @@
#include <crypto/sha512_base.h> #include <crypto/sha512_base.h>
#include <asm/simd.h> #include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data, asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
u64 rounds); const u8 *data, int blocks);
typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds);
static int sha512_update(struct shash_desc *desc, const u8 *data, static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha512_transform_fn *sha512_xform) unsigned int len, sha512_block_fn *sha512_xform)
{ {
struct sha512_state *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
...@@ -53,28 +51,29 @@ static int sha512_update(struct shash_desc *desc, const u8 *data, ...@@ -53,28 +51,29 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len); return crypto_sha512_update(desc, data, len);
/* make sure casting to sha512_block_fn() is safe */ /*
* Make sure struct sha512_state begins directly with the SHA512
* 512-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0); BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
kernel_fpu_begin(); kernel_fpu_begin();
sha512_base_do_update(desc, data, len, sha512_base_do_update(desc, data, len, sha512_xform);
(sha512_block_fn *)sha512_xform);
kernel_fpu_end(); kernel_fpu_end();
return 0; return 0;
} }
static int sha512_finup(struct shash_desc *desc, const u8 *data, static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha512_transform_fn *sha512_xform) unsigned int len, u8 *out, sha512_block_fn *sha512_xform)
{ {
if (!crypto_simd_usable()) if (!crypto_simd_usable())
return crypto_sha512_finup(desc, data, len, out); return crypto_sha512_finup(desc, data, len, out);
kernel_fpu_begin(); kernel_fpu_begin();
if (len) if (len)
sha512_base_do_update(desc, data, len, sha512_base_do_update(desc, data, len, sha512_xform);
(sha512_block_fn *)sha512_xform); sha512_base_do_finalize(desc, sha512_xform);
sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform);
kernel_fpu_end(); kernel_fpu_end();
return sha512_base_finish(desc, out); return sha512_base_finish(desc, out);
...@@ -144,8 +143,8 @@ static void unregister_sha512_ssse3(void) ...@@ -144,8 +143,8 @@ static void unregister_sha512_ssse3(void)
} }
#ifdef CONFIG_AS_AVX #ifdef CONFIG_AS_AVX
asmlinkage void sha512_transform_avx(u64 *digest, const char *data, asmlinkage void sha512_transform_avx(struct sha512_state *state,
u64 rounds); const u8 *data, int blocks);
static bool avx_usable(void) static bool avx_usable(void)
{ {
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
...@@ -225,8 +224,8 @@ static inline void unregister_sha512_avx(void) { } ...@@ -225,8 +224,8 @@ static inline void unregister_sha512_avx(void) { }
#endif #endif
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
asmlinkage void sha512_transform_rorx(u64 *digest, const char *data, asmlinkage void sha512_transform_rorx(struct sha512_state *state,
u64 rounds); const u8 *data, int blocks);
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data, static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment