Commit 455951b5 authored by Eric Biggers, committed by Herbert Xu

crypto: arm64/sha256 - clean up backwards function names

In the Linux kernel, a function whose name has two leading underscores
is conventionally called by the same-named function without leading
underscores -- not the other way around.  __sha256_block_data_order()
and __sha256_block_neon() got this backwards.  Fix this, albeit without
changing the names in the perlasm since that is OpenSSL code.  No change
in behavior.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 5f720a3d
...@@ -27,8 +27,8 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data, ...@@ -27,8 +27,8 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks); unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order); EXPORT_SYMBOL(sha256_block_data_order);
/*
 * Scalar (non-NEON) block transform: adapts the generic assembly routine
 * sha256_block_data_order() to the callback signature expected by the
 * sha256_base helpers (state container + source + block count).
 */
static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
				   int blocks)
{
	sha256_block_data_order(sst->state, src, blocks);
}
...@@ -36,8 +36,8 @@ static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src, ...@@ -36,8 +36,8 @@ static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
asmlinkage void sha256_block_neon(u32 *digest, const void *data, asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks); unsigned int num_blks);
/*
 * NEON block transform: adapts the NEON assembly routine
 * sha256_block_neon() to the sha256_base helper callback signature.
 * Callers must bracket use with kernel_neon_begin()/kernel_neon_end().
 */
static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
				  int blocks)
{
	sha256_block_neon(sst->state, src, blocks);
}
...@@ -45,17 +45,15 @@ static void __sha256_block_neon(struct sha256_state *sst, u8 const *src, ...@@ -45,17 +45,15 @@ static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data, static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
return sha256_base_do_update(desc, data, len, return sha256_base_do_update(desc, data, len, sha256_arm64_transform);
__sha256_block_data_order);
} }
/*
 * shash .finup hook for the scalar implementation: absorb any remaining
 * @len bytes of @data, apply final padding, and write the digest to @out.
 */
static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
				     unsigned int len, u8 *out)
{
	if (len)
		sha256_base_do_update(desc, data, len, sha256_arm64_transform);
	sha256_base_do_finalize(desc, sha256_arm64_transform);
	return sha256_base_finish(desc, out);
}
...@@ -98,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, ...@@ -98,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len, return sha256_base_do_update(desc, data, len,
__sha256_block_data_order); sha256_arm64_transform);
while (len > 0) { while (len > 0) {
unsigned int chunk = len; unsigned int chunk = len;
...@@ -114,7 +112,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, ...@@ -114,7 +112,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE; sctx->count % SHA256_BLOCK_SIZE;
kernel_neon_begin(); kernel_neon_begin();
sha256_base_do_update(desc, data, chunk, __sha256_block_neon); sha256_base_do_update(desc, data, chunk, sha256_neon_transform);
kernel_neon_end(); kernel_neon_end();
data += chunk; data += chunk;
len -= chunk; len -= chunk;
...@@ -128,13 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data, ...@@ -128,13 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) { if (!crypto_simd_usable()) {
if (len) if (len)
sha256_base_do_update(desc, data, len, sha256_base_do_update(desc, data, len,
__sha256_block_data_order); sha256_arm64_transform);
sha256_base_do_finalize(desc, __sha256_block_data_order); sha256_base_do_finalize(desc, sha256_arm64_transform);
} else { } else {
if (len) if (len)
sha256_update_neon(desc, data, len); sha256_update_neon(desc, data, len);
kernel_neon_begin(); kernel_neon_begin();
sha256_base_do_finalize(desc, __sha256_block_neon); sha256_base_do_finalize(desc, sha256_neon_transform);
kernel_neon_end(); kernel_neon_end();
} }
return sha256_base_finish(desc, out); return sha256_base_finish(desc, out);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment