Commit 6e78ad0b authored by Eric Biggers, committed by Herbert Xu

crypto: lib - move __crypto_xor into utils

CRYPTO_LIB_CHACHA depends on CRYPTO for __crypto_xor, defined in
crypto/algapi.c.  This is a layering violation because the dependencies
should only go in the other direction (crypto/ => lib/crypto/).  Also
the correct dependency would be CRYPTO_ALGAPI, not CRYPTO.  Fix this by
moving __crypto_xor into the utils module in lib/crypto/.

Note that CRYPTO_LIB_CHACHA_GENERIC selected XOR_BLOCKS, which is
unrelated and unnecessary.  It was perhaps thought that XOR_BLOCKS was
needed for __crypto_xor, but that's not the case.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7033b937
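
For illustration, here is a minimal hedged sketch (not part of this commit; the function name is hypothetical) of the kind of library-only caller this dependency fix is about. Such a caller reaches the XOR helper through the crypto_xor() inline from <crypto/algapi.h>, whose out-of-line fallback __crypto_xor() is, after this patch, provided by the CRYPTO_LIB_UTILS module in lib/crypto/ rather than by crypto/algapi.c, so it no longer needs the whole CRYPTO subsystem built in:

/* Hypothetical lib/crypto-style user; not from this commit. */
#include <crypto/algapi.h>      /* crypto_xor(); declares __crypto_xor() */
#include <linux/types.h>

/* XOR a keystream into a buffer in place, as a stream cipher would. */
static void example_apply_keystream(u8 *buf, const u8 *keystream,
                                    unsigned int len)
{
        /*
         * crypto_xor() may inline the XOR for constant, word-multiple
         * sizes; otherwise it falls back to __crypto_xor(), which this
         * patch moves into lib/crypto/utils.c (CRYPTO_LIB_UTILS).
         */
        crypto_xor(buf, keystream, len);
}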
crypto/algapi.c
@@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);

void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
        int relalign = 0;

        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                int size = sizeof(unsigned long);
                int d = (((unsigned long)dst ^ (unsigned long)src1) |
                         ((unsigned long)dst ^ (unsigned long)src2)) &
                        (size - 1);

                relalign = d ? 1 << __ffs(d) : size;

                /*
                 * If we care about alignment, process as many bytes as
                 * needed to advance dst and src to values whose alignments
                 * equal their relative alignment. This will allow us to
                 * process the remainder of the input using optimal strides.
                 */
                while (((unsigned long)dst & (relalign - 1)) && len > 0) {
                        *dst++ = *src1++ ^ *src2++;
                        len--;
                }
        }

        while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u64 l = get_unaligned((u64 *)src1) ^
                                get_unaligned((u64 *)src2);
                        put_unaligned(l, (u64 *)dst);
                } else {
                        *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
                }

                dst += 8;
                src1 += 8;
                src2 += 8;
                len -= 8;
        }

        while (len >= 4 && !(relalign & 3)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u32 l = get_unaligned((u32 *)src1) ^
                                get_unaligned((u32 *)src2);
                        put_unaligned(l, (u32 *)dst);
                } else {
                        *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
                }

                dst += 4;
                src1 += 4;
                src2 += 4;
                len -= 4;
        }

        while (len >= 2 && !(relalign & 1)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u16 l = get_unaligned((u16 *)src1) ^
                                get_unaligned((u16 *)src2);
                        put_unaligned(l, (u16 *)dst);
                } else {
                        *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
                }

                dst += 2;
                src1 += 2;
                src2 += 2;
                len -= 2;
        }

        while (len--)
                *dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);

unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
        return alg->cra_ctxsize +
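
To make the relative-alignment logic in the function above concrete, here is a small hedged sketch in plain userspace C (not kernel code; the kernel's __ffs() is stood in for by __builtin_ctzl(), which gives the same 0-based index of the lowest set bit for non-zero input) that reproduces the relalign computation for example pointer values:

#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(); valid for non-zero input. */
static unsigned long my_ffs(unsigned long x)
{
        return __builtin_ctzl(x);
}

int main(void)
{
        /* Example addresses for dst, src1, src2 (64-bit, no efficient
         * unaligned access assumed). */
        unsigned long dst = 0x1003, src1 = 0x2007, src2 = 0x300b;
        int size = sizeof(unsigned long);               /* 8 on 64-bit */
        int d = ((dst ^ src1) | (dst ^ src2)) & (size - 1);
        int relalign = d ? 1 << my_ffs(d) : size;

        /*
         * d = (0x3004 | 0x2008) & 7 = 0x300c & 7 = 4, so relalign = 4:
         * after the head loop advances dst by one byte to a 4-byte
         * boundary, all three pointers are mutually 4-byte aligned, so
         * the 4-byte-stride loop can run, but the 8-byte one cannot.
         */
        printf("relalign = %d\n", relalign);
        return 0;
}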
lib/crypto/Kconfig
@@ -36,7 +36,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
        tristate
-       select XOR_BLOCKS
+       select CRYPTO_LIB_UTILS
        help
          This symbol can be depended upon by arch implementations of the
          ChaCha library interface that require the generic code as a
@@ -46,7 +46,6 @@ config CRYPTO_LIB_CHACHA_GENERIC
config CRYPTO_LIB_CHACHA
        tristate "ChaCha library interface"
-       depends on CRYPTO
        depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
        select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
        help
lib/crypto/Makefile
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
-libcryptoutils-y := memneq.o
+libcryptoutils-y := memneq.o utils.o
# chacha is used by the /dev/random driver which is always builtin
obj-y += chacha.o
lib/crypto/memneq.c
@@ -175,5 +175,3 @@ noinline unsigned long __crypto_memneq(const void *a, const void *b,
EXPORT_SYMBOL(__crypto_memneq);
#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
MODULE_LICENSE("GPL");
lib/crypto/utils.c (new file)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Crypto library utility functions
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/module.h>

/*
 * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
 * (which may alias one of the sources). Don't call this directly; call
 * crypto_xor() or crypto_xor_cpy() instead.
 */
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
        int relalign = 0;

        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                int size = sizeof(unsigned long);
                int d = (((unsigned long)dst ^ (unsigned long)src1) |
                         ((unsigned long)dst ^ (unsigned long)src2)) &
                        (size - 1);

                relalign = d ? 1 << __ffs(d) : size;

                /*
                 * If we care about alignment, process as many bytes as
                 * needed to advance dst and src to values whose alignments
                 * equal their relative alignment. This will allow us to
                 * process the remainder of the input using optimal strides.
                 */
                while (((unsigned long)dst & (relalign - 1)) && len > 0) {
                        *dst++ = *src1++ ^ *src2++;
                        len--;
                }
        }

        while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u64 l = get_unaligned((u64 *)src1) ^
                                get_unaligned((u64 *)src2);
                        put_unaligned(l, (u64 *)dst);
                } else {
                        *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
                }

                dst += 8;
                src1 += 8;
                src2 += 8;
                len -= 8;
        }

        while (len >= 4 && !(relalign & 3)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u32 l = get_unaligned((u32 *)src1) ^
                                get_unaligned((u32 *)src2);
                        put_unaligned(l, (u32 *)dst);
                } else {
                        *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
                }

                dst += 4;
                src1 += 4;
                src2 += 4;
                len -= 4;
        }

        while (len >= 2 && !(relalign & 1)) {
                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
                        u16 l = get_unaligned((u16 *)src1) ^
                                get_unaligned((u16 *)src2);
                        put_unaligned(l, (u16 *)dst);
                } else {
                        *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
                }

                dst += 2;
                src1 += 2;
                src2 += 2;
                len -= 2;
        }

        while (len--)
                *dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);

MODULE_LICENSE("GPL");