Commit fe3b99b6 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/ghash - switch to AES library

The GHASH code uses the generic AES key expansion routines and, on the
fallback path, calls directly into the scalar table-based AES cipher for
arm64. Since that implementation is known to be non-time-invariant,
calling it from an otherwise time-invariant SIMD cipher is a bit nasty.

So let's switch to the AES library - this makes the code more robust,
and drops the dependency on the generic AES cipher, allowing us to
omit it entirely in the future.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 363a90c2
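For reference, the AES library selected via CRYPTO_LIB_AES exposes aes_expandkey() and aes_encrypt() in <crypto/aes.h>, which keep the key schedule and round count inside struct crypto_aes_ctx instead of taking them as explicit arguments the way __aes_arm64_encrypt() does. The sketch below is a minimal illustration of that call pattern, not part of this patch (the helper name is made up for the example); it derives the GHASH hash key by encrypting an all-zero block, mirroring what gcm_setkey() does in the diff.

#include <crypto/aes.h>
#include <linux/string.h>

/*
 * Illustrative sketch only (not from this commit): expand the AES key and
 * encrypt a zero block with the generic AES library. aes_expandkey()
 * validates the key length and builds the key schedule; aes_encrypt()
 * derives the round count from the expanded key, so no num_rounds()
 * argument is needed.
 */
static int example_derive_ghash_key(const u8 *inkey, unsigned int keylen,
				    u8 h[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes_key;
	static const u8 zeroes[AES_BLOCK_SIZE];
	int ret;

	ret = aes_expandkey(&aes_key, inkey, keylen);
	if (ret)
		return -EINVAL;

	aes_encrypt(&aes_key, h, zeroes);

	memzero_explicit(&aes_key, sizeof(aes_key));
	return 0;
}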
@@ -58,8 +58,7 @@ config CRYPTO_GHASH_ARM64_CE
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_HASH
 	select CRYPTO_GF128MUL
-	select CRYPTO_AES
-	select CRYPTO_AES_ARM64
+	select CRYPTO_LIB_AES
 
 config CRYPTO_CRCT10DIF_ARM64_CE
 	tristate "CRCT10DIF digest algorithm using PMULL instructions"
@@ -70,8 +70,6 @@ asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
 asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
 					u32 const rk[], int rounds);
 
-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
-
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -309,14 +307,13 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
 	u8 key[GHASH_BLOCK_SIZE];
 	int ret;
 
-	ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
+	ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
 	if (ret) {
 		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
 
-	__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
-			    num_rounds(&ctx->aes_key));
+	aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
 
 	return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
 }
@@ -467,7 +464,7 @@ static int gcm_encrypt(struct aead_request *req)
 			rk = ctx->aes_key.key_enc;
 		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
-		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+		aes_encrypt(&ctx->aes_key, tag, iv);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -478,8 +475,7 @@ static int gcm_encrypt(struct aead_request *req)
 			int remaining = blocks;
 
 			do {
-				__aes_arm64_encrypt(ctx->aes_key.key_enc,
-						    ks, iv, nrounds);
+				aes_encrypt(&ctx->aes_key, ks, iv);
 				crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
 				crypto_inc(iv, AES_BLOCK_SIZE);
 
@@ -495,13 +491,10 @@ static int gcm_encrypt(struct aead_request *req)
 						 walk.nbytes % (2 * AES_BLOCK_SIZE));
 		}
 		if (walk.nbytes) {
-			__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
-					    nrounds);
+			aes_encrypt(&ctx->aes_key, ks, iv);
 			if (walk.nbytes > AES_BLOCK_SIZE) {
 				crypto_inc(iv, AES_BLOCK_SIZE);
-				__aes_arm64_encrypt(ctx->aes_key.key_enc,
-						    ks + AES_BLOCK_SIZE, iv,
-						    nrounds);
+				aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
 			}
 		}
 	}
@@ -605,7 +598,7 @@ static int gcm_decrypt(struct aead_request *req)
 			rk = ctx->aes_key.key_enc;
 		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
-		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+		aes_encrypt(&ctx->aes_key, tag, iv);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -618,8 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
 					pmull_ghash_update_p64);
 
 			do {
-				__aes_arm64_encrypt(ctx->aes_key.key_enc,
-						    buf, iv, nrounds);
+				aes_encrypt(&ctx->aes_key, buf, iv);
 				crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
 				crypto_inc(iv, AES_BLOCK_SIZE);
 
@@ -637,11 +629,9 @@ static int gcm_decrypt(struct aead_request *req)
 				memcpy(iv2, iv, AES_BLOCK_SIZE);
 				crypto_inc(iv2, AES_BLOCK_SIZE);
 
-				__aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
-						    iv2, nrounds);
+				aes_encrypt(&ctx->aes_key, iv2, iv2);
 			}
-			__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
-					    nrounds);
+			aes_encrypt(&ctx->aes_key, iv, iv);
 		}
 	}