Commit b3482635 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/aes-ccm - remove non-SIMD fallback path

AES/CCM on arm64 is implemented as a synchronous AEAD, and so it is
guaranteed by the API that it is only invoked in task or softirq
context. Since softirqs are now only handled when the SIMD is not
being used in the task context that was interrupted to service the
softirq, we no longer need a fallback path. Let's remove it.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 36a916af
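
For context: before this change, every NEON section in this driver was gated on crypto_simd_usable(), with a scalar AES-library fallback on the other branch. A minimal sketch of that pattern (simplified for illustration; ccm_do_step() is a hypothetical helper, not the driver code itself):

#include <asm/neon.h>
#include <crypto/internal/simd.h>

/*
 * Sketch of the gating pattern this commit deletes (hypothetical,
 * simplified).  crypto_simd_usable() returns false when the NEON
 * registers must not be touched, e.g. in hardirq context.  Now that
 * softirqs only run when the interrupted task is not inside a
 * kernel_neon_begin()/kernel_neon_end() section, a synchronous AEAD
 * (only ever invoked in task or softirq context) can take the NEON
 * path unconditionally.
 */
static void ccm_do_step(void)
{
        if (crypto_simd_usable()) {
                kernel_neon_begin();
                /* NEON-accelerated CCM primitive runs here */
                kernel_neon_end();
        } else {
                /* scalar AES library fallback -- now unreachable */
        }
}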
@@ -6,12 +6,10 @@
  */

 #include <asm/neon.h>
-#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
-#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
@@ -99,36 +97,10 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                            u32 abytes, u32 *macp)
 {
-        if (crypto_simd_usable()) {
         kernel_neon_begin();
         ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
                              num_rounds(key));
         kernel_neon_end();
-        } else {
-                if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
-                        int added = min(abytes, AES_BLOCK_SIZE - *macp);
-
-                        crypto_xor(&mac[*macp], in, added);
-
-                        *macp += added;
-                        in += added;
-                        abytes -= added;
-                }
-
-                while (abytes >= AES_BLOCK_SIZE) {
-                        aes_encrypt(key, mac, mac);
-                        crypto_xor(mac, in, AES_BLOCK_SIZE);
-
-                        in += AES_BLOCK_SIZE;
-                        abytes -= AES_BLOCK_SIZE;
-                }
-
-                if (abytes > 0) {
-                        aes_encrypt(key, mac, mac);
-                        crypto_xor(mac, in, abytes);
-                        *macp = abytes;
-                }
-        }
 }

 static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
@@ -172,54 +144,6 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
         } while (len);
 }

-static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
-                              struct crypto_aes_ctx *ctx, bool enc)
-{
-        u8 buf[AES_BLOCK_SIZE];
-        int err = 0;
-
-        while (walk->nbytes) {
-                int blocks = walk->nbytes / AES_BLOCK_SIZE;
-                u32 tail = walk->nbytes % AES_BLOCK_SIZE;
-                u8 *dst = walk->dst.virt.addr;
-                u8 *src = walk->src.virt.addr;
-                u32 nbytes = walk->nbytes;
-
-                if (nbytes == walk->total && tail > 0) {
-                        blocks++;
-                        tail = 0;
-                }
-
-                do {
-                        u32 bsize = AES_BLOCK_SIZE;
-
-                        if (nbytes < AES_BLOCK_SIZE)
-                                bsize = nbytes;
-
-                        crypto_inc(walk->iv, AES_BLOCK_SIZE);
-                        aes_encrypt(ctx, buf, walk->iv);
-                        aes_encrypt(ctx, mac, mac);
-                        if (enc)
-                                crypto_xor(mac, src, bsize);
-                        crypto_xor_cpy(dst, src, buf, bsize);
-                        if (!enc)
-                                crypto_xor(mac, dst, bsize);
-                        dst += bsize;
-                        src += bsize;
-                        nbytes -= bsize;
-                } while (--blocks);
-
-                err = skcipher_walk_done(walk, tail);
-        }
-
-        if (!err) {
-                aes_encrypt(ctx, buf, iv0);
-                aes_encrypt(ctx, mac, mac);
-                crypto_xor(mac, buf, AES_BLOCK_SIZE);
-        }
-
-        return err;
-}
-
 static int ccm_encrypt(struct aead_request *req)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -242,7 +166,6 @@ static int ccm_encrypt(struct aead_request *req)

         err = skcipher_walk_aead_encrypt(&walk, req, false);

-        if (crypto_simd_usable()) {
         while (walk.nbytes) {
                 u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -250,8 +173,7 @@ static int ccm_encrypt(struct aead_request *req)
                         tail = 0;

                 kernel_neon_begin();
-                ce_aes_ccm_encrypt(walk.dst.virt.addr,
-                                   walk.src.virt.addr,
+                ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                    walk.nbytes - tail, ctx->key_enc,
                                    num_rounds(ctx), mac, walk.iv);
                 kernel_neon_end();
@@ -260,13 +182,9 @@ static int ccm_encrypt(struct aead_request *req)
         }

         if (!err) {
                 kernel_neon_begin();
-                ce_aes_ccm_final(mac, buf, ctx->key_enc,
-                                 num_rounds(ctx));
+                ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
                 kernel_neon_end();
         }
-        } else {
-                err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
-        }

         if (err)
                 return err;
@@ -300,7 +218,6 @@ static int ccm_decrypt(struct aead_request *req)

         err = skcipher_walk_aead_decrypt(&walk, req, false);

-        if (crypto_simd_usable()) {
         while (walk.nbytes) {
                 u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -308,8 +225,7 @@ static int ccm_decrypt(struct aead_request *req)
                         tail = 0;

                 kernel_neon_begin();
-                ce_aes_ccm_decrypt(walk.dst.virt.addr,
-                                   walk.src.virt.addr,
+                ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                    walk.nbytes - tail, ctx->key_enc,
                                    num_rounds(ctx), mac, walk.iv);
                 kernel_neon_end();
@@ -318,14 +234,9 @@ static int ccm_decrypt(struct aead_request *req)
         }

         if (!err) {
                 kernel_neon_begin();
-                ce_aes_ccm_final(mac, buf, ctx->key_enc,
-                                 num_rounds(ctx));
+                ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
                 kernel_neon_end();
         }
-        } else {
-                err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
-        }

         if (err)
                 return err;
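
What the deleted fallback computed: CCM combines CTR-mode encryption with a CBC-MAC over the same data, and ccm_crypt_fallback() produced both with the scalar AES library one block at a time (as the removed finalisation code shows, counter block zero is reserved for encrypting the final MAC, which is why the counter is incremented before the first data block). A condensed sketch of the per-block step, full blocks only — simplified from the removed code, with the short-tail and finalisation handling omitted:

#include <crypto/aes.h>
#include <crypto/algapi.h>

/* One full-block CCM step, condensed from the removed fallback. */
static void ccm_block_step(const struct crypto_aes_ctx *ctx, u8 *dst,
                           const u8 *src, u8 mac[AES_BLOCK_SIZE],
                           u8 ctr[AES_BLOCK_SIZE], bool enc)
{
        u8 ks[AES_BLOCK_SIZE];

        crypto_inc(ctr, AES_BLOCK_SIZE);        /* advance the CTR counter */
        aes_encrypt(ctx, ks, ctr);              /* keystream for this block */
        aes_encrypt(ctx, mac, mac);             /* CBC-MAC chaining step */
        if (enc)
                crypto_xor(mac, src, AES_BLOCK_SIZE);   /* MAC the plaintext */
        crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);   /* CTR en/decrypt */
        if (!enc)
                crypto_xor(mac, dst, AES_BLOCK_SIZE);   /* plaintext is dst here */
}

Both halves use the same AES key; the NEON routines ce_aes_ccm_encrypt()/ce_aes_ccm_decrypt() fuse these per-block steps into one pass, which is why dropping the scalar branch removes this much glue code.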