Commit 557ecb45 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/aes-blk - remove pointless (u8 *) casts

For some reason, the asmlinkage prototypes of the NEON routines take
u8[] arguments for the round key arrays, while the actual round keys
are arrays of u32, so passing them into those routines requires a
(u8 *) cast at each call site. Fix that.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 718f608c
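
For readers skimming the diff, here is a minimal standalone C sketch of the mismatch being fixed. The function names and struct are hypothetical stand-ins: the real round keys live in the kernel's crypto_aes_ctx, and the real routines are the NEON implementations in aes-modes.S.

/*
 * Illustrative only, not kernel source: round keys are stored as 32-bit
 * words, but the old prototypes declared them as u8[], forcing a cast
 * at every call site.
 */
#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
typedef uint32_t u32;

struct aes_ctx {
	u32 key_enc[60];	/* expanded round keys, stored as u32 words */
};

/* old-style prototype: u8 const rk[] despite the keys being u32 words */
static void ecb_encrypt_old(u8 out[], u8 const in[], u8 const rk[],
			    int rounds, int blocks)
{
	(void)rk; (void)rounds;
	memcpy(out, in, 16 * (size_t)blocks);	/* stand-in for the cipher */
}

/* fixed prototype: matches the actual storage type, so no cast is needed */
static void ecb_encrypt_new(u8 out[], u8 const in[], u32 const rk[],
			    int rounds, int blocks)
{
	(void)rk; (void)rounds;
	memcpy(out, in, 16 * (size_t)blocks);	/* stand-in for the cipher */
}

int main(void)
{
	struct aes_ctx ctx = { { 0 } };
	u8 in[16] = { 0 }, out[16];

	ecb_encrypt_old(out, in, (u8 *)ctx.key_enc, 10, 1); /* cast required */
	ecb_encrypt_new(out, in, ctx.key_enc, 10, 1);       /* passes directly */
	return 0;
}

Besides dropping the casts, the u32 prototypes let the compiler check that callers actually pass round-key words rather than arbitrary byte buffers.
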
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -63,24 +63,24 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
 
 /* defined in aes-modes.S */
-asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks);
-asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks);
-asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks, u8 ctr[]);
-asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
-				int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+				int rounds, int blocks, u32 const rk2[], u8 iv[],
 				int first);
-asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
-				int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+				int rounds, int blocks, u32 const rk2[], u8 iv[],
 				int first);
 
 asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
@@ -142,7 +142,7 @@ static int ecb_encrypt(struct skcipher_request *req)
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key_enc, rounds, blocks);
+				ctx->key_enc, rounds, blocks);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -162,7 +162,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key_dec, rounds, blocks);
+				ctx->key_dec, rounds, blocks);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -182,7 +182,7 @@ static int cbc_encrypt(struct skcipher_request *req)
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+				ctx->key_enc, rounds, blocks, walk.iv);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -202,7 +202,7 @@ static int cbc_decrypt(struct skcipher_request *req)
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key_dec, rounds, blocks, walk.iv);
+				ctx->key_dec, rounds, blocks, walk.iv);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -222,7 +222,7 @@ static int ctr_encrypt(struct skcipher_request *req)
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+				ctx->key_enc, rounds, blocks, walk.iv);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -238,7 +238,7 @@ static int ctr_encrypt(struct skcipher_request *req)
 		blocks = -1;
 
 		kernel_neon_begin();
-		aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
+		aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
 				blocks, walk.iv);
 		kernel_neon_end();
 		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
@@ -272,8 +272,8 @@ static int xts_encrypt(struct skcipher_request *req)
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		kernel_neon_begin();
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key1.key_enc, rounds, blocks,
-				(u8 *)ctx->key2.key_enc, walk.iv, first);
+				ctx->key1.key_enc, rounds, blocks,
+				ctx->key2.key_enc, walk.iv, first);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -294,8 +294,8 @@ static int xts_decrypt(struct skcipher_request *req)
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		kernel_neon_begin();
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				(u8 *)ctx->key1.key_dec, rounds, blocks,
-				(u8 *)ctx->key2.key_enc, walk.iv, first);
+				ctx->key1.key_dec, rounds, blocks,
+				ctx->key2.key_enc, walk.iv, first);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -412,7 +412,6 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
 {
 	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
 	be128 *consts = (be128 *)ctx->consts;
-	u8 *rk = (u8 *)ctx->key.key_enc;
 	int rounds = 6 + key_len / 4;
 	int err;
 
@@ -422,7 +421,8 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
 
 	/* encrypt the zero vector */
 	kernel_neon_begin();
-	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
+	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
+			rounds, 1);
 	kernel_neon_end();
 
 	cmac_gf128_mul_by_x(consts, consts);
@@ -441,7 +441,6 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
 	};
 
 	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
-	u8 *rk = (u8 *)ctx->key.key_enc;
 	int rounds = 6 + key_len / 4;
 	u8 key[AES_BLOCK_SIZE];
 	int err;
@@ -451,8 +450,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
 		return err;
 
 	kernel_neon_begin();
-	aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
-	aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
+	aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
+	aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
 	kernel_neon_end();
 
 	return cbcmac_setkey(tfm, key, sizeof(key));