Commit d0ed0db1 authored by Herbert Xu

crypto: arm64/aes - Convert to skcipher

This patch converts arm64/aes over to the skcipher interface.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 85671860
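
For reference, a minimal sketch of how kernel code would drive one of the converted algorithms through the generic skcipher API (not part of this patch; the function and buffer names are hypothetical, and a real caller must also handle -EINPROGRESS/-EBUSY from an asynchronous provider rather than assuming synchronous completion):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_ctr_aes(const u8 *key, unsigned int keylen,
			   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	/* "ctr(aes)" resolves to the highest-priority provider, e.g. the
	 * wrapper this driver registers around its internal NEON code. */
	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);	/* in-place encryption */

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
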
@@ -11,8 +11,8 @@
 #include <asm/neon.h>
 #include <asm/hwcap.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <crypto/xts.h>
@@ -80,13 +80,19 @@ struct crypto_aes_xts_ctx {
 	struct crypto_aes_ctx __aligned(8) key2;
 };
 
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+			       unsigned int key_len)
+{
+	return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	ret = xts_check_key(tfm, in_key, key_len);
+	ret = xts_verify_key(tfm, in_key, key_len);
 	if (ret)
 		return ret;
@@ -97,111 +103,101 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	first = 1;
 	kernel_neon_begin();
@@ -209,17 +205,13 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		first = 0;
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
-			break;
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (walk.nbytes % AES_BLOCK_SIZE) {
-		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
-		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+		unsigned int nbytes = walk.nbytes;
+		u8 *tdst = walk.dst.virt.addr;
+		u8 *tsrc = walk.src.virt.addr;
 
 		/*
 		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -230,227 +222,169 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
 				blocks, walk.iv, first);
 		memcpy(tdst, tail, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static struct crypto_alg aes_algs[] = { {
-	.cra_name		= "__ecb-aes-" MODE,
-	.cra_driver_name	= "__driver-ecb-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= aes_setkey,
-		.encrypt	= ecb_encrypt,
-		.decrypt	= ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+	.base = {
+		.cra_name		= "__ecb(aes)",
+		.cra_driver_name	= "__ecb-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= ecb_encrypt,
+	.decrypt	= ecb_decrypt,
 }, {
-	.cra_name		= "__cbc-aes-" MODE,
-	.cra_driver_name	= "__driver-cbc-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aes_setkey,
-		.encrypt	= cbc_encrypt,
-		.decrypt	= cbc_decrypt,
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= cbc_encrypt,
+	.decrypt	= cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-" MODE,
-	.cra_driver_name	= "__driver-ctr-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aes_setkey,
-		.encrypt	= ctr_encrypt,
-		.decrypt	= ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= ctr_encrypt,
+	.decrypt	= ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-" MODE,
-	.cra_driver_name	= "__driver-xts-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= xts_set_key,
-		.encrypt	= xts_encrypt,
-		.decrypt	= xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= xts_set_key,
+	.encrypt	= xts_encrypt,
+	.decrypt	= xts_decrypt,
 } };
 
-static int __init aes_init(void)
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+		simd_skcipher_free(aes_simd_algs[i]);
+
+	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
 }
 
-static void __exit aes_exit(void)
+static int __init aes_init(void)
 {
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
+
+	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		algname = aes_algs[i].base.cra_name + 2;
+		drvname = aes_algs[i].base.cra_driver_name + 2;
+		basename = aes_algs[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aes_simd_algs[i] = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aes_exit();
+	return err;
 }
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
...
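
A note on the registration flow in aes_init() above: every entry in aes_algs[] carries a "__" name prefix and CRYPTO_ALG_INTERNAL, so it is not directly reachable by users; simd_skcipher_create_compat() then creates the user-visible wrapper for each one. Worked through for the first entry (assuming MODE expands to "ce" in the Crypto Extensions build of this file; the NEON build would substitute "neon"):

	algname  = cra_name + 2		/* "__ecb(aes)"   -> "ecb(aes)"   */
	drvname  = cra_driver_name + 2	/* "__ecb-aes-ce" -> "ecb-aes-ce" */
	basename = cra_driver_name	/* stays             "__ecb-aes-ce" */

The wrapper registered as "ecb(aes)"/"ecb-aes-ce" calls the internal "__ecb-aes-ce" implementation directly when SIMD use is permitted in the current context, and defers to an asynchronous (cryptd-backed) path otherwise, which is why the old ablk_helper-based entries could be dropped.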