Commit 9acb3247 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: mxs - switch to skcipher API

Commit 7a7ffe65 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API, which is intended to
replace both blkcipher and ablkcipher. While all consumers of the API were
converted long ago, some producers of the ablkcipher API remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
ablkcipher code in the near future.

Tested-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 4aaf3840
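
[Note] For readers unfamiliar with the interface being adopted: the skcipher API is driven the same way from the consumer side whether the provider is synchronous or asynchronous. The sketch below is illustrative only (not part of this patch; the function name and the buffer/key handling are made up for the example) and shows one in-place CBC encryption through the API. The length must be a multiple of AES_BLOCK_SIZE for "cbc(aes)".

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_skcipher_encrypt(u8 *buf, unsigned int len,
				    const u8 *key, unsigned int keylen,
				    u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* "cbc(aes)" resolves to the highest-priority provider, which on
	 * supported hardware may now be this driver's "cbc-aes-dcp". */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* In-place operation: the same scatterlist is src and dst. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Waits for completion even if the provider is asynchronous. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

With this driver converted, such a request no longer has to pass through the ablkcipher adaptation layer that the commit message wants to remove.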
@@ -211,11 +211,11 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
  * Encryption (AES128)
  */
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
-			   struct ablkcipher_request *req, int init)
+			   struct skcipher_request *req, int init)
 {
 	struct dcp *sdcp = global_sdcp;
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;
 
 	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -274,9 +274,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 {
 	struct dcp *sdcp = global_sdcp;
-	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+	struct skcipher_request *req = skcipher_request_cast(arq);
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	struct scatterlist *dst = req->dst;
 	struct scatterlist *src = req->src;
@@ -305,7 +305,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 	if (!rctx->ecb) {
 		/* Copy the CBC IV just past the key. */
-		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 		/* CBC needs the INIT set. */
 		init = 1;
 	} else {
@@ -316,10 +316,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 		src_buf = sg_virt(src);
 		len = sg_dma_len(src);
 		tlen += len;
-		limit_hit = tlen > req->nbytes;
+		limit_hit = tlen > req->cryptlen;
 
 		if (limit_hit)
-			len = req->nbytes - (tlen - len);
+			len = req->cryptlen - (tlen - len);
 
 		do {
 			if (actx->fill + len > out_off)
@@ -375,10 +375,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 	/* Copy the IV for CBC for chaining */
 	if (!rctx->ecb) {
 		if (rctx->enc)
-			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 		else
-			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 	}
@@ -422,17 +422,17 @@ static int dcp_chan_thread_aes(void *data)
 	return 0;
 }
 
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 	int ret;
 
 	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   req->nbytes, req->info);
+				   req->cryptlen, req->iv);
 
 	if (enc)
 		ret = crypto_skcipher_encrypt(subreq);
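
[Note] The hunk above cuts off mid-function. Stitched together from the pieces shown, the converted fallback helper takes the standard on-stack sync-skcipher shape; this is an illustrative reconstruction, not a verbatim copy of the file:

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	/* Stack-allocated request sized for the sync fallback cipher. */
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	/* Reuse the caller's flags; no completion callback is needed
	 * because a sync skcipher finishes before returning. */
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	/* Wipe the on-stack request state before it goes out of scope. */
	skcipher_request_zero(subreq);

	return ret;
}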
@@ -444,12 +444,12 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
 	return ret;
 }
 
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 {
 	struct dcp *sdcp = global_sdcp;
 	struct crypto_async_request *arq = &req->base;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;
 
 	if (unlikely(actx->key_len != AES_KEYSIZE_128))
@@ -468,30 +468,30 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	return ret;
 }
 
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 1);
 }
 
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 1);
 }
 
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 0);
 }
 
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 0);
 }
 
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			      unsigned int len)
 {
-	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 	unsigned int ret;
 
 	/*
@@ -525,10 +525,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	return ret;
 }
 
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 {
-	const char *name = crypto_tfm_alg_name(tfm);
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 	struct crypto_sync_skcipher *blk;
 
 	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -536,13 +536,13 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 		return PTR_ERR(blk);
 
 	actx->fallback = blk;
-	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
 	return 0;
 }
 
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_sync_skcipher(actx->fallback);
 }
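
[Note] Two details of the converted init/exit pair are split across the hunks above: the per-request context size is now declared with crypto_skcipher_set_reqsize() rather than by writing tfm->crt_ablkcipher.reqsize directly, and the fallback allocation error check falls between the two hunks. An illustrative stitched version of the init path (the IS_ERR() test is inferred from the PTR_ERR() return shown):

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	/* Allocate a software fallback for requests the DCP cannot
	 * handle natively (e.g. key sizes other than AES-128). */
	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	/* Replaces the old tfm->crt_ablkcipher.reqsize assignment. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
	return 0;
}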
@@ -854,54 +854,44 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 }
 
 /* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
 	{
-		.cra_name		= "ecb(aes)",
-		.cra_driver_name	= "ecb-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
-					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u	= {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_ecb_encrypt,
-				.decrypt	= mxs_dcp_aes_ecb_decrypt
-			},
-		},
+		.base.cra_name		= "ecb(aes)",
+		.base.cra_driver_name	= "ecb-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_ecb_encrypt,
+		.decrypt		= mxs_dcp_aes_ecb_decrypt,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	}, {
-		.cra_name		= "cbc(aes)",
-		.cra_driver_name	= "cbc-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
-					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u = {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_cbc_encrypt,
-				.decrypt	= mxs_dcp_aes_cbc_decrypt,
-				.ivsize		= AES_BLOCK_SIZE,
-			},
-		},
+		.base.cra_name		= "cbc(aes)",
+		.base.cra_driver_name	= "cbc-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_cbc_encrypt,
+		.decrypt		= mxs_dcp_aes_cbc_decrypt,
+		.ivsize			= AES_BLOCK_SIZE,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	},
 };
@@ -1104,8 +1094,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
-		ret = crypto_register_algs(dcp_aes_algs,
-					   ARRAY_SIZE(dcp_aes_algs));
+		ret = crypto_register_skciphers(dcp_aes_algs,
+						ARRAY_SIZE(dcp_aes_algs));
 		if (ret) {
 			/* Failed to register algorithm. */
 			dev_err(dev, "Failed to register AES crypto!\n");
@@ -1139,7 +1129,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 
 err_unregister_aes:
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 err_destroy_aes_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
@@ -1164,7 +1154,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
 	crypto_unregister_ahash(&dcp_sha1_alg);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);