Commit 7740bd51 authored by Eric Biggers, committed by Herbert Xu

crypto: nx - don't abuse blkcipher_desc to pass iv around

The NX crypto driver is using 'struct blkcipher_desc' to pass the IV
around, even for AEADs (for which it creates the struct on the stack).
This is not appropriate since this structure is part of the "blkcipher"
API, which is deprecated and will be removed.

Just pass around the IV directly instead.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 713b2e72
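
The change touches the AES CBC, CCM, CTR, ECB, and GCM front ends plus the shared nx_build_sg_lists() helper. The heart of it is the helper's signature, where the IV argument moves from a blkcipher_desc to a raw byte pointer. A condensed before/after view, distilled from the hunks below (declarations only, not a buildable kernel excerpt):

/* Before: the IV rides along inside the deprecated blkcipher_desc
 * (read out of desc->info); the final parameter is the destination
 * slot the IV is staged into. */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist *dst, struct scatterlist *src,
		      unsigned int *nbytes, unsigned int offset, u8 *iv);

/* After: the source IV is passed directly, and 'oiv' names the
 * destination slot inside the coprocessor parameter block. */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
		      struct scatterlist *dst, struct scatterlist *src,
		      unsigned int *nbytes, unsigned int offset, u8 *oiv);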
@@ -72,8 +72,9 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
 	do {
 		to_process = nbytes - processed;
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
-				       processed, csbcpb->cpb.aes_cbc.iv);
+		rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src,
+				       &to_process, processed,
+				       csbcpb->cpb.aes_cbc.iv);
 		if (rc)
 			goto out;
...
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
 }
 
 static int ccm_nx_decrypt(struct aead_request *req,
-			  struct blkcipher_desc *desc,
+			  u8 *iv,
 			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
 			    req->src, nbytes + req->assoclen, authsize,
 			    SCATTERWALK_FROM_SG);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
 		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
-		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
 				       &to_process, processed + req->assoclen,
 				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
 		/* for partial completion, copy following for next
 		 * entry into loop...
 		 */
-		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
 		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
 }
 
 static int ccm_nx_encrypt(struct aead_request *req,
-			  struct blkcipher_desc *desc,
+			  u8 *iv,
 			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
-		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
 				       &to_process, processed + req->assoclen,
 				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
 		/* for partial completion, copy following for next
 		 * entry into loop...
 		 */
-		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
 		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,60 +481,48 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-	struct blkcipher_desc desc;
 	u8 *iv = rctx->iv;
 
 	iv[0] = 3;
 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 
-	desc.info = iv;
-
-	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_encrypt(struct aead_request *req)
 {
-	struct blkcipher_desc desc;
 	int rc;
 
-	desc.info = req->iv;
-	rc = crypto_ccm_check_iv(desc.info);
+	rc = crypto_ccm_check_iv(req->iv);
 	if (rc)
 		return rc;
 
-	return ccm_nx_encrypt(req, &desc, req->assoclen);
+	return ccm_nx_encrypt(req, req->iv, req->assoclen);
 }
 
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-	struct blkcipher_desc desc;
 	u8 *iv = rctx->iv;
 
 	iv[0] = 3;
 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 
-	desc.info = iv;
-
-	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_decrypt(struct aead_request *req)
 {
-	struct blkcipher_desc desc;
 	int rc;
 
-	desc.info = req->iv;
-	rc = crypto_ccm_check_iv(desc.info);
+	rc = crypto_ccm_check_iv(req->iv);
 	if (rc)
 		return rc;
 
-	return ccm_nx_decrypt(req, &desc, req->assoclen);
+	return ccm_nx_decrypt(req, req->iv, req->assoclen);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
...
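
For context on the ccm4309 wrappers above: the 16-byte iv they build is the CCM counter block of RFC 3610, laid out as one flags byte, an 11-byte nonce, and a 4-byte block counter. iv[0] = 3 encodes L - 1 for the 4-byte length field, and RFC 4309 forms the nonce from a 3-byte salt taken from key material plus the 8-byte per-request IV. A standalone sketch of that layout (the helper name is ours, not the driver's):

#include <stdint.h>
#include <string.h>

/* Illustrative only: build the RFC 4309 CCM counter block the way the
 * ccm4309 wrappers above do: flags | salt | explicit IV | counter. */
static void ccm4309_ctr_block(uint8_t block[16],
			      const uint8_t salt[3],   /* from key material */
			      const uint8_t req_iv[8]) /* per-request IV */
{
	block[0] = 3;			/* flags byte: L - 1, with L = 4 */
	memcpy(block + 1, salt, 3);	/* implicit part of the nonce */
	memcpy(block + 4, req_iv, 8);	/* explicit part of the nonce */
	memset(block + 12, 0, 4);	/* 32-bit counter: i = 0 gives A_0 */
}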
@@ -85,8 +85,9 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
 	do {
 		to_process = nbytes - processed;
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
-				       processed, csbcpb->cpb.aes_ctr.iv);
+		rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src,
+				       &to_process, processed,
+				       csbcpb->cpb.aes_ctr.iv);
 		if (rc)
 			goto out;
...
@@ -72,7 +72,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
 	do {
 		to_process = nbytes - processed;
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
+		rc = nx_build_sg_lists(nx_ctx, NULL, dst, src, &to_process,
 				       processed, NULL);
 		if (rc)
 			goto out;
...
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 	return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
-		unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
 {
 	int rc;
 	struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
 			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	/* Copy IV */
-	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
 
 	do {
 		/*
@@ -240,8 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
 	return rc;
 }
 
-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
-		     int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
 {
 	int rc;
 	struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	len = AES_BLOCK_SIZE;
 
 	/* Encrypt the counter/IV */
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
 				 &len, nx_ctx->ap->sglen);
 
 	if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
 	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (rc)
 		goto out;
 	atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
 		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
-	struct blkcipher_desc desc;
 	unsigned int nbytes = req->cryptlen;
 	unsigned int processed = 0, to_process;
 	unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	desc.info = rctx->iv;
 	/* initialize the counter */
-	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+	*(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
 
 	if (nbytes == 0) {
 		if (assoclen == 0)
-			rc = gcm_empty(req, &desc, enc);
+			rc = gcm_empty(req, rctx->iv, enc);
 		else
-			rc = gmac(req, &desc, assoclen);
+			rc = gmac(req, rctx->iv, assoclen);
 		if (rc)
 			goto out;
 		else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
 		to_process = nbytes - processed;
 
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+		rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
 				       req->src, &to_process,
 				       processed + req->assoclen,
 				       csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
 		if (rc)
 			goto out;
 
-		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+		memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
 		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
 		memcpy(csbcpb->cpb.aes_gcm.in_s0,
...
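
For context on the counter setup in gcm_aes_nx_crypt() above: with GCM's usual 96-bit IV, the initial counter block J0 is IV || 0^31 || 1 (NIST SP 800-38D), i.e. the 32-bit counter in the last four bytes starts at 1. The driver gets there with a single 32-bit store at NX_GCM_CTR_OFFSET into rctx->iv (the offset is assumed here to be 12; the store yields this byte pattern on a big-endian CPU). A standalone, endian-neutral sketch:

#include <stdint.h>
#include <string.h>

/* Illustrative only: form GCM's initial counter block J0 for a
 * 96-bit IV, per NIST SP 800-38D: J0 = IV || 0^31 || 1. */
static void gcm_j0(uint8_t j0[16], const uint8_t iv[12])
{
	memcpy(j0, iv, 12);	/* 96-bit IV fills the first 12 bytes */
	j0[12] = 0;		/* high counter bytes are zero */
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;		/* 32-bit counter starts at 1 */
}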
@@ -243,25 +243,25 @@ static long int trim_sg_list(struct nx_sg *sg,
  * scatterlists based on them.
  *
  * @nx_ctx: NX crypto context for the lists we're building
- * @desc: the block cipher descriptor for the operation
+ * @iv: iv data, if the algorithm requires it
  * @dst: destination scatterlist
  * @src: source scatterlist
  * @nbytes: length of data described in the scatterlists
  * @offset: number of bytes to fast-forward past at the beginning of
  *          scatterlists.
- * @iv: destination for the iv data, if the algorithm requires it
+ * @oiv: destination for the iv data, if the algorithm requires it
  *
- * This is common code shared by all the AES algorithms. It uses the block
- * cipher walk routines to traverse input and output scatterlists, building
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
  * corresponding NX scatterlists
  */
 int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
-		      struct blkcipher_desc *desc,
+		      const u8 *iv,
 		      struct scatterlist *dst,
 		      struct scatterlist *src,
 		      unsigned int *nbytes,
 		      unsigned int offset,
-		      u8 *iv)
+		      u8 *oiv)
 {
 	unsigned int delta = 0;
 	unsigned int total = *nbytes;
@@ -274,8 +274,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 	max_sg_len = min_t(u64, max_sg_len,
 			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
-	if (iv)
-		memcpy(iv, desc->info, AES_BLOCK_SIZE);
+	if (oiv)
+		memcpy(oiv, iv, AES_BLOCK_SIZE);
 
 	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
...
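
Restating the contract from the doc comment above: iv is now the input (the caller's IV bytes) and oiv the output slot inside the coprocessor parameter block; algorithms without an IV, like ECB, pass NULL for both and the copy is skipped. A minimal standalone model of that staging step (the helper name is ours):

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Illustrative only: the iv -> oiv staging done at the top of
 * nx_build_sg_lists(). With oiv == NULL nothing is copied. */
static void stage_iv(const uint8_t *iv, uint8_t *oiv)
{
	if (oiv)
		memcpy(oiv, iv, AES_BLOCK_SIZE);
}

Callers like cbc_aes_nx_crypt() pass desc->info as iv and csbcpb->cpb.aes_cbc.iv as oiv, matching the hunks above.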
@@ -155,9 +155,9 @@ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
 		  u32 may_sleep);
 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
-int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
-		      struct scatterlist *, struct scatterlist *, unsigned int *,
-		      unsigned int, u8 *);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+		      struct scatterlist *dst, struct scatterlist *src,
+		      unsigned int *nbytes, unsigned int offset, u8 *oiv);
 struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
 				struct scatterlist *, unsigned int,
 				unsigned int *);
...