Commit 10d87b73 authored by Leonidas Da Silva Barbosa's avatar Leonidas Da Silva Barbosa Committed by Herbert Xu

crypto: nx - Fixing SHA update bug

The bug happens when a data size smaller than the SHA block size is passed.
Since the first attempt is saved in the buffer, the second attempt
goes through two steps to calculate op.inlen and op.outlen. The issue
resides in this step: wrong values of op.inlen and op.outlen were
being calculated.

This patch fixes this by eliminating nx_sha_build_sg_list, which is
useless in SHA's algorithm context. Instead we call nx_build_sg_list
directly and pass a previously calculated max_sg_len to it.
Signed-off-by: default avatarLeonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent c3365ce1
...@@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc) ...@@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc)
{ {
struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_sg *out_sg;
int len; int len;
int rc; u32 max_sg_len;
nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx_init(nx_ctx, HCOP_FC_SHA);
...@@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_desc *desc) ...@@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_desc *desc)
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
len = SHA256_DIGEST_SIZE; len = SHA256_DIGEST_SIZE;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
&nx_ctx->op.outlen, &len, max_sg_len);
&len, nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
(u8 *) sctx->state,
NX_DS_SHA256);
if (rc) if (len != SHA256_DIGEST_SIZE)
goto out; return -EINVAL;
sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1); sctx->state[1] = __cpu_to_be32(SHA256_H1);
...@@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc) ...@@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc)
sctx->state[7] = __cpu_to_be32(SHA256_H7); sctx->state[7] = __cpu_to_be32(SHA256_H7);
sctx->count = 0; sctx->count = 0;
out:
return 0; return 0;
} }
...@@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, ...@@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
u64 to_process = 0, leftover, total; u64 to_process = 0, leftover, total;
unsigned long irq_flags; unsigned long irq_flags;
int rc = 0; int rc = 0;
int data_len; int data_len;
u32 max_sg_len;
u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags); spin_lock_irqsave(&nx_ctx->lock, irq_flags);
...@@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, ...@@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do { do {
/* /*
* to_process: the SHA256_BLOCK_SIZE data chunk to process in * to_process: the SHA256_BLOCK_SIZE data chunk to process in
...@@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, ...@@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
if (buf_len) { if (buf_len) {
data_len = buf_len; data_len = buf_len;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(nx_ctx->in_sg,
&nx_ctx->op.inlen, (u8 *) sctx->buf,
&data_len, &data_len,
(u8 *) sctx->buf, max_sg_len);
NX_DS_SHA256);
if (rc || data_len != buf_len) if (data_len != buf_len) {
rc = -EINVAL;
goto out; goto out;
}
} }
data_len = to_process - buf_len; data_len = to_process - buf_len;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&nx_ctx->op.inlen, &data_len, max_sg_len);
&data_len,
(u8 *) data,
NX_DS_SHA256);
if (rc) nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
goto out;
to_process = (data_len + buf_len); to_process = (data_len + buf_len);
leftover = total - to_process; leftover = total - to_process;
...@@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) ...@@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags; unsigned long irq_flags;
int rc; u32 max_sg_len;
int rc = 0;
int len; int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags); spin_lock_irqsave(&nx_ctx->lock, irq_flags);
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that /* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */ * this is not an intermediate operation */
if (sctx->count >= SHA256_BLOCK_SIZE) { if (sctx->count >= SHA256_BLOCK_SIZE) {
...@@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) ...@@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
len = sctx->count & (SHA256_BLOCK_SIZE - 1); len = sctx->count & (SHA256_BLOCK_SIZE - 1);
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
&nx_ctx->op.inlen, &len, max_sg_len);
&len,
(u8 *) sctx->buf,
NX_DS_SHA256);
if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
rc = -EINVAL;
goto out; goto out;
}
len = SHA256_DIGEST_SIZE; len = SHA256_DIGEST_SIZE;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
&nx_ctx->op.outlen,
&len,
out,
NX_DS_SHA256);
if (rc || len != SHA256_DIGEST_SIZE) if (len != SHA256_DIGEST_SIZE) {
rc = -EINVAL;
goto out; goto out;
}
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) { if (!nx_ctx->op.outlen) {
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
......
...@@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc) ...@@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc)
{ {
struct sha512_state *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_sg *out_sg;
int len; int len;
int rc; u32 max_sg_len;
nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx_init(nx_ctx, HCOP_FC_SHA);
...@@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc) ...@@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc)
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
len = SHA512_DIGEST_SIZE; len = SHA512_DIGEST_SIZE;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
&nx_ctx->op.outlen, &len, max_sg_len);
&len, nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
(u8 *)sctx->state,
NX_DS_SHA512);
if (rc || len != SHA512_DIGEST_SIZE) if (len != SHA512_DIGEST_SIZE)
goto out; return -EINVAL;
sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[1] = __cpu_to_be64(SHA512_H1);
...@@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc) ...@@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0; sctx->count[0] = 0;
out:
return 0; return 0;
} }
...@@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, ...@@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
u64 to_process, leftover = 0, total; u64 to_process, leftover = 0, total;
unsigned long irq_flags; unsigned long irq_flags;
int rc = 0; int rc = 0;
int data_len; int data_len;
u32 max_sg_len;
u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags); spin_lock_irqsave(&nx_ctx->lock, irq_flags);
...@@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, ...@@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do { do {
/* /*
* to_process: the SHA512_BLOCK_SIZE data chunk to process in * to_process: the SHA512_BLOCK_SIZE data chunk to process in
...@@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, ...@@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
if (buf_len) { if (buf_len) {
data_len = buf_len; data_len = buf_len;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(nx_ctx->in_sg,
&nx_ctx->op.inlen, (u8 *) sctx->buf,
&data_len, &data_len, max_sg_len);
(u8 *) sctx->buf,
NX_DS_SHA512);
if (rc || data_len != buf_len) if (data_len != buf_len) {
rc = -EINVAL;
goto out; goto out;
}
} }
data_len = to_process - buf_len; data_len = to_process - buf_len;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&nx_ctx->op.inlen, &data_len, max_sg_len);
&data_len,
(u8 *) data,
NX_DS_SHA512);
if (rc || data_len != (to_process - buf_len)) nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
if (data_len != (to_process - buf_len)) {
rc = -EINVAL;
goto out; goto out;
}
to_process = (data_len + buf_len); to_process = (data_len + buf_len);
leftover = total - to_process; leftover = total - to_process;
...@@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) ...@@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
struct sha512_state *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
u64 count0; u64 count0;
unsigned long irq_flags; unsigned long irq_flags;
int rc; int rc = 0;
int len; int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags); spin_lock_irqsave(&nx_ctx->lock, irq_flags);
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that /* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */ * this is not an intermediate operation */
if (sctx->count[0] >= SHA512_BLOCK_SIZE) { if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
...@@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) ...@@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha512.message_bit_length_lo = count0; csbcpb->cpb.sha512.message_bit_length_lo = count0;
len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
&nx_ctx->op.inlen, max_sg_len);
&len,
(u8 *)sctx->buf,
NX_DS_SHA512);
if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
rc = -EINVAL;
goto out; goto out;
}
len = SHA512_DIGEST_SIZE; len = SHA512_DIGEST_SIZE;
rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
&nx_ctx->op.outlen, max_sg_len);
&len,
out,
NX_DS_SHA512);
if (rc) nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
goto out; nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) { if (!nx_ctx->op.outlen) {
rc = -EINVAL; rc = -EINVAL;
......
...@@ -251,53 +251,6 @@ static long int trim_sg_list(struct nx_sg *sg, ...@@ -251,53 +251,6 @@ static long int trim_sg_list(struct nx_sg *sg,
return oplen; return oplen;
} }
/**
 * nx_sha_build_sg_list - build an NX scatter/gather list for the SHA modes,
 *			  clamped to the device's sg-entry and data-size limits
 * @nx_ctx: NX crypto context supplying the per-device limits (ap->sglen,
 *	    ap->databytelen) used to bound the list
 * @nx_in_outsg: head of the in or out sg list to populate
 * @op_len: out-parameter; receives the byte length of the built sg list as
 *	    returned by trim_sg_list()
 * @nbytes: in/out; on entry the number of bytes requested, on return the
 *	    number of bytes actually covered by the sg list
 * @offset: pointer to the data buffer handed to nx_build_sg_list() (despite
 *	    the name, this is the buffer address, not a numeric offset)
 * @mode: NX_DS_SHA256 or NX_DS_SHA512; selects which block size is used to
 *	  round the processed length down to whole SHA blocks
 *
 * Return: 0 on success, -EINVAL if @mode is not a supported SHA mode.
 */
int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
			 struct nx_sg *nx_in_outsg,
			 s64 *op_len,
			 unsigned int *nbytes,
			 u8 *offset,
			 u32 mode)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_in_outsg;
	unsigned int max_sg_len;

	/* Cap the sg entry count by both the device's advertised sglen and
	 * the space the driver reserved for the sg table, then by the
	 * device's data-byte limit expressed in pages. */
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* The device cannot process more than databytelen in one op. */
	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);

	/* If the list could not cover everything requested, compute how
	 * many trailing bytes to trim so only whole SHA blocks remain. */
	switch (mode) {
	case NX_DS_SHA256:
		if (*nbytes < total)
			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
		break;
	case NX_DS_SHA512:
		if (*nbytes < total)
			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
		break;
	default:
		return -EINVAL;
	}

	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);

	return 0;
}
/** /**
* nx_build_sg_lists - walk the input scatterlists and build arrays of NX * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
* scatterlists based on them. * scatterlists based on them.
......
...@@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm); ...@@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep); u32 may_sleep);
int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
s64 *, unsigned int *, u8 *, u32);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
struct scatterlist *, struct scatterlist *, unsigned int *, struct scatterlist *, struct scatterlist *, unsigned int *,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment