Commit a36304b9 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - a regression caused by the conversion of IPsec ESP to the new AEAD
     interface: ESN with authencesn no longer works because it relied on
     the AD input SG list having a specific layout which is no longer
     the case.  In linux-next authencesn is fixed properly and no longer
     assumes anything about the SG list format, while for this release
     a minimal fix is applied to authencesn so that it works with the
     new linear layout.

   - fix memory corruption caused by a bogus index in the caam hash code.

   - fix powerpc nx SHA hashing, which could cause module load failures
     if module signature verification is enabled"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: caam - fix memory corruption in ahash_final_ctx
  crypto: nx - respect sg limit bounds when building sg lists for SHA
  crypto: authencesn - Fix breakage with new ESP code
parents 2c6625cd b310c178
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	unsigned int cryptlen = req->cryptlen;
 	struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}

-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;

 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);

 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);

 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = dst;

 	areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	struct page *srcp;
 	u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}

-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;

 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);

 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);

 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = src;

 	areq_ctx->complete = authenc_esn_verify_ahash_done;
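With the new AEAD interface, the ESP layer hands authencesn its associated data in one linear buffer rather than a three-entry scatterlist; the minimal fix above points the existing hsg/tsg bookkeeping at fixed 4-byte slices of the first 12 bytes of that buffer. A sketch of the layout those slices imply follows; the struct and field names are assumptions for illustration, while the offsets and lengths come straight from the sg_set_page() calls above:

    #include <linux/types.h>   /* __be32 */

    /*
     * Illustrative only: the hunks above hash bytes 0-3 and 8-11 as the
     * "head" (headlen = 8) and bytes 4-7 as the trailer (trailen = 4).
     * Labelling those bytes as SPI and sequence-number halves is an
     * assumption about the ESN layout, not taken from the patch.
     */
    struct esn_assoc_sketch {
            __be32 spi;     /* bytes 0-3,  hsg[0] */
            __be32 seq_hi;  /* bytes 4-7,  tsg[0] */
            __be32 seq_lo;  /* bytes 8-11, hsg[1] */
    };

The new check at the top of both functions (assoc->length < 12) rejects anything too short for this fixed-size interpretation.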
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
 			 state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;

-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
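The caamhash change above is an indexing fix: the SEC4_SG_LEN_FIN flag must be set on the last sg table entry, so the value added to edesc->sec4_sg has to be an entry count (sec4_sg_src_index - 1), not the table's size in bytes. A standalone toy program, not CAAM code, showing how far the byte-count form overshoots:

    #include <stdio.h>

    /* Toy stand-in for struct sec4_sg_entry; only its size matters here. */
    struct entry {
            unsigned int len;
    };

    int main(void)
    {
            int nentries = 2;                                  /* like sec4_sg_src_index */
            int nbytes = nentries * (int)sizeof(struct entry); /* like sec4_sg_bytes     */

            /* (table + X - 1) steps X - 1 whole entries forward, so X must
             * be an entry count, never a byte count. */
            printf("valid indices: 0..%d\n", nentries - 1);
            printf("byte-count expression hits index %d (out of bounds)\n", nbytes - 1);
            printf("entry-count expression hits index %d\n", nentries - 1);
            return 0;
    }

Even with this 4-byte toy entry the old expression lands several entries past a two-entry table; setting a flag that far out is the memory corruption the commit message refers to.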
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	}

 	do {
-		/*
-		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - to_process;
-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;

 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len,
 						 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}

+		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);

 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;

 		/*
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	}

 	do {
-		/*
-		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - leftover;
-		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-		leftover = total - to_process;
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;

 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len, max_sg_len);

@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}

+		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}

-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;

 		/*
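Both nx files get the same treatment: to_process is no longer derived from the remaining total alone, but is capped each iteration by how many sg entries remain after the buffered leftover consumed used_sgs of them, then rounded down to a whole hash block. A standalone restatement of that bound with made-up numbers; NX_PAGE_SIZE and the limits below are placeholders, not values read from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define NX_PAGE_SIZE      4096u   /* placeholder page size       */
    #define SHA256_BLOCK_SIZE 64u     /* SHA-256 block size in bytes */

    /* Same shape as the new per-iteration computation in the hunks above:
     * cap by the sg entries still free (minus one to allow for unaligned
     * data, per the patch comment), then align down to a block. */
    static uint64_t bound_to_process(uint64_t total, uint64_t max_sg_len,
                                     uint64_t used_sgs)
    {
            uint64_t cap = (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;
            uint64_t to_process = total < cap ? total : cap;

            return to_process & ~(uint64_t)(SHA256_BLOCK_SIZE - 1);
    }

    int main(void)
    {
            /* Example: 1 MiB left, 32 sg entries, 1 already used for
             * leftover data -> at most 30 pages (122880 bytes) now. */
            printf("process %llu bytes this iteration\n",
                   (unsigned long long)bound_to_process(1u << 20, 32, 1));
            return 0;
    }

The old code only masked the remaining total down to a block multiple, with no reference to max_sg_len at all, which is what the "respect sg limit bounds" subject line is about.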