Commit c709eeba authored by Radu Solea's avatar Radu Solea Committed by Herbert Xu

crypto: mxs-dcp - Fix SHA null hashes and output length

DCP writes at least 32 bytes to the output buffer instead of the documented hash length. Add an intermediate buffer to prevent writing out of bounds of the caller's result buffer.

When requested to produce null (empty-message) hashes, DCP fails to produce valid output. Add a software workaround that bypasses the hardware and returns the correct precomputed digest.
Signed-off-by: Radu Solea <radu.solea@nxp.com>
Signed-off-by: Leonard Crestez <leonard.crestez@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent ea9e7568
...@@ -28,9 +28,24 @@ ...@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4 #define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE #define DCP_BUF_SZ PAGE_SIZE
#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64 #define DCP_ALIGNMENT 64
/*
 * Precomputed digests of the empty message, matching what the DCP
 * hardware produces on i.MX6SL and i.MX6ULL.  The bytes are stored
 * flipped (reversed) for consistency with the flipped digest order
 * the hardware writes to the payload buffer.
 *
 * static: these tables are only used inside this driver.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */ /* DCP DMA descriptor. */
struct dcp_dma_desc { struct dcp_dma_desc {
uint32_t next_cmd_addr; uint32_t next_cmd_addr;
...@@ -48,6 +63,7 @@ struct dcp_coherent_block { ...@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ]; uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ]; uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ]; uint8_t sha_in_buf[DCP_BUF_SZ];
uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128]; uint8_t aes_key[2 * AES_KEYSIZE_128];
...@@ -513,8 +529,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req) ...@@ -513,8 +529,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0; dma_addr_t digest_phys = 0;
...@@ -536,10 +550,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req) ...@@ -536,10 +550,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0; desc->payload = 0;
desc->status = 0; desc->status = 0;
/*
* Align driver with hw behavior when generating null hashes
*/
if (rctx->init && rctx->fini && desc->size == 0) {
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const uint8_t *sha_buf =
(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
sha1_null_hash : sha256_null_hash;
memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
ret = 0;
goto done_run;
}
/* Set HASH_TERM bit for last transfer block. */ /* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) { if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, req->result, digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
halg->digestsize, DMA_FROM_DEVICE); DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys; desc->payload = digest_phys;
} }
...@@ -547,9 +574,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req) ...@@ -547,9 +574,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx); ret = mxs_dcp_start_dma(actx);
if (rctx->fini) if (rctx->fini)
dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret; return ret;
...@@ -567,6 +595,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) ...@@ -567,6 +595,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src); const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf; uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf; uint8_t *src_buf;
...@@ -621,11 +650,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) ...@@ -621,11 +650,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0; actx->fill = 0;
/* For some reason, the result is flipped. */ /* For some reason the result is flipped */
for (i = 0; i < halg->digestsize / 2; i++) { for (i = 0; i < halg->digestsize; i++)
swap(req->result[i], req->result[i] = out_buf[halg->digestsize - i - 1];
req->result[halg->digestsize - i - 1]);
}
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment