Commit aabc11c1 authored by John Allen, committed by Greg Kroah-Hartman

crypto: ccp - Fix use of merged scatterlists

commit 8a302808 upstream.

Running the crypto manager self tests with
CONFIG_CRYPTO_MANAGER_EXTRA_TESTS may result in several types of errors
when using the ccp-crypto driver:

alg: skcipher: cbc-des3-ccp encryption failed on test vector 0; expected_error=0, actual_error=-5 ...

alg: skcipher: ctr-aes-ccp decryption overran dst buffer on test vector 0 ...

alg: ahash: sha224-ccp test failed (wrong result) on test vector ...

These errors are the result of improper processing of scatterlists mapped
for DMA.

Given a scatterlist in which entries are merged as part of mapping the
scatterlist for DMA, the DMA length of a merged entry will reflect the
combined length of the entries that were merged. The subsequent
scatterlist entry will contain DMA information for the scatterlist entry
after the last merged entry, but the non-DMA information will be that of
the first merged entry.

The ccp driver does not take this scatterlist merging into account. To
address this, add a second scatterlist pointer to track the current
position in the DMA mapped representation of the scatterlist. Both the DMA
representation and the original representation of the scatterlist must be
tracked as while most of the driver can use just the DMA representation,
scatterlist_map_and_copy() must use the original representation and
expects the scatterlist pointer to be accurate to the original
representation.

In order to properly walk the original scatterlist, the scatterlist must
be walked until the combined lengths of the entries seen is equal to the
DMA length of the current entry being processed in the DMA mapped
representation.

Fixes: 63b94509 ("crypto: ccp - CCP device driver and interface support")
Signed-off-by: John Allen <john.allen@amd.com>
Cc: stable@vger.kernel.org
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ef1b9164
@@ -471,6 +471,7 @@ struct ccp_sg_workarea {
 	unsigned int sg_used;

 	struct scatterlist *dma_sg;
+	struct scatterlist *dma_sg_head;
 	struct device *dma_dev;
 	unsigned int dma_count;
 	enum dma_data_direction dma_dir;
@@ -67,7 +67,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
 static void ccp_sg_free(struct ccp_sg_workarea *wa)
 {
 	if (wa->dma_count)
-		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

 	wa->dma_count = 0;
 }
@@ -96,6 +96,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
 		return 0;

 	wa->dma_sg = sg;
+	wa->dma_sg_head = sg;
 	wa->dma_dev = dev;
 	wa->dma_dir = dma_dir;
 	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
@@ -108,14 +109,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
 {
 	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+	unsigned int sg_combined_len = 0;

 	if (!wa->sg)
 		return;

 	wa->sg_used += nbytes;
 	wa->bytes_left -= nbytes;
-	if (wa->sg_used == wa->sg->length) {
-		wa->sg = sg_next(wa->sg);
+	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
+		/* Advance to the next DMA scatterlist entry */
+		wa->dma_sg = sg_next(wa->dma_sg);
+
+		/* In the case that the DMA mapped scatterlist has entries
+		 * that have been merged, the non-DMA mapped scatterlist
+		 * must be advanced multiple times for each merged entry.
+		 * This ensures that the current non-DMA mapped entry
+		 * corresponds to the current DMA mapped entry.
+		 */
+		do {
+			sg_combined_len += wa->sg->length;
+			wa->sg = sg_next(wa->sg);
+		} while (wa->sg_used > sg_combined_len);
+
 		wa->sg_used = 0;
 	}
 }
@@ -304,7 +319,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
 	/* Update the structures and generate the count */
 	buf_count = 0;
 	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
-		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
 			     dm_wa->length - buf_count);
 		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
@@ -336,11 +351,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 	 * and destination. The resulting len values will always be <= UINT_MAX
 	 * because the dma length is an unsigned int.
 	 */
-	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
 	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

 	if (dst) {
-		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
 		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
 	} else {
@@ -370,7 +385,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 		/* Enough data in the sg element, but we need to
 		 * adjust for any previously copied data
 		 */
-		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
 		op->src.u.dma.offset = src->sg_wa.sg_used;
 		op->src.u.dma.length = op_len & ~(block_size - 1);
@@ -391,7 +406,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 			/* Enough room in the sg element, but we need to
 			 * adjust for any previously used area
 			 */
-			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
 			op->dst.u.dma.offset = dst->sg_wa.sg_used;
 			op->dst.u.dma.length = op->src.u.dma.length;
 		}
@@ -2034,7 +2049,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	dst.sg_wa.sg_used = 0;
 	for (i = 1; i <= src.sg_wa.dma_count; i++) {
 		if (!dst.sg_wa.sg ||
-		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
 			ret = -EINVAL;
 			goto e_dst;
 		}
@@ -2060,8 +2075,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			goto e_dst;
 		}

-		dst.sg_wa.sg_used += src.sg_wa.sg->length;
-		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
 			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
 			dst.sg_wa.sg_used = 0;
 		}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.