Commit f1bf9e60 authored by Horia Geantă, committed by Herbert Xu

crypto: caam - fix DMA mapping direction for RSA forms 2 & 3

The crypto engine needs some temporary locations in external memory when
running RSA decrypt forms 2 and 3 (CRT).
These are named "tmp1" and "tmp2" in the PDB.

Update the DMA mapping direction of tmp1 and tmp2 from DMA_TO_DEVICE to
DMA_BIDIRECTIONAL, since the engine needs read/write access.

Cc: <stable@vger.kernel.org> # 4.13+
Fixes: 52e26d77 ("crypto: caam - add support for RSA key form 2")
Fixes: 4a651b12 ("crypto: caam - add support for RSA key form 3")
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent ad876a18
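
For context, a minimal sketch of the DMA-direction rule this patch applies, using a hypothetical scratch-buffer helper and a generic struct device (not part of the commit): a buffer that the accelerator both reads and writes has to be mapped with DMA_BIDIRECTIONAL rather than DMA_TO_DEVICE, and unmapped with the same direction it was mapped with.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: allocate and map a scratch buffer that the
 * device both reads and writes, hence DMA_BIDIRECTIONAL.
 */
static int map_scratch(struct device *dev, size_t len, void **buf,
                       dma_addr_t *dma)
{
        *buf = kmalloc(len, GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;

        *dma = dma_map_single(dev, *buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma)) {
                dev_err(dev, "unable to map scratch buffer\n");
                kfree(*buf);
                return -ENOMEM;
        }

        return 0;
}

/* Unmap with the same direction used at map time, then free. */
static void unmap_scratch(struct device *dev, void *buf, dma_addr_t dma,
                          size_t len)
{
        dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
        kfree(buf);
}

Mapping such a buffer TO_DEVICE tells the DMA API the device will only read it, so on non-coherent platforms (or with bounce buffering) the engine's writes to tmp1/tmp2 may never become visible to the CPU; BIDIRECTIONAL keeps the CPU and device views consistent in both directions.
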
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 		goto unmap_p;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_q;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 		goto unmap_dq;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_qinv;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq: