Commit 334d37c9 authored by Horia Geantă, committed by Herbert Xu

crypto: caam - update IV using HW support

Modify drivers to perform skcipher IV update using the crypto engine,
instead of performing the operation in SW.

Besides being more efficient, this also fixes IV update for CTR mode.

The output HW S/G table is appended with an entry pointing to the same
IV buffer used as input (which is now mapped DMA_BIDIRECTIONAL).

The AS (Algorithm State) parameter of the OPERATION command is changed
from INITFINAL to INIT in the descriptors used by ctr(aes) and cbc(aes).
This is needed because, when the FINAL bit is set, HW skips updating the
IV in the Context Register for the last data block.
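
In short, the shared descriptors now run the cipher with AS=INIT (so the
Context Register keeps the updated IV), store the Context Register through
the output sequence, and the completion callbacks copy the IV buffer
appended to the HW S/G table back into req->iv. A condensed sketch,
assembled from the hunks below (surrounding code and error handling elided):

	/* Shared descriptor: AS=INIT keeps the Context Register updated,
	 * then the IV is stored through the output sequence.
	 */
	append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT);
	skcipher_append_src_dst(desc);
	if (ivsize)
		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
				 LDST_CLASS_1_CCB |
				 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Completion callback: the IV buffer sits right after the HW S/G
	 * table ([IV, src][dst, IV] layout), so the updated IV is copied
	 * straight into req->iv for both encryption and decryption.
	 */
	if (ivsize)
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);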
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 059d73ee
@@ -898,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
@@ -977,7 +977,6 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	struct skcipher_request *req = context;
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -991,16 +990,17 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block when running in CBC mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
-		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
-					 ivsize, ivsize, 0);
-	if (ivsize)
+	if (ivsize) {
+		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+		       ivsize);
 		print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
 				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 				     edesc->src_nents > 1 ? 100 : ivsize, 1);
+	}
 	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
@@ -1027,8 +1027,20 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	skcipher_unmap(jrdev, edesc, req);
-	print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	if (ivsize) {
+		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+		       ivsize);
+		print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+				     ivsize, 1);
+	}
 	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1260,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 	if (likely(req->src == req->dst)) {
 		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
 		out_options = in_options;
-	} else if (edesc->mapped_dst_nents == 1) {
+	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
 		dst_dma = sg_dma_address(req->dst);
 	} else {
 		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1268,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 		out_options = LDST_SGF;
 	}
-	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
 }
@@ -1699,22 +1711,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	dst_sg_idx = sec4_sg_ents;
 	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
 	 * the end of the table by allocating more S/G entries. Logic:
-	 * if (src != dst && output S/G)
+	 * if (output S/G)
 	 *      pad output S/G, if needed
-	 * else if (src == dst && S/G)
-	 *      overlapping S/Gs; pad one of them
 	 * else if (input S/G) ...
 	 *      pad input S/G, if needed
 	 */
-	if (mapped_dst_nents > 1)
-		sec4_sg_ents += pad_sg_nents(mapped_dst_nents);
-	else if ((req->src == req->dst) && (mapped_src_nents > 1))
-		sec4_sg_ents = max(pad_sg_nents(sec4_sg_ents),
-				   !!ivsize + pad_sg_nents(mapped_src_nents));
-	else
+	if (ivsize || mapped_dst_nents > 1) {
+		if (req->src == req->dst)
+			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+		else
+			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+						     !!ivsize);
+	} else {
 		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+	}
 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
@@ -1740,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	/* Make sure IV is located in a DMAable area */
 	if (ivsize) {
-		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
 		memcpy(iv, req->iv, ivsize);
-		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(jrdev, iv_dma)) {
 			dev_err(jrdev, "unable to map IV\n");
 			caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1755,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
-				   !!ivsize, 0);
-	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, req->cryptlen,
-				   edesc->sec4_sg + dst_sg_idx, 0);
-	}
+		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+			      !!ivsize, 0);
+	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+			      dst_sg_idx, 0);
+	if (ivsize)
+		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+				   mapped_dst_nents, iv_dma, ivsize, 0);
+	if (ivsize || mapped_dst_nents > 1)
+		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+				    mapped_dst_nents);
 	if (sec4_sg_bytes) {
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1824,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	int ret = 0;
@@ -1834,14 +1856,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block when running in CBC mode.
-	 */
-	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
-		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
-					 ivsize, ivsize, 0);
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
 	desc = edesc->hw_desc;
......
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 	}
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT);
+	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, jump_cmd);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
+			 OP_ALG_AAI_DK);
 	set_jump_tgt_here(desc, uncond_jump_cmd);
 }
@@ -1392,12 +1391,18 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
 			 LDST_OFFSET_SHIFT));
 	/* Load operation */
-	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
			 OP_ALG_ENCRYPT);
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
+	/* Store IV */
+	if (ivsize)
+		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				 LDST_OFFSET_SHIFT));
 	print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
 			     1);
@@ -1459,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 	/* Choose operation */
 	if (ctx1_iv_off)
-		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
				 OP_ALG_DECRYPT);
 	else
 		append_dec_op1(desc, cdata->algtype);
@@ -1467,6 +1472,12 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
+	/* Store IV */
+	if (ivsize)
+		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				 LDST_OFFSET_SHIFT));
 	print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
 			     1);
@@ -1516,6 +1527,10 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
+	/* Store upper 8B of IV */
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x20 << LDST_OFFSET_SHIFT));
 	print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4,
 			     desc, desc_bytes(desc), 1);
@@ -1564,6 +1579,10 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
+	/* Store upper 8B of IV */
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x20 << LDST_OFFSET_SHIFT));
 	print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
 			     desc_bytes(desc), 1);
......
@@ -44,9 +44,9 @@
 #define DESC_SKCIPHER_BASE	(3 * CAAM_CMD_SZ)
 #define DESC_SKCIPHER_ENC_LEN	(DESC_SKCIPHER_BASE + \
-				 20 * CAAM_CMD_SZ)
+				 21 * CAAM_CMD_SZ)
 #define DESC_SKCIPHER_DEC_LEN	(DESC_SKCIPHER_BASE + \
-				 15 * CAAM_CMD_SZ)
+				 16 * CAAM_CMD_SZ)
 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
 				 unsigned int icvsize, int era);
......
@@ -831,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
-		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
+		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
 {
 	if (dst != src) {
 		if (src_nents)
@@ -843,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (qm_sg_bytes)
 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
 }
@@ -856,7 +857,8 @@ static void aead_unmap(struct device *dev,
 	int ivsize = crypto_aead_ivsize(aead);
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
@@ -867,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 }
 static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -1036,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1051,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (dma_mapping_error(qidev, iv_dma)) {
 			dev_err(qidev, "unable to map IV\n");
 			caam_unmap(qidev, req->src, req->dst, src_nents,
-				   dst_nents, 0, 0, 0, 0);
+				   dst_nents, 0, 0, DMA_NONE, 0, 0);
 			qi_cache_free(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
@@ -1070,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
 		dev_err(qidev, "unable to map assoclen\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1092,7 +1095,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(qidev, "unable to map S/G table\n");
 		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1206,11 +1209,10 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
-		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
-					 ivsize, ivsize, 0);
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, status);
@@ -1279,22 +1281,17 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	dst_sg_idx = qm_sg_ents;
 	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-	 * the end of the table by allocating more S/G entries. Logic:
-	 * if (src != dst && output S/G)
-	 *      pad output S/G, if needed
-	 * else if (src == dst && S/G)
-	 *      overlapping S/Gs; pad one of them
-	 * else if (input S/G) ...
-	 *      pad input S/G, if needed
+	 * the end of the table by allocating more S/G entries.
 	 */
-	if (mapped_dst_nents > 1)
-		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
-	else if ((req->src == req->dst) && (mapped_src_nents > 1))
-		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
-				 1 + pad_sg_nents(mapped_src_nents));
+	if (req->src != req->dst)
+		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
 	else
-		qm_sg_ents = pad_sg_nents(qm_sg_ents);
+		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
@@ -1302,7 +1299,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1311,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	if (unlikely(!edesc)) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1320,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	iv = (u8 *)(sg_table + qm_sg_ents);
 	memcpy(iv, req->iv, ivsize);
-	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(qidev, iv_dma)) {
 		dev_err(qidev, "unable to map IV\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1338,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	edesc->drv_req.drv_ctx = drv_ctx;
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
+	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
-	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
-				 dst_sg_idx, 0);
+	if (req->src != req->dst)
+		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
 		dev_err(qidev, "unable to map S/G table\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1359,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
 				  ivsize + req->cryptlen, 0);
-	if (req->src == req->dst) {
+	if (req->src == req->dst)
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-				     sizeof(*sg_table), req->cryptlen, 0);
-	} else if (mapped_dst_nents > 1) {
+				     sizeof(*sg_table), req->cryptlen + ivsize,
+				     0);
+	else
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-				     sizeof(*sg_table), req->cryptlen, 0);
-	} else {
-		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-				 req->cryptlen, 0);
-	}
+				     sizeof(*sg_table), req->cryptlen + ivsize,
+				     0);
 	return edesc;
 }
@@ -1378,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int ret;
 	if (unlikely(caam_congested))
@@ -1389,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	if (!encrypt)
-		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
-					 ivsize, ivsize, 0);
 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
 	if (!ret) {
 		ret = -EINPROGRESS;
......
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
-		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
+		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
 {
 	if (dst != src) {
 		if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (qm_sg_bytes)
 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -485,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_nents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -500,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (dma_mapping_error(dev, iv_dma)) {
 			dev_err(dev, "unable to map IV\n");
 			caam_unmap(dev, req->src, req->dst, src_nents,
-				   dst_nents, 0, 0, 0, 0);
+				   dst_nents, 0, 0, DMA_NONE, 0, 0);
 			qi_cache_free(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
@@ -524,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
 		dev_err(dev, "unable to map assoclen\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -546,7 +547,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(dev, "unable to map S/G table\n");
 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1101,22 +1102,17 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	dst_sg_idx = qm_sg_ents;
 	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-	 * the end of the table by allocating more S/G entries. Logic:
-	 * if (src != dst && output S/G)
-	 *      pad output S/G, if needed
-	 * else if (src == dst && S/G)
-	 *      overlapping S/Gs; pad one of them
-	 * else if (input S/G) ...
-	 *      pad input S/G, if needed
+	 * the end of the table by allocating more S/G entries.
 	 */
-	if (mapped_dst_nents > 1)
-		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
-	else if ((req->src == req->dst) && (mapped_src_nents > 1))
-		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
-				 1 + pad_sg_nents(mapped_src_nents));
+	if (req->src != req->dst)
+		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
 	else
-		qm_sg_ents = pad_sg_nents(qm_sg_ents);
+		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
@@ -1124,7 +1120,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1133,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	if (unlikely(!edesc)) {
 		dev_err(dev, "could not allocate extended descriptor\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1142,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	iv = (u8 *)(sg_table + qm_sg_ents);
 	memcpy(iv, req->iv, ivsize);
-	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, iv_dma)) {
 		dev_err(dev, "unable to map IV\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1157,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	edesc->qm_sg_bytes = qm_sg_bytes;
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
+	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
-	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
-				 dst_sg_idx, 0);
+	if (req->src != req->dst)
+		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
 		dev_err(dev, "unable to map S/G table\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1176,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
-	dpaa2_fl_set_len(out_fle, req->cryptlen);
+	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-	if (req->src == req->dst) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	if (req->src == req->dst)
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
 				  sizeof(*sg_table));
-	} else if (mapped_dst_nents > 1) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	else
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
 				  sizeof(*sg_table));
-	} else {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
-	}
 	return edesc;
 }
@@ -1204,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 	int ivsize = crypto_aead_ivsize(aead);
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
@@ -1215,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 }
 static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1372,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-				 ivsize, 0);
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1407,6 +1403,14 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 		       edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 	skcipher_unmap(ctx->dev, edesc, req);
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
 }
@@ -1445,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_request *caam_req = skcipher_request_ctx(req);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int ret;
 	/* allocate extended descriptor */
@@ -1453,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-				 ivsize, 0);
 	caam_req->flc = &ctx->flc[DECRYPT];
 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 	caam_req->cbk = skcipher_decrypt_done;
......