Commit c114cf7f authored by Herbert Xu

crypto: marvell/cesa - Fix use of sg_pcopy on iomem pointer

The cesa driver mixes use of iomem pointers and normal kernel
pointers.  Sometimes it uses memcpy_toio/memcpy_fromio on both
while other times it would use straight memcpy on both, through
the sg_pcopy_* helpers.

This patch fixes this by adding a new field sram_pool to the engine
for the normal pointer case which then allows us to use the right
interface depending on the value of engine->pool.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 43a942d2
...@@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx) ...@@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
engine->pool = of_gen_pool_get(cesa->dev->of_node, engine->pool = of_gen_pool_get(cesa->dev->of_node,
"marvell,crypto-srams", idx); "marvell,crypto-srams", idx);
if (engine->pool) { if (engine->pool) {
engine->sram = gen_pool_dma_alloc(engine->pool, engine->sram_pool = gen_pool_dma_alloc(engine->pool,
cesa->sram_size, cesa->sram_size,
&engine->sram_dma); &engine->sram_dma);
if (engine->sram) if (engine->sram_pool)
return 0; return 0;
engine->pool = NULL; engine->pool = NULL;
...@@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx) ...@@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_engine *engine = &cesa->engines[idx]; struct mv_cesa_engine *engine = &cesa->engines[idx];
if (engine->pool) if (engine->pool)
gen_pool_free(engine->pool, (unsigned long)engine->sram, gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
cesa->sram_size); cesa->sram_size);
else else
dma_unmap_resource(cesa->dev, engine->sram_dma, dma_unmap_resource(cesa->dev, engine->sram_dma,
......
...@@ -428,6 +428,7 @@ struct mv_cesa_dev { ...@@ -428,6 +428,7 @@ struct mv_cesa_dev {
* @id: engine id * @id: engine id
* @regs: engine registers * @regs: engine registers
* @sram: SRAM memory region * @sram: SRAM memory region
* @sram_pool: SRAM memory region from pool
* @sram_dma: DMA address of the SRAM memory region * @sram_dma: DMA address of the SRAM memory region
* @lock: engine lock * @lock: engine lock
* @req: current crypto request * @req: current crypto request
...@@ -448,7 +449,10 @@ struct mv_cesa_dev { ...@@ -448,7 +449,10 @@ struct mv_cesa_dev {
struct mv_cesa_engine { struct mv_cesa_engine {
int id; int id;
void __iomem *regs; void __iomem *regs;
void __iomem *sram; union {
void __iomem *sram;
void *sram_pool;
};
dma_addr_t sram_dma; dma_addr_t sram_dma;
spinlock_t lock; spinlock_t lock;
struct crypto_async_request *req; struct crypto_async_request *req;
...@@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, ...@@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_sg_dma_iter *sgiter, struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags); gfp_t gfp_flags);
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
struct scatterlist *sgl, unsigned int nents,
unsigned int sram_off, size_t buflen, off_t skip,
bool to_sram);
/*
 * Copy @buflen bytes from the scatterlist @sgl (skipping the first @skip
 * bytes) into the engine SRAM at offset @sram_off.  Thin wrapper around
 * mv_cesa_sg_copy() with the direction fixed to "towards SRAM", so the
 * right copy primitive (memcpy vs memcpy_toio) is picked based on
 * engine->pool.  Returns the number of bytes actually copied.
 */
static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
					     struct scatterlist *sgl,
					     unsigned int nents,
					     unsigned int sram_off,
					     size_t buflen, off_t skip)
{
	const bool to_sram = true;

	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
			       to_sram);
}
/*
 * Copy @buflen bytes out of the engine SRAM at offset @sram_off into the
 * scatterlist @sgl (skipping the first @skip destination bytes).  Thin
 * wrapper around mv_cesa_sg_copy() with the direction fixed to "from
 * SRAM", so the right copy primitive (memcpy vs memcpy_fromio) is picked
 * based on engine->pool.  Returns the number of bytes actually copied.
 */
static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
					       struct scatterlist *sgl,
					       unsigned int nents,
					       unsigned int sram_off,
					       size_t buflen, off_t skip)
{
	const bool to_sram = false;

	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
			       to_sram);
}
/* Algorithm definitions */ /* Algorithm definitions */
extern struct ahash_alg mv_md5_alg; extern struct ahash_alg mv_md5_alg;
......
...@@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req) ...@@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
CESA_SA_SRAM_PAYLOAD_SIZE); CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op); mv_cesa_adjust_op(engine, &sreq->op);
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op)); if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
len = sg_pcopy_to_buffer(req->src, creq->src_nents, len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
engine->sram + CESA_SA_DATA_SRAM_OFFSET, CESA_SA_DATA_SRAM_OFFSET, len,
len, sreq->offset); sreq->offset);
sreq->size = len; sreq->size = len;
mv_cesa_set_crypt_op_len(&sreq->op, len); mv_cesa_set_crypt_op_len(&sreq->op, len);
/* FIXME: only update enc_len field */ /* FIXME: only update enc_len field */
if (!sreq->skip_ctx) { if (!sreq->skip_ctx) {
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op)); if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
sreq->skip_ctx = true; sreq->skip_ctx = true;
} else { } else if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc)); memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
}
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
...@@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req, ...@@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
struct mv_cesa_engine *engine = creq->base.engine; struct mv_cesa_engine *engine = creq->base.engine;
size_t len; size_t len;
len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
engine->sram + CESA_SA_DATA_SRAM_OFFSET, CESA_SA_DATA_SRAM_OFFSET, sreq->size,
sreq->size, sreq->offset); sreq->offset);
sreq->offset += len; sreq->offset += len;
if (sreq->offset < req->cryptlen) if (sreq->offset < req->cryptlen)
...@@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req) ...@@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
basereq = &creq->base; basereq = &creq->base;
memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv, memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize); ivsize);
} else { } else if (engine->pool)
memcpy(skreq->iv,
engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
else
memcpy_fromio(skreq->iv, memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize); ivsize);
}
} }
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = { static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
......
...@@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) ...@@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
int i; int i;
mv_cesa_adjust_op(engine, &creq->op_tmpl); mv_cesa_adjust_op(engine, &creq->op_tmpl);
memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); if (engine->pool)
memcpy(engine->sram_pool, &creq->op_tmpl,
sizeof(creq->op_tmpl));
else
memcpy_toio(engine->sram, &creq->op_tmpl,
sizeof(creq->op_tmpl));
if (!sreq->offset) { if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
...@@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) ...@@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
engine->regs + CESA_IVDIG(i)); engine->regs + CESA_IVDIG(i));
} }
if (creq->cache_ptr) if (creq->cache_ptr) {
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET, if (engine->pool)
creq->cache, creq->cache_ptr); memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
creq->cache, creq->cache_ptr);
else
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
creq->cache, creq->cache_ptr);
}
len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE); CESA_SA_SRAM_PAYLOAD_SIZE);
...@@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) ...@@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
} }
if (len - creq->cache_ptr) if (len - creq->cache_ptr)
sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents, sreq->offset += mv_cesa_sg_copy_to_sram(
engine->sram + engine, req->src, creq->src_nents,
CESA_SA_DATA_SRAM_OFFSET + CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
creq->cache_ptr, len - creq->cache_ptr, sreq->offset);
len - creq->cache_ptr,
sreq->offset);
op = &creq->op_tmpl; op = &creq->op_tmpl;
...@@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) ...@@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
len &= CESA_HASH_BLOCK_SIZE_MSK; len &= CESA_HASH_BLOCK_SIZE_MSK;
new_cache_ptr = 64 - trailerlen; new_cache_ptr = 64 - trailerlen;
memcpy_fromio(creq->cache, if (engine->pool)
engine->sram + memcpy(creq->cache,
CESA_SA_DATA_SRAM_OFFSET + len, engine->sram_pool +
new_cache_ptr); CESA_SA_DATA_SRAM_OFFSET + len,
new_cache_ptr);
else
memcpy_fromio(creq->cache,
engine->sram +
CESA_SA_DATA_SRAM_OFFSET +
len,
new_cache_ptr);
} else { } else {
i = mv_cesa_ahash_pad_req(creq, creq->cache); i = mv_cesa_ahash_pad_req(creq, creq->cache);
len += i; len += i;
memcpy_toio(engine->sram + len + if (engine->pool)
CESA_SA_DATA_SRAM_OFFSET, memcpy(engine->sram_pool + len +
creq->cache, i); CESA_SA_DATA_SRAM_OFFSET,
creq->cache, i);
else
memcpy_toio(engine->sram + len +
CESA_SA_DATA_SRAM_OFFSET,
creq->cache, i);
} }
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
...@@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) ...@@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
/* FIXME: only update enc_len field */ /* FIXME: only update enc_len field */
memcpy_toio(engine->sram, op, sizeof(*op)); if (engine->pool)
memcpy(engine->sram_pool, op, sizeof(*op));
else
memcpy_toio(engine->sram, op, sizeof(*op));
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
......
...@@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, ...@@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
return 0; return 0;
} }
/*
 * mv_cesa_sg_copy() - copy data between a scatterlist and the engine SRAM.
 * @engine:   CESA engine whose SRAM is the source or destination
 * @sgl:      scatterlist on the other side of the copy
 * @nents:    number of entries in @sgl
 * @sram_off: byte offset into the SRAM region
 * @buflen:   maximum number of bytes to copy
 * @skip:     bytes to skip at the start of @sgl before copying
 * @to_sram:  true to copy sgl -> SRAM, false for SRAM -> sgl
 *
 * When the SRAM was obtained from a gen_pool (engine->pool set) it is a
 * normal kernel pointer and plain memcpy() is used; otherwise it is an
 * __iomem mapping and memcpy_toio()/memcpy_fromio() must be used instead.
 *
 * Return: the number of bytes copied (0 if the skip ran past the list).
 */
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
		       struct scatterlist *sgl, unsigned int nents,
		       unsigned int sram_off, size_t buflen, off_t skip,
		       bool to_sram)
{
	unsigned int flags = SG_MITER_ATOMIC |
			     (to_sram ? SG_MITER_FROM_SG : SG_MITER_TO_SG);
	struct sg_mapping_iter iter;
	unsigned int copied = 0;

	sg_miter_start(&iter, sgl, nents, flags);

	if (!sg_miter_skip(&iter, skip))
		return 0;

	while (copied < buflen && sg_miter_next(&iter)) {
		unsigned int chunk = min(iter.length, buflen - copied);
		unsigned int off = sram_off + copied;

		if (engine->pool) {
			/* SRAM is plain memory from the gen_pool. */
			if (to_sram)
				memcpy(engine->sram_pool + off, iter.addr,
				       chunk);
			else
				memcpy(iter.addr, engine->sram_pool + off,
				       chunk);
		} else {
			/* SRAM is an __iomem mapping. */
			if (to_sram)
				memcpy_toio(engine->sram + off, iter.addr,
					    chunk);
			else
				memcpy_fromio(iter.addr, engine->sram + off,
					      chunk);
		}

		copied += chunk;
	}

	sg_miter_stop(&iter);

	return copied;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment