Commit b8592027 authored by Ofer Heifetz, committed by Herbert Xu

crypto: inside-secure - move hash result dma mapping to request

In heavy traffic the DMA mapping is overwritten by multiple requests,
as the DMA address is stored in a global (per-transform) context. This
patch moves this information into the per-hash-request context so that
it can't be overwritten.

Fixes: 1b44c5a6 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Ofer Heifetz <oferh@marvell.com>
[Antoine: rebased the patch, small fixes, commit message.]
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 23ea8b63
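
For context, a minimal illustrative sketch (not taken from the driver; the
struct and function names below are hypothetical, simplified stand-ins) of why
keeping the result DMA handle in the shared per-tfm context races under
concurrent requests, while a per-request field does not:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Hypothetical, simplified types for illustration only. */
	struct tfm_ctx {		/* one instance per transform, shared by all its requests */
		dma_addr_t result_dma;	/* request B can overwrite request A's handle here */
	};

	struct hash_req_ctx {		/* one instance per request, private to it */
		dma_addr_t result_dma;	/* each request unmaps exactly the mapping it created */
	};

	/* Before the patch: every in-flight request on the same tfm writes the same field. */
	static int send_shared(struct device *dev, struct tfm_ctx *ctx, void *state, size_t sz)
	{
		ctx->result_dma = dma_map_single(dev, state, sz, DMA_FROM_DEVICE);
		return dma_mapping_error(dev, ctx->result_dma) ? -EINVAL : 0;
	}

	/* After the patch: the mapping lives and dies with the request that owns it. */
	static int send_per_req(struct device *dev, struct hash_req_ctx *req, void *state, size_t sz)
	{
		req->result_dma = dma_map_single(dev, state, sz, DMA_FROM_DEVICE);
		return dma_mapping_error(dev, req->result_dma) ? -EINVAL : 0;
	}

On completion the per-request field can then be unmapped and cleared, as the
patched safexcel_handle_req_result() below does, without touching any other
request's state.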
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -538,15 +538,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 }
 
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req,
-			   int result_sz)
+			   struct crypto_async_request *req)
 {
 	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
 
-	if (ctx->result_dma)
-		dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
-				 DMA_FROM_DEVICE);
-
 	if (ctx->cache) {
 		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
 				 DMA_TO_DEVICE);
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -580,7 +580,6 @@ struct safexcel_context {
 	bool exit_inv;
 
 	/* Used for ahash requests */
-	dma_addr_t result_dma;
 	void *cache;
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
@@ -608,8 +607,7 @@ struct safexcel_inv_result {
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req,
-			   int result_sz);
+			   struct crypto_async_request *req);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -34,6 +34,7 @@ struct safexcel_ahash_req {
 	bool needs_inv;
 
 	int nents;
+	dma_addr_t result_dma;
 
 	u8 state_sz;    /* expected sate size, only set once */
 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -158,7 +159,13 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		sreq->nents = 0;
 	}
 
-	safexcel_free_context(priv, async, sreq->state_sz);
+	if (sreq->result_dma) {
+		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+				 DMA_FROM_DEVICE);
+		sreq->result_dma = 0;
+	}
+
+	safexcel_free_context(priv, async);
 
 	cache_len = sreq->len - sreq->processed;
 	if (cache_len)
@@ -291,15 +298,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
-					      req->state_sz, DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
+	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, req->result_dma)) {
 		ret = -EINVAL;
 		goto cdesc_rollback;
 	}
 
 	/* Add a result descriptor */
-	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
-				   req->state_sz);
+	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
+				   req->state_sz);
 	if (IS_ERR(rdesc)) {
 		ret = PTR_ERR(rdesc);