Commit b6e9eb69 authored by Jia Jie Ho, committed by Herbert Xu

crypto: starfive - Update hash dma usage

The hash driver currently uses a software fallback for non-word-aligned input scatterlists.
Add support for the unaligned cases by utilizing the data valid mask for DMA.
Signed-off-by: Jia Jie Ho <jiajie.ho@starfivetech.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 2ccf7a5d
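
For readers skimming the diff below, the core of the unaligned handling is the split between the exact byte count programmed into the DMA length register and the word-rounded transfer length actually moved by the DMA engine; the hardware's data valid mask discards the padding bytes. A minimal userspace sketch of that arithmetic follows (program_hash_dma() and the printed "register" values are illustrative stand-ins, not driver code):

/*
 * Sketch of the length handling used for unaligned hash input:
 * DMA_IN_LEN gets the exact message length, while the DMA transfer
 * size is rounded up to the 32-bit bus width; the hardware masks off
 * the trailing padding bytes.
 */
#include <stdint.h>
#include <stdio.h>

#define WORD		sizeof(uint32_t)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static void program_hash_dma(size_t valid_bytes)
{
	size_t xfer_bytes = ALIGN_UP(valid_bytes, WORD); /* bytes the DMA engine moves */

	printf("DMA_IN_LEN = %zu, dma transfer = %zu, padding masked = %zu\n",
	       valid_bytes, xfer_bytes, xfer_bytes - valid_bytes);
}

int main(void)
{
	program_hash_dma(64);	/* already word aligned: no padding */
	program_hash_dma(13);	/* unaligned: transfer 16, 3 bytes masked */
	return 0;
}
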
@@ -103,12 +103,6 @@ static irqreturn_t starfive_cryp_irq(int irq, void *priv)
tasklet_schedule(&cryp->aes_done);
}
if (status & STARFIVE_IE_FLAG_HASH_DONE) {
mask |= STARFIVE_IE_MASK_HASH_DONE;
writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
tasklet_schedule(&cryp->hash_done);
}
return IRQ_HANDLED;
}
@@ -132,7 +126,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
"Error remapping memory for platform device\n");
tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
@@ -220,7 +213,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
reset_control_assert(cryp->rst);
tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
return ret;
}
@@ -234,7 +226,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_rsa_unregister_algs();
tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
@@ -91,6 +91,7 @@ union starfive_hash_csr {
#define STARFIVE_HASH_KEY_DONE BIT(13)
u32 key_done :1;
u32 key_flag :1;
#define STARFIVE_HASH_HMAC_DONE BIT(15)
u32 hmac_done :1;
#define STARFIVE_HASH_BUSY BIT(16)
u32 busy :1;
@@ -189,7 +190,7 @@ struct starfive_cryp_dev {
struct scatter_walk out_walk;
struct crypto_engine *engine;
struct tasklet_struct aes_done;
struct tasklet_struct hash_done;
struct completion dma_done;
size_t assoclen;
size_t total_in;
size_t total_out;
@@ -237,6 +238,5 @@ void starfive_rsa_unregister_algs(void);
int starfive_aes_register_algs(void);
void starfive_aes_unregister_algs(void);
void starfive_hash_done_task(unsigned long param);
void starfive_aes_done_task(unsigned long param);
#endif
@@ -36,15 +36,22 @@
#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
#define STARFIVE_HASH_RESET 0x2
static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
static inline int starfive_hash_wait_busy(struct starfive_cryp_dev *cryp)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
u32 status;
return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
!(status & STARFIVE_HASH_BUSY), 10, 100000);
}
static inline int starfive_hash_wait_hmac_done(struct starfive_cryp_dev *cryp)
{
u32 status;
return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
(status & STARFIVE_HASH_HMAC_DONE), 10, 100000);
}
static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -84,64 +91,26 @@ static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
return 0;
}
static void starfive_hash_start(void *param)
static void starfive_hash_start(struct starfive_cryp_dev *cryp)
{
struct starfive_cryp_ctx *ctx = param;
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_cryp_dev *cryp = ctx->cryp;
union starfive_alg_cr alg_cr;
union starfive_hash_csr csr;
u32 stat;
dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
alg_cr.v = 0;
alg_cr.clear = 1;
writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
csr.firstb = 0;
csr.final = 1;
stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
stat &= ~STARFIVE_IE_MASK_HASH_DONE;
writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
}
static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
static void starfive_hash_dma_callback(void *param)
{
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_cryp_dev *cryp = ctx->cryp;
struct dma_async_tx_descriptor *in_desc;
union starfive_alg_cr alg_cr;
int total_len;
int ret;
if (!rctx->total) {
starfive_hash_start(ctx);
return 0;
}
writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
total_len = rctx->total;
total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len;
sg_dma_len(rctx->in_sg) = total_len;
struct starfive_cryp_dev *cryp = param;
alg_cr.v = 0;
alg_cr.start = 1;
alg_cr.hash_dma_en = 1;
writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
if (!ret)
return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
complete(&cryp->dma_done);
}
cryp->cfg_in.direction = DMA_MEM_TO_DEV;
cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
static void starfive_hash_dma_init(struct starfive_cryp_dev *cryp)
{
cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
@@ -149,50 +118,48 @@ static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
ret, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
init_completion(&cryp->dma_done);
}
if (!in_desc)
return -EINVAL;
static int starfive_hash_dma_xfer(struct starfive_cryp_dev *cryp,
struct scatterlist *sg)
{
struct dma_async_tx_descriptor *in_desc;
union starfive_alg_cr alg_cr;
int ret = 0;
in_desc->callback = starfive_hash_start;
in_desc->callback_param = ctx;
alg_cr.v = 0;
alg_cr.start = 1;
alg_cr.hash_dma_en = 1;
writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
dmaengine_submit(in_desc);
dma_async_issue_pending(cryp->tx);
writel(sg_dma_len(sg), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
sg_dma_len(sg) = ALIGN(sg_dma_len(sg), sizeof(u32));
return 0;
}
in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!in_desc) {
ret = -EINVAL;
goto end;
}
static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_cryp_dev *cryp = ctx->cryp;
int ret = 0;
reinit_completion(&cryp->dma_done);
in_desc->callback = starfive_hash_dma_callback;
in_desc->callback_param = cryp;
rctx->csr.hash.v = 0;
rctx->csr.hash.reset = 1;
writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
dmaengine_submit(in_desc);
dma_async_issue_pending(cryp->tx);
if (starfive_hash_wait_busy(ctx))
return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
if (!wait_for_completion_timeout(&cryp->dma_done,
msecs_to_jiffies(1000)))
ret = -ETIMEDOUT;
rctx->csr.hash.v = 0;
rctx->csr.hash.mode = ctx->hash_mode;
rctx->csr.hash.ie = 1;
end:
alg_cr.v = 0;
alg_cr.clear = 1;
writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
if (ctx->is_hmac) {
ret = starfive_hash_hmac_key(ctx);
if (ret)
return ret;
} else {
rctx->csr.hash.start = 1;
rctx->csr.hash.firstb = 1;
writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
}
return starfive_hash_xmit_dma(ctx);
}
static int starfive_hash_copy_hash(struct ahash_request *req)
@@ -215,58 +182,74 @@ static int starfive_hash_copy_hash(struct ahash_request *req)
return 0;
}
void starfive_hash_done_task(unsigned long param)
static void starfive_hash_done_task(struct starfive_cryp_dev *cryp)
{
struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
int err = cryp->err;
if (!err)
err = starfive_hash_copy_hash(cryp->req.hreq);
/* Reset to clear hash_done in irq register*/
writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
}
static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
int len = 0;
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_cryp_dev *cryp = ctx->cryp;
struct scatterlist *tsg;
int ret, src_nents, i;
if (!total)
return 0;
writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
if (!IS_ALIGNED(total, align))
return -EINVAL;
if (starfive_hash_wait_busy(cryp))
return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting hardware\n");
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
return -EINVAL;
rctx->csr.hash.v = 0;
rctx->csr.hash.mode = ctx->hash_mode;
if (!IS_ALIGNED(sg->length, align))
return -EINVAL;
if (ctx->is_hmac) {
ret = starfive_hash_hmac_key(ctx);
if (ret)
return ret;
} else {
rctx->csr.hash.start = 1;
rctx->csr.hash.firstb = 1;
writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
}
/* No input message, get digest and end. */
if (!rctx->total)
goto hash_start;
len += sg->length;
sg = sg_next(sg);
starfive_hash_dma_init(cryp);
for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
return dev_err_probe(cryp->dev, -ENOMEM,
"dma_map_sg error\n");
ret = starfive_hash_dma_xfer(cryp, tsg);
dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (ret)
return ret;
}
if (len != total)
return -EINVAL;
hash_start:
starfive_hash_start(cryp);
return 0;
}
if (starfive_hash_wait_busy(cryp))
return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error generating digest\n");
static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
if (ctx->is_hmac)
cryp->err = starfive_hash_wait_hmac_done(cryp);
if (!cryp)
return -ENODEV;
starfive_hash_done_task(cryp);
return starfive_hash_xmit(ctx);
return 0;
}
static int starfive_hash_init(struct ahash_request *req)
@@ -337,22 +320,6 @@ static int starfive_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->ahash_fbk_req);
}
static int starfive_hash_digest_fb(struct ahash_request *req)
{
struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
req->base.complete, req->base.data);
ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
req->result, req->nbytes);
return crypto_ahash_digest(&rctx->ahash_fbk_req);
}
static int starfive_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -370,9 +337,6 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
ctx->rctx = rctx;
if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
return starfive_hash_digest_fb(req);
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
}
@@ -406,7 +370,8 @@ static int starfive_hash_import(struct ahash_request *req, const void *in)
static int starfive_hash_init_tfm(struct crypto_ahash *hash,
const char *alg_name,
unsigned int mode)
unsigned int mode,
bool is_hmac)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
@@ -426,7 +391,7 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
crypto_ahash_reqsize(ctx->ahash_fbk));
ctx->keylen = 0;
ctx->is_hmac = is_hmac;
ctx->hash_mode = mode;
return 0;
@@ -529,81 +494,61 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha224-generic",
STARFIVE_HASH_SHA224);
STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha256-generic",
STARFIVE_HASH_SHA256);
STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha384-generic",
STARFIVE_HASH_SHA384);
STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha512-generic",
STARFIVE_HASH_SHA512);
STARFIVE_HASH_SHA512, 0);
}
static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sm3-generic",
STARFIVE_HASH_SM3);
STARFIVE_HASH_SM3, 0);
}
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
ctx->is_hmac = true;
return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
STARFIVE_HASH_SHA224);
STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
ctx->is_hmac = true;
return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
STARFIVE_HASH_SHA256);
STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
ctx->is_hmac = true;
return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
STARFIVE_HASH_SHA384);
STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
ctx->is_hmac = true;
return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
STARFIVE_HASH_SHA512);
STARFIVE_HASH_SHA512, 1);
}
static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
ctx->is_hmac = true;
return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
STARFIVE_HASH_SM3);
STARFIVE_HASH_SM3, 1);
}
static struct ahash_engine_alg algs_sha2_sm3[] = {
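
Taken together, the hunks above drop the interrupt-driven hash_done tasklet and gate each scatterlist entry's DMA transfer on a struct completion instead. A condensed sketch of that flow, with register programming and most error paths elided (starfive_hash_dma_xfer_sketch() is a shortened stand-in for the driver's starfive_hash_dma_xfer(); the cryp->tx channel and cryp->dma_done completion follow the struct starfive_cryp_dev fields changed in this commit):

/* Signalled from the dmaengine callback once the sg entry has drained. */
static void starfive_hash_dma_callback(void *param)
{
	struct starfive_cryp_dev *cryp = param;

	complete(&cryp->dma_done);
}

static int starfive_hash_dma_xfer_sketch(struct starfive_cryp_dev *cryp,
					 struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *in_desc;

	in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	reinit_completion(&cryp->dma_done);
	in_desc->callback = starfive_hash_dma_callback;
	in_desc->callback_param = cryp;

	dmaengine_submit(in_desc);
	dma_async_issue_pending(cryp->tx);

	/* Wait for this sg entry before the caller maps the next one. */
	if (!wait_for_completion_timeout(&cryp->dma_done,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}
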