Commit 133c3d43 authored by Tero Kristo, committed by Herbert Xu

crypto: omap-sham - convert to use crypto engine

Convert the omap-sham driver to use the crypto engine for queue handling,
instead of a local implementation.
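
For reference, the conversion follows the usual crypto engine pattern: embed a
struct crypto_engine_ctx as the first member of the tfm context and hook up its
do_one_request/prepare_request callbacks, allocate and start an engine at probe
time, hand incoming requests to the engine instead of a driver-local queue, and
report completion back with crypto_finalize_hash_request(). A minimal sketch of
that pattern is shown below; it is illustrative only, and the my_* names are
placeholders rather than symbols from this driver:

  #include <linux/kernel.h>
  #include <linux/crypto.h>
  #include <crypto/engine.h>
  #include <crypto/hash.h>

  struct my_dev {
          struct crypto_engine *engine;
  };

  struct my_tfm_ctx {
          /* must be first so the engine can find it via crypto_tfm_ctx() */
          struct crypto_engine_ctx enginectx;
  };

  /* Called by the engine for each request it dequeues. */
  static int my_one_request(struct crypto_engine *engine, void *areq)
  {
          struct ahash_request *req = container_of(areq, struct ahash_request,
                                                   base);

          pr_debug("handing %u bytes to the hardware\n", req->nbytes);
          /*
           * Program the hardware for 'req' here; once it completes (e.g. from
           * the irq handler or tasklet), report the result to the engine with
           * crypto_finalize_hash_request(engine, req, err).
           */
          return 0;
  }

  static int my_cra_init(struct crypto_tfm *tfm)
  {
          struct my_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

          tctx->enginectx.op.do_one_request = my_one_request;
          tctx->enginectx.op.prepare_request = NULL;  /* optional hook */
          tctx->enginectx.op.unprepare_request = NULL;
          return 0;
  }

  /* probe time: allocate and start the engine */
  static int my_engine_init(struct my_dev *dd, struct device *dev)
  {
          dd->engine = crypto_engine_alloc_init(dev, 1);
          if (!dd->engine)
                  return -ENOMEM;
          return crypto_engine_start(dd->engine);
  }

  /* enqueue path: push the request to the engine instead of a local queue */
  static int my_enqueue(struct my_dev *dd, struct ahash_request *req)
  {
          return crypto_transfer_hash_request_to_engine(dd->engine, req);
  }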
Signed-off-by: Tero Kristo <t-kristo@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 61f033ba
@@ -39,6 +39,7 @@
 #include <crypto/hash.h>
 #include <crypto/hmac.h>
 #include <crypto/internal/hash.h>
+#include <crypto/engine.h>

 #define MD5_DIGEST_SIZE         16
@@ -100,7 +101,6 @@
 #define DEFAULT_AUTOSUSPEND_DELAY       1000

 /* mostly device flags */
-#define FLAGS_BUSY              0
 #define FLAGS_FINAL             1
 #define FLAGS_DMA_ACTIVE        2
 #define FLAGS_OUTPUT_READY      3
@@ -144,7 +144,7 @@ struct omap_sham_dev;
 struct omap_sham_reqctx {
         struct omap_sham_dev    *dd;
         unsigned long           flags;
-        unsigned long           op;
+        u8                      op;

         u8                      digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
         size_t                  digcnt;
@@ -168,6 +168,7 @@ struct omap_sham_hmac_ctx {
 };

 struct omap_sham_ctx {
+        struct crypto_engine_ctx        enginectx;
         unsigned long           flags;

         /* fallback stuff */
@@ -219,7 +220,6 @@ struct omap_sham_dev {
         struct device           *dev;
         void __iomem            *io_base;
         int                     irq;
-        spinlock_t              lock;
         int                     err;
         struct dma_chan         *dma_lch;
         struct tasklet_struct   done_task;
@@ -230,6 +230,7 @@ struct omap_sham_dev {
         int                     fallback_sz;
         struct crypto_queue     queue;
         struct ahash_request    *req;
+        struct crypto_engine    *engine;

         const struct omap_sham_pdata    *pdata;
 };
@@ -245,6 +246,9 @@ static struct omap_sham_drv sham = {
         .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 };

+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
+static void omap_sham_finish_req(struct ahash_request *req, int err);
+
 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 {
         return __raw_readl(dd->io_base + offset);
@@ -854,13 +858,16 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
         return 0;
 }

-static int omap_sham_prepare_request(struct ahash_request *req, bool update)
+static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
 {
+        struct ahash_request *req = container_of(areq, struct ahash_request,
+                                                 base);
         struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
         int bs;
         int ret;
         unsigned int nbytes;
         bool final = rctx->flags & BIT(FLAGS_FINUP);
+        bool update = rctx->op == OP_UPDATE;
         int hash_later;

         bs = get_block_size(rctx);
@@ -1069,6 +1076,39 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
         return err;
 }

+static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
+{
+        struct ahash_request *req = container_of(areq, struct ahash_request,
+                                                 base);
+        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+        struct omap_sham_dev *dd = ctx->dd;
+        int err;
+        bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+                        !(dd->flags & BIT(FLAGS_HUGE));
+
+        dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
+                ctx->op, ctx->total, ctx->digcnt, final);
+
+        dd->req = req;
+
+        err = omap_sham_hw_init(dd);
+        if (err)
+                return err;
+
+        if (ctx->digcnt)
+                dd->pdata->copy_hash(req, 0);
+
+        if (ctx->op == OP_UPDATE)
+                err = omap_sham_update_req(dd);
+        else if (ctx->op == OP_FINAL)
+                err = omap_sham_final_req(dd);
+
+        if (err != -EINPROGRESS)
+                omap_sham_finish_req(req, err);
+
+        return 0;
+}
+
 static int omap_sham_finish_hmac(struct ahash_request *req)
 {
         struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1116,25 +1156,20 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
         ctx->sg = NULL;

-        dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+        dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
+                       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+                       BIT(FLAGS_OUTPUT_READY));
+
+        if (!err)
+                dd->pdata->copy_hash(req, 1);

         if (dd->flags & BIT(FLAGS_HUGE)) {
-                dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
-                                BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
-                omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
-                if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
-                        err = omap_sham_update_req(dd);
-                        if (err != -EINPROGRESS &&
-                            (ctx->flags & BIT(FLAGS_FINUP)))
-                                err = omap_sham_final_req(dd);
-                } else if (ctx->op == OP_FINAL) {
-                        omap_sham_final_req(dd);
-                }
+                /* Re-enqueue the request */
+                omap_sham_enqueue(req, ctx->op);

                 return;
         }

         if (!err) {
-                dd->pdata->copy_hash(req, 1);
                 if (test_bit(FLAGS_FINAL, &dd->flags))
                         err = omap_sham_finish(req);
         } else {
@@ -1142,7 +1177,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
         }

         /* atomic operation is not needed here */
-        dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+        dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
                         BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

         pm_runtime_mark_last_busy(dd->dev);
@@ -1150,81 +1185,13 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
         ctx->offset = 0;

-        if (req->base.complete)
-                req->base.complete(&req->base, err);
+        crypto_finalize_hash_request(dd->engine, req, err);
 }

 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
                                   struct ahash_request *req)
 {
-        struct crypto_async_request *async_req, *backlog;
-        struct omap_sham_reqctx *ctx;
-        unsigned long flags;
-        int err = 0, ret = 0;
-
-retry:
-        spin_lock_irqsave(&dd->lock, flags);
-        if (req)
-                ret = ahash_enqueue_request(&dd->queue, req);
-        if (test_bit(FLAGS_BUSY, &dd->flags)) {
-                spin_unlock_irqrestore(&dd->lock, flags);
-                return ret;
-        }
-        backlog = crypto_get_backlog(&dd->queue);
-        async_req = crypto_dequeue_request(&dd->queue);
-        if (async_req)
-                set_bit(FLAGS_BUSY, &dd->flags);
-        spin_unlock_irqrestore(&dd->lock, flags);
-
-        if (!async_req)
-                return ret;
-
-        if (backlog)
-                backlog->complete(backlog, -EINPROGRESS);
-
-        req = ahash_request_cast(async_req);
-        dd->req = req;
-        ctx = ahash_request_ctx(req);
-
-        err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
-        if (err || !ctx->total)
-                goto err1;
-
-        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
-                ctx->op, req->nbytes);
-
-        err = omap_sham_hw_init(dd);
-        if (err)
-                goto err1;
-
-        if (ctx->digcnt)
-                /* request has changed - restore hash */
-                dd->pdata->copy_hash(req, 0);
-
-        if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
-                err = omap_sham_update_req(dd);
-                if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
-                        /* no final() after finup() */
-                        err = omap_sham_final_req(dd);
-        } else if (ctx->op == OP_FINAL) {
-                err = omap_sham_final_req(dd);
-        }
-
-err1:
-        dev_dbg(dd->dev, "exit, err: %d\n", err);
-
-        if (err != -EINPROGRESS) {
-                /* done_task will not finish it, so do it here */
-                omap_sham_finish_req(req, err);
-                req = NULL;
-
-                /*
-                 * Execute next request immediately if there is anything
-                 * in queue.
-                 */
-                goto retry;
-        }
-
-        return ret;
+        return crypto_transfer_hash_request_to_engine(dd->engine, req);
 }

 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -1394,6 +1361,10 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
         }

+        tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
+        tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
+        tctx->enginectx.op.unprepare_request = NULL;
+
         return 0;
 }
@@ -1757,11 +1728,6 @@ static void omap_sham_done_task(unsigned long data)
         dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);

-        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
-                omap_sham_handle_queue(dd, NULL);
-                return;
-        }
-
         if (test_bit(FLAGS_CPU, &dd->flags)) {
                 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                         goto finish;
@@ -1786,20 +1752,12 @@
         dev_dbg(dd->dev, "update done: err: %d\n", err);
         /* finish curent request */
         omap_sham_finish_req(dd->req, err);
-
-        /* If we are not busy, process next req */
-        if (!test_bit(FLAGS_BUSY, &dd->flags))
-                omap_sham_handle_queue(dd, NULL);
 }

 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
 {
-        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
-                dev_warn(dd->dev, "Interrupt when no active requests.\n");
-        } else {
-                set_bit(FLAGS_OUTPUT_READY, &dd->flags);
-                tasklet_schedule(&dd->done_task);
-        }
+        set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+        tasklet_schedule(&dd->done_task);

         return IRQ_HANDLED;
 }
@@ -2072,7 +2030,6 @@ static ssize_t queue_len_store(struct device *dev,
         struct omap_sham_dev *dd = dev_get_drvdata(dev);
         ssize_t status;
         long value;
-        unsigned long flags;

         status = kstrtol(buf, 0, &value);
         if (status)
@@ -2086,9 +2043,7 @@ static ssize_t queue_len_store(struct device *dev,
          * than current size, it will just not accept new entries until
          * it has shrank enough.
          */
-        spin_lock_irqsave(&dd->lock, flags);
         dd->queue.max_qlen = value;
-        spin_unlock_irqrestore(&dd->lock, flags);

         return size;
 }
@@ -2125,7 +2080,6 @@ static int omap_sham_probe(struct platform_device *pdev)
         platform_set_drvdata(pdev, dd);

         INIT_LIST_HEAD(&dd->list);
-        spin_lock_init(&dd->lock);
         tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
         crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
@@ -2190,6 +2144,16 @@
         list_add_tail(&dd->list, &sham.dev_list);
         spin_unlock(&sham.lock);

+        dd->engine = crypto_engine_alloc_init(dev, 1);
+        if (!dd->engine) {
+                err = -ENOMEM;
+                goto err_engine;
+        }
+
+        err = crypto_engine_start(dd->engine);
+        if (err)
+                goto err_engine_start;
+
         for (i = 0; i < dd->pdata->algs_info_size; i++) {
                 if (dd->pdata->algs_info[i].registered)
                         break;
@@ -2223,6 +2187,12 @@
         for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                 crypto_unregister_ahash(
                                 &dd->pdata->algs_info[i].algs_list[j]);
+err_engine_start:
+        crypto_engine_exit(dd->engine);
+err_engine:
+        spin_lock(&sham.lock);
+        list_del(&dd->list);
+        spin_unlock(&sham.lock);
 err_pm:
         pm_runtime_disable(dev);
         if (!dd->polling_mode)