Commit db509a45 authored by Boris Brezillon, committed by Herbert Xu

crypto: marvell/cesa - add TDMA support

The CESA IP supports CPU offload through a dedicated DMA engine (TDMA)
which can control the crypto block.
When this mode is used, all the required data (operation metadata and
payload data) are transferred using DMA, and the results are retrieved
through DMA when possible (hash results are not retrieved through DMA yet),
thus reducing CPU involvement and providing better performance in most
cases (for small requests, the cost of DMA preparation might exceed the
performance gain).

Note that some CESA IPs do not embed this dedicated DMA engine, hence
this feature is activated on a per-platform basis.
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Arnaud Ebalard <arno@natisbad.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent f63601fd
@@ -184,6 +184,7 @@ config CRYPTO_DEV_MARVELL_CESA
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on the Armada 370.
This driver supports CPU offload through DMA transfers.
This driver is aimed at replacing the mv_cesa driver. This will only
happen once it has received proper testing.
...
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
-marvell-cesa-objs := cesa.o cipher.o hash.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
@@ -184,6 +184,7 @@ static const struct mv_cesa_caps armada_370_caps = {
.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
.ahash_algs = armada_370_ahash_algs,
.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
.has_tdma = true,
};
static const struct of_device_id mv_cesa_of_match_table[] = {
@@ -192,6 +193,66 @@ static const struct of_device_id mv_cesa_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
const struct mbus_dram_target_info *dram)
{
void __iomem *iobase = engine->regs;
int i;
for (i = 0; i < 4; i++) {
writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
iobase + CESA_TDMA_WINDOW_CTRL(i));
writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
}
}
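For readability, here is a hedged restatement of the window-control word programmed above. These macros are illustrative only (they are not part of the driver); they simply name the fields packed by the writel() call in mv_cesa_conf_mbus_windows().

/* Illustrative field breakdown of the TDMA window control word (not driver code). */
#define EXAMPLE_TDMA_WIN_SIZE(sz)	(((sz) - 1) & 0xffff0000)	/* window size, 64 KB granularity */
#define EXAMPLE_TDMA_WIN_ATTR(attr)	((attr) << 8)			/* MBus attributes of the DRAM CS */
#define EXAMPLE_TDMA_WIN_TARGET(id)	((id) << 4)			/* MBus DRAM target ID */
#define EXAMPLE_TDMA_WIN_EN		BIT(0)				/* window enable bit */
/*
 * The control word written above is equivalent to:
 * EXAMPLE_TDMA_WIN_SIZE(cs->size) | EXAMPLE_TDMA_WIN_ATTR(cs->mbus_attr) |
 * EXAMPLE_TDMA_WIN_TARGET(dram->mbus_dram_target_id) | EXAMPLE_TDMA_WIN_EN
 */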
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
struct device *dev = cesa->dev;
struct mv_cesa_dev_dma *dma;
if (!cesa->caps->has_tdma)
return 0;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
return -ENOMEM;
dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
sizeof(struct mv_cesa_tdma_desc),
16, 0);
if (!dma->tdma_desc_pool)
return -ENOMEM;
dma->op_pool = dmam_pool_create("cesa_op", dev,
sizeof(struct mv_cesa_op_ctx), 16, 0);
if (!dma->op_pool)
return -ENOMEM;
dma->cache_pool = dmam_pool_create("cesa_cache", dev,
CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
if (!dma->cache_pool)
return -ENOMEM;
dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
if (!dma->padding_pool)
return -ENOMEM;
cesa->dma = dma;
return 0;
}
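As a quick illustration of how these managed pools are consumed, here is a minimal sketch (assumed helper names, not driver code) of allocating and releasing one TDMA descriptor; it mirrors what mv_cesa_dma_add_desc() and mv_cesa_dma_cleanup() do in tdma.c further down.

/* Sketch only: allocate one descriptor from the pool and release it again. */
static struct mv_cesa_tdma_desc *example_tdma_desc_get(gfp_t flags)
{
	struct mv_cesa_tdma_desc *desc;
	dma_addr_t dma_handle;

	/* dma_pool_alloc() returns a coherent buffer plus its DMA address */
	desc = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, &dma_handle);
	if (!desc)
		return NULL;

	memset(desc, 0, sizeof(*desc));
	/* the engine reads DMA addresses as little-endian values */
	desc->cur_dma = cpu_to_le32(dma_handle);

	return desc;
}

static void example_tdma_desc_put(struct mv_cesa_tdma_desc *desc)
{
	dma_pool_free(cesa_dev->dma->tdma_desc_pool, desc,
		      le32_to_cpu(desc->cur_dma));
}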
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
@@ -299,6 +360,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
if (IS_ERR(cesa->regs))
return -ENOMEM;
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
return ret;
dram = mv_mbus_dram_info_nooverlap();
platform_set_drvdata(pdev, cesa);
@@ -347,6 +412,9 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
if (dram && cesa->caps->has_tdma)
mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);
writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
writel(CESA_SA_CFG_STOP_DIG_ERR,
cesa->engines[i].regs + CESA_SA_CFG);
...
@@ -6,6 +6,7 @@
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/dmapool.h>
#define CESA_ENGINE_OFF(i) (((i) * 0x2000))
@@ -267,11 +268,94 @@ struct mv_cesa_op_ctx {
} ctx;
};
/* TDMA descriptor flags */
#define CESA_TDMA_DST_IN_SRAM BIT(31)
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
#define CESA_TDMA_TYPE_MSK GENMASK(29, 0)
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
* @byte_cnt: number of bytes to transfer
* @src: DMA address of the source
* @dst: DMA address of the destination
* @next_dma: DMA address of the next TDMA descriptor
* @cur_dma: DMA address of this TDMA descriptor
* @next: pointer to the next TDMA descriptor
* @op: CESA operation attached to this TDMA descriptor
* @data: raw data attached to this TDMA descriptor
* @flags: flags describing the TDMA transfer. See the
* "TDMA descriptor flags" section above
*
* TDMA descriptor used to create a transfer chain describing a crypto
* operation.
*/
struct mv_cesa_tdma_desc {
u32 byte_cnt;
u32 src;
u32 dst;
u32 next_dma;
u32 cur_dma;
struct mv_cesa_tdma_desc *next;
union {
struct mv_cesa_op_ctx *op;
void *data;
};
u32 flags;
};
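To make the dual CPU/hardware view of a chain concrete, here is a hedged sketch (hypothetical helper name) of how one descriptor is linked after another: the CPU follows @next while the TDMA engine follows @next_dma. The real driver does this in mv_cesa_dma_add_desc() in tdma.c.

/* Sketch only: append @next_desc after @prev, both freshly allocated and zeroed. */
static void example_tdma_link(struct mv_cesa_tdma_desc *prev,
			      struct mv_cesa_tdma_desc *next_desc)
{
	prev->next = next_desc;			/* CPU-side link, used when walking or freeing the chain */
	prev->next_dma = next_desc->cur_dma;	/* engine-side link, followed by the TDMA block */
	/* next_desc keeps next/next_dma at zero, marking the current end of the chain */
}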
/**
* struct mv_cesa_sg_dma_iter - scatter-gather iterator
* @dir: transfer direction
* @sg: scatter list
* @offset: current position in the scatter list
* @op_offset: current position in the crypto operation
*
* Iterator used to iterate over a scatterlist while creating a TDMA chain for
* a crypto operation.
*/
struct mv_cesa_sg_dma_iter {
enum dma_data_direction dir;
struct scatterlist *sg;
unsigned int offset;
unsigned int op_offset;
};
/**
* struct mv_cesa_dma_iter - crypto operation iterator
* @len: the crypto operation length
* @offset: current position in the crypto operation
* @op_len: sub-operation length (the crypto engine can only act on 2kb
* chunks)
*
* Iterator used to create a TDMA chain for a given crypto operation.
*/
struct mv_cesa_dma_iter {
unsigned int len;
unsigned int offset;
unsigned int op_len;
};
/**
* struct mv_cesa_tdma_chain - TDMA chain
* @first: first entry in the TDMA chain
* @last: last entry in the TDMA chain
*
* Stores a TDMA chain for a specific crypto operation.
*/
struct mv_cesa_tdma_chain {
struct mv_cesa_tdma_desc *first;
struct mv_cesa_tdma_desc *last;
};
struct mv_cesa_engine;
/**
* struct mv_cesa_caps - CESA device capabilities
* @engines: number of engines
* @has_tdma: whether this device has a TDMA block
* @cipher_algs: supported cipher algorithms
* @ncipher_algs: number of supported cipher algorithms
* @ahash_algs: supported hash algorithms
@@ -281,12 +365,31 @@ struct mv_cesa_engine;
*/
struct mv_cesa_caps {
int nengines;
bool has_tdma;
struct crypto_alg **cipher_algs;
int ncipher_algs;
struct ahash_alg **ahash_algs;
int nahash_algs;
};
/**
* struct mv_cesa_dev_dma - DMA pools
* @tdma_desc_pool: TDMA desc pool
* @op_pool: crypto operation pool
* @cache_pool: data cache pool (used by hash implementation when the
* hash request is smaller than the hash block size)
* @padding_pool: padding pool (used by hash implementation when hardware
* padding cannot be used)
*
* Structure containing the different DMA pools used by this driver.
*/
struct mv_cesa_dev_dma {
struct dma_pool *tdma_desc_pool;
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
};
/**
* struct mv_cesa_dev - CESA device
* @caps: device capabilities
@@ -295,6 +398,7 @@ struct mv_cesa_caps {
* @lock: device lock
* @queue: crypto request queue
* @engines: array of engines
* @dma: dma pools
*
* Structure storing CESA device information.
*/
@@ -306,6 +410,7 @@ struct mv_cesa_dev {
spinlock_t lock;
struct crypto_queue queue;
struct mv_cesa_engine *engines;
struct mv_cesa_dev_dma *dma;
};
/**
@@ -391,9 +496,11 @@ struct mv_cesa_hmac_ctx {
/**
* enum mv_cesa_req_type - request type definitions
* @CESA_STD_REQ: standard request
* @CESA_DMA_REQ: DMA request
*/
enum mv_cesa_req_type {
CESA_STD_REQ,
CESA_DMA_REQ,
};
/**
@@ -406,6 +513,27 @@ struct mv_cesa_req {
struct mv_cesa_engine *engine;
};
/**
* struct mv_cesa_tdma_req - CESA TDMA request
* @base: base information
* @chain: TDMA chain
*/
struct mv_cesa_tdma_req {
struct mv_cesa_req base;
struct mv_cesa_tdma_chain chain;
};
/**
* struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
* requests
* @iter: sg mapping iterator
* @offset: current offset in the SG entry mapped in memory
*/
struct mv_cesa_sg_std_iter {
struct sg_mapping_iter iter;
unsigned int offset;
};
/**
* struct mv_cesa_ablkcipher_std_req - cipher standard request
* @base: base information
@@ -430,6 +558,7 @@ struct mv_cesa_ablkcipher_std_req {
struct mv_cesa_ablkcipher_req {
union {
struct mv_cesa_req base;
struct mv_cesa_tdma_req dma;
struct mv_cesa_ablkcipher_std_req std;
} req;
int src_nents;
@@ -446,6 +575,20 @@ struct mv_cesa_ahash_std_req {
unsigned int offset;
};
/**
* struct mv_cesa_ahash_dma_req - DMA hash request
* @base: base information
* @padding: padding buffer
* @padding_dma: DMA address of the padding buffer
* @cache_dma: DMA address of the cache buffer
*/
struct mv_cesa_ahash_dma_req {
struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
dma_addr_t cache_dma;
};
/**
* struct mv_cesa_ahash_req - hash request
* @req: type specific request information
@@ -460,6 +603,7 @@ struct mv_cesa_ahash_std_req {
struct mv_cesa_ahash_req {
union {
struct mv_cesa_req base;
struct mv_cesa_ahash_dma_req dma;
struct mv_cesa_ahash_std_req std;
} req;
struct mv_cesa_op_ctx op_tmpl;
@@ -543,6 +687,91 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
int mv_cesa_queue_req(struct crypto_async_request *req);
/* TDMA functions */
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
unsigned int len)
{
iter->len = len;
iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
iter->offset = 0;
}
static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
struct scatterlist *sg,
enum dma_data_direction dir)
{
iter->op_offset = 0;
iter->offset = 0;
iter->sg = sg;
iter->dir = dir;
}
static inline unsigned int
mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
struct mv_cesa_sg_dma_iter *sgiter)
{
return min(iter->op_len - sgiter->op_offset,
sg_dma_len(sgiter->sg) - sgiter->offset);
}
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain,
struct mv_cesa_sg_dma_iter *sgiter,
unsigned int len);
static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
{
iter->offset += iter->op_len;
iter->op_len = min(iter->len - iter->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
return iter->op_len;
}
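Taken together, mv_cesa_req_dma_iter_init() and mv_cesa_req_dma_iter_next_op() split a request into SRAM-sized sub-operations. A minimal sketch of the calling pattern, assuming a 2048-byte CESA_SA_SRAM_PAYLOAD_SIZE (the "2kb chunks" mentioned in the mv_cesa_dma_iter comment) and a hypothetical build_one_operation() helper:

/* Sketch only: a 5000-byte request is handled as three sub-operations. */
static void example_split_request(void)
{
	struct mv_cesa_dma_iter iter;

	mv_cesa_req_dma_iter_init(&iter, 5000);
	do {
		/* iter.op_len is 2048, 2048 and finally 904 on successive passes */
		build_one_operation(iter.op_len);	/* hypothetical helper */
	} while (mv_cesa_req_dma_iter_next_op(&iter));
}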
void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq);
static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
u32 status)
{
if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
return -EINPROGRESS;
if (status & CESA_SA_INT_IDMA_OWN_ERR)
return -EINVAL;
return 0;
}
void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
struct mv_cesa_engine *engine);
void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq);
static inline void
mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
{
memset(chain, 0, sizeof(*chain));
}
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
gfp_t flags);
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
dma_addr_t dst, dma_addr_t src, u32 size,
u32 flags, gfp_t gfp_flags);
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain,
u32 flags);
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags);
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_dma_iter *dma_iter,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags);
/* Algorithm definitions */
extern struct ahash_alg mv_sha1_alg;
...
@@ -21,6 +21,55 @@ struct mv_cesa_aes_ctx {
struct crypto_aes_ctx aes;
};
struct mv_cesa_ablkcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};
static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
struct ablkcipher_request *req)
{
mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
} else {
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
}
mv_cesa_dma_cleanup(&creq->req.dma);
}
static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
if (creq->req.base.type == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_cleanup(req);
}
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
@@ -77,7 +126,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
struct mv_cesa_engine *engine = sreq->base.engine;
int ret;
-	ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
+	if (creq->req.base.type == CESA_DMA_REQ)
+		ret = mv_cesa_dma_process(&creq->req.dma, status);
+	else
+		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
if (ret)
return ret;
@@ -90,8 +143,21 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-	mv_cesa_ablkcipher_std_step(ablkreq);
+	if (creq->req.base.type == CESA_DMA_REQ)
+		mv_cesa_dma_step(&creq->req.dma);
+	else
+		mv_cesa_ablkcipher_std_step(ablkreq);
+}
static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_tdma_req *dreq = &creq->req.dma;
mv_cesa_dma_prepare(dreq, dreq->base.engine);
}
static inline void
@@ -115,12 +181,18 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
creq->req.base.engine = engine;
-	mv_cesa_ablkcipher_std_prepare(ablkreq);
+	if (creq->req.base.type == CESA_DMA_REQ)
+		mv_cesa_ablkcipher_dma_prepare(ablkreq);
+	else
+		mv_cesa_ablkcipher_std_prepare(ablkreq);
}
static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
mv_cesa_ablkcipher_cleanup(ablkreq);
}
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
@@ -166,6 +238,92 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_tdma_req *dreq = &creq->req.dma;
struct mv_cesa_ablkcipher_dma_iter iter;
struct mv_cesa_tdma_chain chain;
bool skip_ctx = false;
int ret;
dreq->base.type = CESA_DMA_REQ;
dreq->chain.first = NULL;
dreq->chain.last = NULL;
if (req->src != req->dst) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
if (!ret)
return -ENOMEM;
ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
if (!ret) {
ret = -ENOMEM;
goto err_unmap_src;
}
} else {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
if (!ret)
return -ENOMEM;
}
mv_cesa_tdma_desc_iter_init(&chain);
mv_cesa_ablkcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
}
skip_ctx = true;
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
/* Add input transfers */
ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
/* Add dummy desc to launch the crypto operation */
ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
if (ret)
goto err_free_tdma;
/* Add output transfers */
ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
&iter.dst, flags);
if (ret)
goto err_free_tdma;
} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
dreq->chain = chain;
return 0;
err_free_tdma:
mv_cesa_dma_cleanup(dreq);
if (req->dst != req->src)
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
err_unmap_src:
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
return ret;
}
static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
@@ -186,6 +344,7 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
int ret;
if (!IS_ALIGNED(req->nbytes, blksize))
return -EINVAL;
@@ -196,7 +355,13 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);
-	return mv_cesa_ablkcipher_std_req_init(req, tmpl);
+	/* TODO: add a threshold for DMA usage */
+	if (cesa_dev->caps->has_tdma)
+		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
+	else
+		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
+	return ret;
}
static int mv_cesa_aes_op(struct ablkcipher_request *req,
@@ -230,7 +395,11 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
if (ret)
return ret;
-	return mv_cesa_queue_req(&req->base);
+	ret = mv_cesa_queue_req(&req->base);
+	if (ret && ret != -EINPROGRESS)
+		mv_cesa_ablkcipher_cleanup(req);
+	return ret;
}
static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
...
This diff is collapsed.
/*
* Provide TDMA helper functions used by cipher and hash algorithm
* implementations.
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
* Author: Arnaud Ebalard <arno@natisbad.org>
*
* This work is based on an initial version written by
* Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "cesa.h"
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
struct mv_cesa_sg_dma_iter *sgiter,
unsigned int len)
{
if (!sgiter->sg)
return false;
sgiter->op_offset += len;
sgiter->offset += len;
if (sgiter->offset == sg_dma_len(sgiter->sg)) {
if (sg_is_last(sgiter->sg))
return false;
sgiter->offset = 0;
sgiter->sg = sg_next(sgiter->sg);
}
if (sgiter->op_offset == iter->op_len)
return false;
return true;
}
void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
{
struct mv_cesa_engine *engine = dreq->base.engine;
writel(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
engine->regs + CESA_TDMA_CONTROL);
writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
engine->regs + CESA_SA_CFG);
writel(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma;) {
struct mv_cesa_tdma_desc *old_tdma = tdma;
if (tdma->flags & CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
le32_to_cpu(old_tdma->cur_dma));
}
dreq->chain.first = NULL;
dreq->chain.last = NULL;
}
void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
struct mv_cesa_engine *engine)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
if (tdma->flags & CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
}
}
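A hedged illustration of what this relocation does: descriptors are built with SRAM-relative offsets, and only here are they turned into bus addresses the engine can reach. The base address below is made up for illustration.

/*
 * Illustration only. Assume engine->sram_dma is 0xf1100000 and a data
 * transfer was queued with CESA_TDMA_DST_IN_SRAM and
 * dst = CESA_SA_DATA_SRAM_OFFSET + 0x40.
 *
 * Before mv_cesa_dma_prepare(): tdma->dst = CESA_SA_DATA_SRAM_OFFSET + 0x40
 * After  mv_cesa_dma_prepare(): tdma->dst = cpu_to_le32(0xf1100000 +
 *                                           CESA_SA_DATA_SRAM_OFFSET + 0x40)
 */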
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
struct mv_cesa_tdma_desc *new_tdma = NULL;
dma_addr_t dma_handle;
new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
&dma_handle);
if (!new_tdma)
return ERR_PTR(-ENOMEM);
memset(new_tdma, 0, sizeof(*new_tdma));
new_tdma->cur_dma = cpu_to_le32(dma_handle);
if (chain->last) {
chain->last->next_dma = new_tdma->cur_dma;
chain->last->next = new_tdma;
} else {
chain->first = new_tdma;
}
chain->last = new_tdma;
return new_tdma;
}
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
gfp_t flags)
{
struct mv_cesa_tdma_desc *tdma;
struct mv_cesa_op_ctx *op;
dma_addr_t dma_handle;
tdma = mv_cesa_dma_add_desc(chain, flags);
if (IS_ERR(tdma))
return ERR_CAST(tdma);
op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
if (!op)
return ERR_PTR(-ENOMEM);
*op = *op_templ;
tdma = chain->last;
tdma->op = op;
tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31);
tdma->src = dma_handle;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
}
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
dma_addr_t dst, dma_addr_t src, u32 size,
u32 flags, gfp_t gfp_flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
tdma->byte_cnt = size | BIT(31);
tdma->src = src;
tdma->dst = dst;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
tdma->flags = flags | CESA_TDMA_DATA;
return 0;
}
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain,
u32 flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
return 0;
}
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
tdma->byte_cnt = BIT(31);
return 0;
}
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_dma_iter *dma_iter,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags)
{
u32 flags = sgiter->dir == DMA_TO_DEVICE ?
CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
unsigned int len;
do {
dma_addr_t dst, src;
int ret;
len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
if (sgiter->dir == DMA_TO_DEVICE) {
dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
src = sg_dma_address(sgiter->sg) + sgiter->offset;
} else {
dst = sg_dma_address(sgiter->sg) + sgiter->offset;
src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
}
ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
flags, gfp_flags);
if (ret)
return ret;
} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));
return 0;
}