Commit 655ff1a1 authored by SrujanaChalla, committed by Herbert Xu

crypto: marvell - create common Kconfig and Makefile for Marvell

Creates common Kconfig and Makefile for Marvell crypto drivers.
Signed-off-by: SrujanaChalla <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 82ff493e
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on MVEBU and ORION
platforms.
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
......
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
......
#
# Marvell crypto drivers configuration
#
config CRYPTO_DEV_MARVELL
tristate
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
select CRYPTO_DEV_MARVELL
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on MVEBU and ORION
platforms.
This driver supports CPU offload through DMA transfers.
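(Note: CRYPTO_DEV_MARVELL above is a hidden tristate, it has no prompt string, so it can only be enabled through the "select CRYPTO_DEV_MARVELL" reverse dependency in CRYPTO_DEV_MARVELL_CESA. The top-level Makefile hunk earlier keys the descent into marvell/ on this umbrella symbol, so the directory is built whenever any Marvell sub-driver is enabled.)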
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
@@ -734,6 +734,7 @@ static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
for (i = 0; i < cesa_dev->caps->nengines; i++) {
struct mv_cesa_engine *engine = cesa_dev->engines + i;
u32 load = atomic_read(&engine->load);
if (load < min_load) {
min_load = load;
selected = engine;
......
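For context, the loop above is a least-loaded scheduler across the CESA engines. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative, not from the driver):

#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>

struct engine {
	atomic_int load;	/* outstanding work queued on this engine */
};

/* Pick the engine with the smallest current load; ties keep the
 * first match, mirroring the loop in mv_cesa_select_engine(). */
static struct engine *select_least_loaded(struct engine *engines, size_t n)
{
	struct engine *selected = NULL;
	int min_load = INT_MAX;

	for (size_t i = 0; i < n; i++) {
		int load = atomic_load(&engines[i].load);

		if (load < min_load) {
			min_load = load;
			selected = &engines[i];
		}
	}

	return selected;
}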
@@ -106,7 +106,7 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
BUG_ON(readl(engine->regs + CESA_SA_CMD) &
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
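The hunk above replaces a BUG_ON with a WARN_ON around the sanity check that the accelerator is idle before it is kicked: the condition still gets logged, but it no longer panics the machine. A hedged userspace analogue of that check-then-enable pattern (the register pointer and bit value are illustrative stand-ins for engine->regs and CESA_SA_CMD_EN_CESA_SA_ACCL0):

#include <stdint.h>
#include <stdio.h>

#define CMD_EN_ACCL0 0x00000001u

/* Warn, rather than crash, if the engine is unexpectedly busy,
 * then set the enable bit, mirroring the readl()/writel() pair. */
static void kick_engine(volatile uint32_t *cmd_reg)
{
	if (*cmd_reg & CMD_EN_ACCL0)	/* WARN_ON: report and keep going */
		fprintf(stderr, "engine already enabled\n");

	*cmd_reg = CMD_EN_ACCL0;	/* start the accelerator */
}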
@@ -178,6 +178,7 @@ static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
@@ -336,7 +337,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
do {
struct mv_cesa_op_ctx *op;
op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -365,7 +367,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
ret = mv_cesa_dma_add_result_op(&basereq->chain,
CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
......
@@ -141,9 +141,11 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
if (creq->algo_le) {
__le64 bits = cpu_to_le64(creq->len << 3);
memcpy(buf + padlen, &bits, sizeof(bits));
} else {
__be64 bits = cpu_to_be64(creq->len << 3);
memcpy(buf + padlen, &bits, sizeof(bits));
}
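The branch above is the only algorithm-dependent part of the hash padding: MD5 (the algo_le case) stores the total message length in bits little-endian, while SHA-1/SHA-2 store it big-endian. A self-contained sketch of that tail encoding, with illustrative names (only the creq->len << 3 computation comes from the driver):

#include <stddef.h>
#include <stdint.h>

/* Write the 64-bit bit-length of the message at buf + padlen:
 * little-endian when algo_le is set (MD5), big-endian otherwise (SHA). */
static void put_pad_length(uint8_t *buf, size_t padlen,
			   uint64_t msg_bytes, int algo_le)
{
	uint64_t bits = msg_bytes << 3;	/* message length in bits */
	int i;

	for (i = 0; i < 8; i++) {
		int shift = algo_le ? 8 * i : 8 * (7 - i);

		buf[padlen + i] = (uint8_t)(bits >> shift);
	}
}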
@@ -168,7 +170,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
writel_relaxed(creq->state[i],
engine->regs + CESA_IVDIG(i));
}
if (creq->cache_ptr)
@@ -245,7 +248,7 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
BUG_ON(readl(engine->regs + CESA_SA_CMD) &
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -329,11 +332,12 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
__le32 *data = NULL;
/*
* Result is already in the correct endianess when the SA is
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
@@ -439,7 +443,8 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
bool cached = false;
if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
!creq->last_req) {
cached = true;
if (!req->nbytes)
@@ -648,7 +653,8 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
op = mv_cesa_dma_add_frag(&basereq->chain,
&creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
......
@@ -50,7 +50,7 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
BUG_ON(readl(engine->regs + CESA_SA_CMD) &
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -175,8 +175,10 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
break;
}
/* Save the last request in error to engine->req, so that the core
* knows which request was fautly */
/*
* Save the last request in error to engine->req, so that the core
* knows which request was faulty
*/
if (res) {
spin_lock_bh(&engine->lock);
engine->req = req;
......
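The comment fixed above describes the error-reporting contract: the first failing request is parked in engine->req under the engine lock so the completion path can tell which request failed. A hedged userspace analogue with a pthread mutex standing in for the spinlock (struct and field names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct request;				/* opaque stand-in for crypto_async_request */

struct engine_state {
	pthread_mutex_t lock;		/* stands in for engine->lock */
	struct request *failed_req;	/* stands in for engine->req */
};

/* Record the faulty request so a later pass can report which one failed. */
static void record_failure(struct engine_state *e, struct request *req)
{
	pthread_mutex_lock(&e->lock);
	e->failed_req = req;
	pthread_mutex_unlock(&e->lock);
}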