Commit 655ff1a1 authored by SrujanaChalla, committed by Herbert Xu

crypto: marvell - create common Kconfig and Makefile for Marvell

Creates common Kconfig and Makefile for Marvell crypto drivers.
Signed-off-by: SrujanaChalla <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 82ff493e
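The key piece is the new hidden symbol CRYPTO_DEV_MARVELL, which each sub-driver pulls in via select (as CRYPTO_DEV_MARVELL_CESA does below). As a minimal sketch, a future Marvell sub-driver would hook into the shared directory the same way; CRYPTO_DEV_MARVELL_FOO, its prompt, and its dependency line are hypothetical, not part of this commit:

config CRYPTO_DEV_MARVELL_FOO
	tristate "Hypothetical Marvell FOO crypto driver"
	depends on ARCH_MVEBU
	select CRYPTO_DEV_MARVELL
	help
	  Illustrative entry only. Selecting CRYPTO_DEV_MARVELL makes
	  kbuild descend into drivers/crypto/marvell/, where the shared
	  Makefile dispatches to the individual drivers.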
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390
 	  It is available with IBM z13 or later.
-config CRYPTO_DEV_MARVELL_CESA
-	tristate "Marvell's Cryptographic Engine driver"
-	depends on PLAT_ORION || ARCH_MVEBU
-	select CRYPTO_LIB_AES
-	select CRYPTO_LIB_DES
-	select CRYPTO_SKCIPHER
-	select CRYPTO_HASH
-	select SRAM
-	help
-	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on MVEBU and ORION
-	  platforms.
-	  This driver supports CPU offload through DMA transfers.
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
 	select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
 source "drivers/crypto/qat/Kconfig"
 source "drivers/crypto/cavium/cpt/Kconfig"
 source "drivers/crypto/cavium/nitrox/Kconfig"
+source "drivers/crypto/marvell/Kconfig"
 config CRYPTO_DEV_CAVIUM_ZIP
 	tristate "Cavium ZIP driver"
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
--- /dev/null
+++ b/drivers/crypto/marvell/Kconfig
+#
+# Marvell crypto drivers configuration
+#
+config CRYPTO_DEV_MARVELL
+	tristate
+
+config CRYPTO_DEV_MARVELL_CESA
+	tristate "Marvell's Cryptographic Engine driver"
+	depends on PLAT_ORION || ARCH_MVEBU
+	select CRYPTO_LIB_AES
+	select CRYPTO_LIB_DES
+	select CRYPTO_SKCIPHER
+	select CRYPTO_HASH
+	select SRAM
+	select CRYPTO_DEV_MARVELL
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on MVEBU and ORION
+	  platforms.
+	  This driver supports CPU offload through DMA transfers.
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -1,3 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
-marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
--- /dev/null
+++ b/drivers/crypto/marvell/cesa/Makefile
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
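Taken together, the Makefile changes above form a three-level build chain for the CESA objects; a condensed view, using only lines introduced or kept by this commit:

drivers/crypto/Makefile:              obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
drivers/crypto/marvell/Makefile:      obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
drivers/crypto/marvell/cesa/Makefile: marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o

kbuild descends into marvell/ only when CRYPTO_DEV_MARVELL is set, which happens implicitly whenever a sub-driver such as CRYPTO_DEV_MARVELL_CESA selects it.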
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -436,7 +436,7 @@ struct mv_cesa_dev {
  * @queue: fifo of the pending crypto requests
  * @load: engine load counter, useful for load balancing
  * @chain: list of the current tdma descriptors being processed
- *        by this engine.
+ *	  by this engine.
  * @complete_queue: fifo of the processed requests by the engine
  *
  * Structure storing CESA engine information.
@@ -467,7 +467,7 @@ struct mv_cesa_engine {
  * @step: launch the crypto operation on the next chunk
  * @cleanup: cleanup the crypto request (release associated data)
  * @complete: complete the request, i.e copy result or context from sram when
- *           needed.
+ *	     needed.
  */
 struct mv_cesa_req_ops {
 	int (*process)(struct crypto_async_request *req, u32 status);
@@ -734,6 +734,7 @@ static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
 	for (i = 0; i < cesa_dev->caps->nengines; i++) {
 		struct mv_cesa_engine *engine = cesa_dev->engines + i;
 		u32 load = atomic_read(&engine->load);
+
 		if (load < min_load) {
 			min_load = load;
 			selected = engine;
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -106,8 +106,8 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
 	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
@@ -178,6 +178,7 @@ static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
 {
 	struct skcipher_request *skreq = skcipher_request_cast(req);
 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
 	creq->base.engine = engine;
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
@@ -336,7 +337,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	do {
 		struct mv_cesa_op_ctx *op;
-		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
+		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
+					flags);
 		if (IS_ERR(op)) {
 			ret = PTR_ERR(op);
 			goto err_free_tdma;
@@ -365,9 +367,10 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
 	/* Add output data for IV */
-	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
-					CESA_SA_DATA_SRAM_OFFSET,
-					CESA_TDMA_SRC_IN_SRAM, flags);
+	ret = mv_cesa_dma_add_result_op(&basereq->chain,
+					CESA_SA_CFG_SRAM_OFFSET,
+					CESA_SA_DATA_SRAM_OFFSET,
+					CESA_TDMA_SRC_IN_SRAM, flags);
 	if (ret)
 		goto err_free_tdma;
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -141,9 +141,11 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
 	if (creq->algo_le) {
 		__le64 bits = cpu_to_le64(creq->len << 3);
+
 		memcpy(buf + padlen, &bits, sizeof(bits));
 	} else {
 		__be64 bits = cpu_to_be64(creq->len << 3);
+
 		memcpy(buf + padlen, &bits, sizeof(bits));
 	}
@@ -168,7 +170,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 	if (!sreq->offset) {
 		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 		for (i = 0; i < digsize / 4; i++)
-			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+			writel_relaxed(creq->state[i],
+				       engine->regs + CESA_IVDIG(i));
 	}
 	if (creq->cache_ptr)
@@ -245,8 +248,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
 	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
@@ -329,11 +332,12 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
-	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
+	    CESA_TDMA_RESULT) {
 		__le32 *data = NULL;
 		/*
-		 * Result is already in the correct endianess when the SA is
+		 * Result is already in the correct endianness when the SA is
 		 * used
 		 */
 		data = creq->base.chain.last->op->ctx.hash.hash;
@@ -347,9 +351,9 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 					       CESA_IVDIG(i));
 	if (creq->last_req) {
 		/*
-		* Hardware's MD5 digest is in little endian format, but
-		* SHA in big endian format
-		*/
+		 * Hardware's MD5 digest is in little endian format, but
+		 * SHA in big endian format
+		 */
 		if (creq->algo_le) {
 			__le32 *result = (void *)ahashreq->result;
@@ -439,7 +443,8 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	bool cached = false;
-	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
+	    !creq->last_req) {
 		cached = true;
 		if (!req->nbytes)
@@ -648,7 +653,8 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 		if (!mv_cesa_ahash_req_iter_next_op(&iter))
 			break;
-		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+		op = mv_cesa_dma_add_frag(&basereq->chain,
+					  &creq->op_tmpl,
 					  frag_len, flags);
 		if (IS_ERR(op)) {
 			ret = PTR_ERR(op);
@@ -920,7 +926,7 @@ struct ahash_alg mv_md5_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
@@ -990,7 +996,7 @@ struct ahash_alg mv_sha1_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
@@ -1063,7 +1069,7 @@ struct ahash_alg mv_sha256_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
@@ -1297,7 +1303,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
@@ -1367,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
@@ -1437,6 +1443,6 @@ struct ahash_alg mv_ahmac_sha256_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
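The six whitespace-only hunks above adjust the closing braces at the end of each ahash_alg definition. For orientation, a condensed sketch of the nesting those braces close, based on the driver's mv_md5_alg with most fields elided:

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	/* ... request callbacks ... */
	.halg = {
		/* ... digestsize, statesize ... */
		.base = {
			/* ... cra_name, cra_flags, ... */
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}	/* closes .base (struct crypto_alg) */
	}		/* closes .halg (struct hash_alg_common) */
};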
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -50,8 +50,8 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
 		       engine->regs + CESA_SA_CFG);
 	writel_relaxed(dreq->chain.first->cur_dma,
 		       engine->regs + CESA_TDMA_NEXT_ADDR);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
@@ -175,8 +175,10 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 			break;
 	}
-	/* Save the last request in error to engine->req, so that the core
-	 * knows which request was fautly */
+	/*
+	 * Save the last request in error to engine->req, so that the core
+	 * knows which request was faulty
+	 */
 	if (res) {
 		spin_lock_bh(&engine->lock);
 		engine->req = req;