Commit 9db83b4e authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: remove unproven likely/unlikely

The ccree code made heavy use of the likely/unlikely qualifiers without
any measurements showing a proven benefit. Remove them all until we can
see which, if any, are justified.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8977c824
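For context, likely() and unlikely() are hints to the compiler's branch layout built on GCC's __builtin_expect(). The snippet below is a minimal userspace sketch, not code from this driver: the macro definitions are a simplified form of the kernel's (the real ones in include/linux/compiler.h add profiling/tracing variants), and the two if-statements mirror the before/after style of this patch.

/* Simplified form of the kernel's branch hints: the double negation
 * normalizes the expression to 0 or 1 before passing it to
 * __builtin_expect(), which tells the compiler which outcome to treat
 * as the hot, fall-through path.
 */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	int rc = 0;	/* stand-in for a driver return code */

	/* Style this patch removes: asserts the error path is cold. */
	if (unlikely(rc))
		printf("error path\n");

	/* Style this patch leaves behind: a plain condition; layout is
	 * left to the compiler until measurements justify a hint.
	 */
	if (rc)
		printf("error path\n");

	return 0;
}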
@@ -251,7 +251,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req)
err = -EBADMSG;
}
} else { /*ENCRYPT*/
-if (unlikely(areq_ctx->is_icv_fragmented)) {
+if (areq_ctx->is_icv_fragmented) {
u32 skip = areq->cryptlen + areq_ctx->dst_offset;
cc_copy_sg_portion(dev, areq_ctx->mac_buf,
@@ -412,7 +412,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
}
/* Check cipher key size */
-if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
@@ -465,10 +465,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
hashmode = DRV_HASH_HW_SHA256;
}
-if (likely(keylen != 0)) {
+if (keylen != 0) {
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -547,10 +547,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
-if (unlikely(rc))
+if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-if (likely(key_dma_addr))
+if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
return rc;
@@ -607,7 +607,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
rc = validate_keys_sizes(ctx);
-if (unlikely(rc))
+if (rc)
goto badkey;
/* STAT_PHASE_1: Copy key to ctx */
@@ -646,7 +646,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
@@ -818,7 +818,7 @@ ssi_aead_process_authenc_data_desc(
ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
-if (likely(areq_ctx->is_single_pass)) {
+if (areq_ctx->is_single_pass) {
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
mlli_addr = areq_ctx->dst.sram_addr;
mlli_nents = areq_ctx->dst.mlli_nents;
@@ -1202,10 +1202,9 @@ static void ssi_aead_load_mlli_to_sram(
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
-if (unlikely(
-req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
-req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
-!req_ctx->is_single_pass)) {
+if (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
+req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
+!req_ctx->is_single_pass) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1231,17 +1230,17 @@ static enum cc_flow_mode ssi_aead_get_data_flow_mode(
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
if (setup_flow_mode == S_DIN_to_AES)
-data_flow_mode = likely(is_single_pass) ?
+data_flow_mode = is_single_pass ?
AES_to_HASH_and_DOUT : DIN_AES_DOUT;
else
-data_flow_mode = likely(is_single_pass) ?
+data_flow_mode = is_single_pass ?
DES_to_HASH_and_DOUT : DIN_DES_DOUT;
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
-data_flow_mode = likely(is_single_pass) ?
+data_flow_mode = is_single_pass ?
AES_and_HASH : DIN_AES_DOUT;
else
-data_flow_mode = likely(is_single_pass) ?
+data_flow_mode = is_single_pass ?
DES_and_HASH : DIN_DES_DOUT;
}
@@ -1367,16 +1366,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
-if (unlikely(direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
-req->cryptlen < ctx->authsize))
+if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+req->cryptlen < ctx->authsize)
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
switch (ctx->flow_mode) {
case S_DIN_to_AES:
-if (unlikely(ctx->cipher_mode == DRV_CIPHER_CBC &&
-!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1395,9 +1394,9 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
break;
case S_DIN_to_DES:
-if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
goto data_size_err;
-if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
areq_ctx->is_single_pass = false;
break;
default:
@@ -2024,7 +2023,7 @@ static int ssi_aead_process(struct aead_request *req,
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
-if (unlikely(validate_data_size(ctx, direct, req))) {
+if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -2073,7 +2072,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
-if (unlikely(rc)) {
+if (rc) {
dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
rc);
goto exit;
@@ -2088,7 +2087,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
-if (unlikely(rc)) {
+if (rc) {
dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
rc);
goto exit;
@@ -2097,7 +2096,7 @@ static int ssi_aead_process(struct aead_request *req,
#endif /*SSI_CC_HAS_AES_GCM*/
rc = cc_map_aead_request(ctx->drvdata, req);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2173,7 +2172,7 @@ static int ssi_aead_process(struct aead_request *req,
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_aead_request(dev, req);
}
@@ -2829,7 +2828,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
goto fail2;
...
@@ -247,7 +247,7 @@ static int cc_generate_mlli(
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
&mlli_params->mlli_dma_addr);
-if (unlikely(!mlli_params->mlli_virt_addr)) {
+if (!mlli_params->mlli_virt_addr) {
dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
@@ -350,7 +350,7 @@ cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
-if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
+if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
@@ -379,7 +379,7 @@ static int cc_map_sg(
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
-if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+if (dma_map_sg(dev, sg, 1, direction) != 1) {
dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
@@ -403,7 +403,7 @@ static int cc_map_sg(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-if (unlikely(*mapped_nents == 0)) {
+if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -414,7 +414,7 @@ static int cc_map_sg(
*/
*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
direction);
-if (unlikely(*mapped_nents != *nents)) {
+if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -436,8 +436,7 @@ ssi_aead_handle_config_buf(struct device *dev,
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
-if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
-DMA_TO_DEVICE) != 1)) {
+if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
@@ -463,8 +462,7 @@ static int ssi_ahash_handle_curr_buf(struct device *dev,
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
-if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
-DMA_TO_DEVICE) != 1)) {
+if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
@@ -490,7 +488,7 @@ void cc_unmap_blkcipher_request(
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
-if (likely(req_ctx->gen_ctx.iv_dma_addr)) {
+if (req_ctx->gen_ctx.iv_dma_addr) {
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
@@ -537,15 +535,14 @@ int cc_map_blkcipher_request(
sg_data.num_of_buffers = 0;
/* Map IV buffer */
-if (likely(ivsize)) {
+if (ivsize) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev,
-req_ctx->gen_ctx.iv_dma_addr))) {
+if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
return -ENOMEM;
@@ -559,16 +556,16 @@ int cc_map_blkcipher_request(
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-if (unlikely(rc)) {
+if (rc) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
-if (unlikely(src == dst)) {
+if (src == dst) {
/* Handle inplace operation */
-if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
req_ctx->out_nents = 0;
cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
nbytes, 0, true,
@@ -576,17 +573,16 @@ int cc_map_blkcipher_request(
}
} else {
/* Map the dst sg */
-if (unlikely(cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-&req_ctx->out_nents,
-LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-&mapped_nents))) {
+if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+&dummy, &mapped_nents)) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
-if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
nbytes, 0, true,
&req_ctx->in_mlli_nents);
@@ -596,10 +592,10 @@ int cc_map_blkcipher_request(
}
}
-if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params);
-if (unlikely(rc))
+if (rc)
goto ablkcipher_exit;
}
@@ -690,7 +686,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
cc_get_sgl_nents(dev, req->src, size_to_unmap,
&dummy, &chained),
DMA_BIDIRECTIONAL);
-if (unlikely(req->src != req->dst)) {
+if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
@@ -700,7 +696,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
-likely(req->src == req->dst)) {
+req->src == req->dst) {
/* copy back mac from temporary location to deal with possible
* data memory overriding that caused by cache coherence
* problem.
@@ -774,7 +770,7 @@ static int cc_aead_chain_iv(
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
-if (unlikely(!req->iv)) {
+if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
goto chain_iv_exit;
}
@@ -782,7 +778,7 @@ static int cc_aead_chain_iv(
areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
hw_iv_size,
DMA_BIDIRECTIONAL);
-if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
rc = -ENOMEM;
@@ -831,7 +827,7 @@ static int cc_aead_chain_assoc(
goto chain_assoc_exit;
}
-if (unlikely(req->assoclen == 0)) {
+if (req->assoclen == 0) {
areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -861,7 +857,7 @@ static int cc_aead_chain_assoc(
mapped_nents++;
}
}
-if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
@@ -872,8 +868,7 @@ static int cc_aead_chain_assoc(
* ccm header configurations
*/
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-if (unlikely((mapped_nents + 1) >
-LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
(areq_ctx->assoc.nents + 1),
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -882,14 +877,12 @@ static int cc_aead_chain_assoc(
}
}
-if (likely(mapped_nents == 1) &&
-areq_ctx->ccm_hdr_size == ccm_header_size_null)
+if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
else
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
-if (unlikely((do_chain) ||
-areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI)) {
+if ((do_chain) || areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
@@ -912,7 +905,7 @@ static void cc_prepare_aead_data_dlli(
unsigned int authsize = areq_ctx->req_authsize;
areq_ctx->is_icv_fragmented = false;
-if (likely(req->src == req->dst)) {
+if (req->src == req->dst) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
areq_ctx->src_sgl) +
@@ -952,7 +945,7 @@ static int cc_prepare_aead_data_mlli(
int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
-if (likely(req->src == req->dst)) {
+if (req->src == req->dst) {
/*INPLACE*/
cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
areq_ctx->src_sgl, areq_ctx->cryptlen,
@@ -963,12 +956,12 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->src.nents,
authsize, *src_last_bytes,
&areq_ctx->is_icv_fragmented);
-if (unlikely(icv_nents < 0)) {
+if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
-if (unlikely(areq_ctx->is_icv_fragmented)) {
+if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to
* simplify MAC verification upon request completion
@@ -1013,7 +1006,7 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->src.nents,
authsize, *src_last_bytes,
&areq_ctx->is_icv_fragmented);
-if (unlikely(icv_nents < 0)) {
+if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
@@ -1022,7 +1015,7 @@ static int cc_prepare_aead_data_mlli(
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
-if (unlikely(areq_ctx->is_icv_fragmented)) {
+if (areq_ctx->is_icv_fragmented) {
cc_copy_mac(dev, req, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
@@ -1051,12 +1044,12 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->dst.nents,
authsize, *dst_last_bytes,
&areq_ctx->is_icv_fragmented);
-if (unlikely(icv_nents < 0)) {
+if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
-if (likely(!areq_ctx->is_icv_fragmented)) {
+if (!areq_ctx->is_icv_fragmented) {
/* Contig. ICV */
areq_ctx->icv_dma_addr = sg_dma_address(
&areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
@@ -1127,7 +1120,7 @@ static int cc_aead_chain_data(
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
-if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1148,7 +1141,7 @@ static int cc_aead_chain_data(
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
-if (unlikely(rc)) {
+if (rc) {
rc = -ENOMEM;
goto chain_data_exit;
}
@@ -1171,7 +1164,7 @@ static int cc_aead_chain_data(
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
-if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1271,7 +1264,7 @@ int cc_map_aead_request(
*/
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
-likely(req->src == req->dst))
+req->src == req->dst)
cc_copy_mac(dev, req, SSI_SG_TO_BUF);
/* cacluate the size for cipher remove ICV in decrypt*/
@@ -1282,7 +1275,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
@@ -1296,7 +1289,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
@@ -1317,7 +1310,7 @@ int cc_map_aead_request(
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
@@ -1327,7 +1320,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
@@ -1338,7 +1331,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
@@ -1350,7 +1343,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, dma_addr))) {
+if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
@@ -1372,12 +1365,12 @@ int cc_map_aead_request(
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
-if (unlikely(rc)) {
+if (rc) {
rc = -ENOMEM;
goto aead_map_failure;
}
-if (likely(areq_ctx->is_single_pass)) {
+if (areq_ctx->is_single_pass) {
/*
* Create MLLI table for:
* (1) Assoc. data
@@ -1385,13 +1378,13 @@ int cc_map_aead_request(
* Note: IV is contg. buffer (not an SGL)
*/
rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
} else { /* DOUBLE-PASS flow */
/*
@@ -1415,25 +1408,24 @@ int cc_map_aead_request(
* (4) MLLI for dst
*/
rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
}
/* Mlli support -start building the MLLI according to the above
* results
*/
-if (unlikely(
-areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
-areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI)) {
+if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
+areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params);
-if (unlikely(rc))
+if (rc)
goto aead_map_failure;
cc_update_aead_mlli_nents(drvdata, req);
@@ -1473,7 +1465,7 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
-if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+if (nbytes == 0 && *curr_buff_cnt == 0) {
/* nothing to do */
return 0;
}
@@ -1488,10 +1480,9 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
}
if (src && nbytes > 0 && do_update) {
-if (unlikely(cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-&areq_ctx->in_nents,
-LLI_MAX_NUM_OF_DATA_ENTRIES,
-&dummy, &mapped_nents))) {
+if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+&areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+&dummy, &mapped_nents)) {
goto unmap_curr_buff;
}
if (src && mapped_nents == 1 &&
@@ -1507,12 +1498,12 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
}
/*build mlli */
-if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
-if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params)))
+if (cc_generate_mlli(dev, &sg_data, mlli_params))
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1563,7 +1554,7 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
-if (unlikely(total_in_len < block_size)) {
+if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
@@ -1604,11 +1595,10 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
}
if (update_data_len > *curr_buff_cnt) {
-if (unlikely(cc_map_sg(dev, src,
-(update_data_len - *curr_buff_cnt),
-DMA_TO_DEVICE, &areq_ctx->in_nents,
-LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-&mapped_nents))) {
+if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+DMA_TO_DEVICE, &areq_ctx->in_nents,
+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+&mapped_nents)) {
goto unmap_curr_buff;
}
if (mapped_nents == 1 &&
@@ -1624,13 +1614,13 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
}
}
-if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
-if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params)))
+if (cc_generate_mlli(dev, &sg_data, mlli_params))
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1666,7 +1656,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
areq_ctx->mlli_params.mlli_dma_addr);
}
-if ((src) && likely(areq_ctx->in_nents)) {
+if ((src) && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
@@ -1707,7 +1697,7 @@ int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
-if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
+if (!buff_mgr_handle->mlli_buffs_pool)
goto error;
return 0;
...
@@ -76,30 +76,30 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
-if (likely(ctx_p->cipher_mode != DRV_CIPHER_XTS &&
-ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
-ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER))
+if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
-if (likely(ctx_p->cipher_mode == DRV_CIPHER_XTS ||
-ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
-ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER))
+if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
return 0;
break;
default:
break;
}
case S_DIN_to_DES:
-if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
+if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
-if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
+if (size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE)
return 0;
break;
#endif
@@ -122,7 +122,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
return 0;
break;
case DRV_CIPHER_CBC_CTS:
-if (likely(size >= AES_BLOCK_SIZE))
+if (size >= AES_BLOCK_SIZE)
return 0;
break;
case DRV_CIPHER_OFB:
@@ -132,7 +132,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
-if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
default:
@@ -140,14 +140,14 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
}
break;
case S_DIN_to_DES:
-if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
+if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
switch (ctx_p->cipher_mode) {
case DRV_MULTI2_CBC:
-if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
+if (IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE))
return 0;
break;
case DRV_MULTI2_OFB:
@@ -272,10 +272,10 @@ static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
-if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
-sizeof(tdes_key->key1)) == 0) ||
-(memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
-sizeof(tdes_key->key3)) == 0))) {
+if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+sizeof(tdes_key->key1)) == 0) ||
+(memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+sizeof(tdes_key->key3)) == 0)) {
return -ENOEXEC;
}
@@ -320,7 +320,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
keylen -= 1;
#endif /*SSI_CC_HAS_MULTI2*/
-if (unlikely(validate_keys_sizes(ctx_p, keylen))) {
+if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -330,13 +330,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* setting HW key slots */
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
-if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+if (ctx_p->flow_mode != S_DIN_to_AES) {
dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
-if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+if (ctx_p->hw.key1_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key1 number (%d)\n",
hki->hw_key1);
return -EINVAL;
@@ -345,14 +345,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
-if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+if (hki->hw_key1 == hki->hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
ctx_p->hw.key2_slot =
hw_key_to_cc_hw_key(hki->hw_key2);
-if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+if (ctx_p->hw.key2_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key2 number (%d)\n",
hki->hw_key2);
return -EINVAL;
@@ -367,7 +367,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
// verify weak keys
if (ctx_p->flow_mode == S_DIN_to_DES) {
-if (unlikely(!des_ekey(tmp, key)) &&
+if (!des_ekey(tmp, key) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
@@ -637,7 +637,7 @@ ssi_blkcipher_create_data_desc(
return;
}
/* Process */
-if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
+if (req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
&sg_dma_address(src), nbytes);
dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -760,7 +760,7 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
-if (unlikely(validate_data_size(ctx_p, nbytes))) {
+if (validate_data_size(ctx_p, nbytes)) {
dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
@@ -806,7 +806,7 @@ static int ssi_blkcipher_process(
rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
req_ctx->iv, src, dst);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -839,7 +839,7 @@ static int ssi_blkcipher_process(
rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len,
(!areq) ? 0 : 1);
if (areq) {
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
/* Failed to send the request or request completed
* synchronously
*/
@@ -1364,7 +1364,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
rc = crypto_register_alg(&t_alg->crypto_alg);
dev_dbg(dev, "%s alg registration rc = %x\n",
t_alg->crypto_alg.cra_driver_name, rc);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
...
...@@ -100,7 +100,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) ...@@ -100,7 +100,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */ /* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr); dev_dbg(dev, "Got IRR=0x%08X\n", irr);
if (unlikely(irr == 0)) { /* Probably shared interrupt line */ if (irr == 0) { /* Probably shared interrupt line */
dev_err(dev, "Got interrupt with empty IRR\n"); dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE; return IRQ_NONE;
} }
...@@ -111,7 +111,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) ...@@ -111,7 +111,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr; drvdata->irq = irr;
/* Completion interrupt - most probable */ /* Completion interrupt - most probable */
if (likely((irr & SSI_COMP_IRQ_MASK))) { if ((irr & SSI_COMP_IRQ_MASK)) {
/* Mask AXI completion interrupt - will be unmasked in /* Mask AXI completion interrupt - will be unmasked in
* Deferred service handler * Deferred service handler
*/ */
...@@ -121,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) ...@@ -121,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
} }
#ifdef CC_SUPPORT_FIPS #ifdef CC_SUPPORT_FIPS
/* TEE FIPS interrupt */ /* TEE FIPS interrupt */
if (likely((irr & SSI_GPR0_IRQ_MASK))) { if ((irr & SSI_GPR0_IRQ_MASK)) {
/* Mask interrupt - will be unmasked in Deferred service /* Mask interrupt - will be unmasked in Deferred service
* handler * handler
*/ */
...@@ -131,7 +131,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) ...@@ -131,7 +131,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
} }
#endif #endif
/* AXI error interrupt */ /* AXI error interrupt */
if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK))) { if ((irr & SSI_AXI_ERR_IRQ_MASK)) {
u32 axi_err; u32 axi_err;
/* Read the AXI error ID */ /* Read the AXI error ID */
...@@ -142,7 +142,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) ...@@ -142,7 +142,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
irr &= ~SSI_AXI_ERR_IRQ_MASK; irr &= ~SSI_AXI_ERR_IRQ_MASK;
} }
if (unlikely(irr)) { if (irr) {
dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n", dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
irr); irr);
/* Just warning */ /* Just warning */
@@ -295,78 +295,78 @@ static int init_cc_resources(struct platform_device *plat_dev)
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "init_cc_regs failed\n");
goto post_clk_err;
}
#ifdef ENABLE_CC_SYSFS
rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "init_stat_db failed\n");
goto post_regs_err;
}
#endif
rc = ssi_fips_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
goto post_sysfs_err;
}
rc = ssi_sram_mgr_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_sram_mgr_init failed\n");
goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
-if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
rc = request_mgr_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "request_mgr_init failed\n");
goto post_sram_mgr_err;
}
rc = cc_buffer_mgr_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_power_mgr_init failed\n");
goto post_buf_mgr_err;
}
rc = ssi_ivgen_init(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_ivgen_init failed\n");
goto post_power_mgr_err;
}
/* Allocate crypto algs */
rc = ssi_ablkcipher_alloc(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_ablkcipher_alloc failed\n");
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = ssi_hash_alloc(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_hash_alloc failed\n");
goto post_cipher_err;
}
rc = ssi_aead_alloc(new_drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "ssi_aead_alloc failed\n");
goto post_hash_err;
}
...
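The probe path above follows the standard kernel shape: bring each subsystem up in order and, on failure, unwind everything already initialized in reverse via the goto labels. A self-contained sketch of that pattern with hypothetical names (these are not ccree APIs):

struct example_drvdata { int a_up; int b_up; };

static int subsys_a_init(struct example_drvdata *d) { d->a_up = 1; return 0; }
static void subsys_a_fini(struct example_drvdata *d) { d->a_up = 0; }
static int subsys_b_init(struct example_drvdata *d) { d->b_up = 1; return 0; }
static void subsys_b_fini(struct example_drvdata *d) { d->b_up = 0; }
static int subsys_c_init(struct example_drvdata *d) { (void)d; return -1; }

static int example_probe(struct example_drvdata *d)
{
	int rc;

	rc = subsys_a_init(d);
	if (rc)
		return rc;
	rc = subsys_b_init(d);
	if (rc)
		goto err_a;
	rc = subsys_c_init(d);
	if (rc)
		goto err_b;	/* unwind B, then A, in reverse order */
	return 0;

err_b:
	subsys_b_fini(d);
err_a:
	subsys_a_fini(d);
	return rc;
}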
@@ -116,9 +116,8 @@ static void ssi_hash_create_data_desc(
static void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
-if (unlikely(mode == DRV_HASH_MD5 ||
-mode == DRV_HASH_SHA384 ||
-mode == DRV_HASH_SHA512)) {
+if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+mode == DRV_HASH_SHA512) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@@ -133,7 +132,7 @@ static int ssi_hash_map_result(struct device *dev,
dma_map_single(dev, (void *)state->digest_result_buff,
digestsize,
DMA_BIDIRECTIONAL);
-if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
+if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
digestsize);
return -ENOMEM;
@@ -219,8 +218,8 @@ static int ssi_hash_map_request(struct device *dev,
memcpy(state->digest_buff, ctx->digest_buff,
ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
-if (unlikely(ctx->hash_mode == DRV_HASH_SHA512 ||
-ctx->hash_mode == DRV_HASH_SHA384))
+if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
digest_len_sha512_init, HASH_LEN_SIZE);
else
@@ -254,7 +253,7 @@ static int ssi_hash_map_request(struct device *dev,
set_flow_mode(&desc, BYPASS);
rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto fail4;
}
@@ -446,18 +445,17 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
-if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
+if (ssi_hash_map_request(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state,
-src, nbytes, 1))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -494,7 +492,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
NS_BIT);
} else {
set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
-if (likely(nbytes))
+if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
set_cipher_do(&desc[idx], DO_PAD);
@@ -576,7 +574,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -619,7 +617,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
block_size);
-if (unlikely(rc)) {
+if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
nbytes);
@@ -677,7 +675,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
}
@@ -711,12 +709,11 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
-nbytes, 1))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -809,7 +806,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -847,13 +844,12 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
-nbytes, 0))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -955,7 +951,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -1019,8 +1015,7 @@ static int ssi_ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.key_dma_addr = dma_map_single(
dev, (void *)key,
keylen, DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev,
-ctx->key_params.key_dma_addr))) {
+if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1105,7 +1100,7 @@ static int ssi_ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1201,7 +1196,7 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.key_dma_addr = dma_map_single(
dev, (void *)key,
keylen, DMA_TO_DEVICE);
-if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1415,7 +1410,7 @@ static int ssi_mac_update(struct ahash_request *req)
rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
req->nbytes, block_size);
-if (unlikely(rc)) {
+if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
req->nbytes);
@@ -1448,7 +1443,7 @@ static int ssi_mac_update(struct ahash_request *req)
ssi_req.user_arg = (void *)req;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
}
@@ -1482,13 +1477,13 @@ static int ssi_mac_final(struct ahash_request *req)
dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
-req->nbytes, 0))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+req->nbytes, 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1562,7 +1557,7 @@ static int ssi_mac_final(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -1589,12 +1584,12 @@ static int ssi_mac_finup(struct ahash_request *req)
return ssi_mac_final(req);
}
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
-req->nbytes, 1))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+req->nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1635,7 +1630,7 @@ static int ssi_mac_finup(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -1658,17 +1653,17 @@ static int ssi_mac_digest(struct ahash_request *req)
dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
-if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
+if (ssi_hash_map_request(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
-if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
-if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
-req->nbytes, 1))) {
+if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+req->nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -1709,7 +1704,7 @@ static int ssi_mac_digest(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
-if (unlikely(rc != -EINPROGRESS)) {
+if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -2153,7 +2148,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(digest_len_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_init);
@@ -2165,7 +2160,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(digest_len_sha512_init),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_sha512_init);
@@ -2180,7 +2175,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(md5_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(md5_init);
larval_seq_len = 0;
@@ -2189,7 +2184,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha1_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha1_init);
larval_seq_len = 0;
@@ -2198,7 +2193,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha224_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha224_init);
larval_seq_len = 0;
@@ -2207,7 +2202,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha256_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc))
+if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
@@ -2228,7 +2223,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
@@ -2246,7 +2241,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
@@ -2295,7 +2290,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
/*must be set before the alg registration as it is being used there*/
rc = ssi_hash_init_sram_digest_consts(drvdata);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
@@ -2316,7 +2311,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2341,7 +2336,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2480,7 +2475,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
-if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
+if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI) {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
...
@@ -143,7 +143,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/* Generate initial pool */
rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
-if (unlikely(rc))
+if (rc)
return rc;
/* Fire-and-forget */
...
@@ -115,7 +115,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
#ifdef COMP_IN_WQ
dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
-if (unlikely(!req_mgr_h->workq)) {
+if (!req_mgr_h->workq) {
dev_err(dev, "Failed creating work queue\n");
rc = -ENOMEM;
goto req_mgr_init_err;
@@ -214,27 +214,25 @@ static int request_mgr_queues_status_check(
* be chaned during the poll because the spinlock_bh
* is held by the thread
*/
-if (unlikely(((req_mgr_h->req_queue_head + 1) &
-(MAX_REQUEST_QUEUE_SIZE - 1)) ==
-req_mgr_h->req_queue_tail)) {
+if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+req_mgr_h->req_queue_tail) {
dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
-if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
+if ((req_mgr_h->q_free_slots >= total_seq_len))
return 0;
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
-if (unlikely(req_mgr_h->q_free_slots <
-req_mgr_h->min_free_hw_slots)) {
+if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
}
-if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
+if (req_mgr_h->q_free_slots >= total_seq_len) {
/* If there is enough place return */
return 0;
}
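The full/used checks above rely on MAX_REQUEST_QUEUE_SIZE being a power of two, so head/tail wrap-around reduces to a bit mask. A standalone sketch of that arithmetic, with hypothetical names rather than driver code:

#define QUEUE_SIZE 8u	/* must be a power of two for the mask trick */

/* Slots currently occupied between tail (oldest entry) and head (next free). */
static unsigned int queue_used(unsigned int head, unsigned int tail)
{
	return (head - tail) & (QUEUE_SIZE - 1);
}

/* One slot is kept unused so that "full" stays distinguishable from "empty". */
static int queue_full(unsigned int head, unsigned int tail)
{
	return ((head + 1) & (QUEUE_SIZE - 1)) == tail;
}

/* e.g. head = 7, tail = 0: queue_used() = 7 and queue_full() = 1,
 * since (7 + 1) & 7 == 0 == tail. */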
@@ -296,7 +294,7 @@ int send_request(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
max_required_seq_len);
-if (likely(rc == 0))
+if (rc == 0)
/* There is enough place in the queue */
break;
/* something wrong release the spinlock*/
@@ -340,7 +338,7 @@ int send_request(
ssi_req->ivgen_dma_addr_len,
ssi_req->ivgen_size, iv_seq, &iv_seq_len);
-if (unlikely(rc)) {
+if (rc) {
dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM)
@@ -355,7 +353,7 @@ int send_request(
used_sw_slots = ((req_mgr_h->req_queue_head -
req_mgr_h->req_queue_tail) &
(MAX_REQUEST_QUEUE_SIZE - 1));
-if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
+if (used_sw_slots > req_mgr_h->max_used_sw_slots)
req_mgr_h->max_used_sw_slots = used_sw_slots;
/* Enqueue request - must be locked with HW lock*/
@@ -381,7 +379,7 @@ int send_request(
enqueue_seq(cc_base, desc, len);
enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
-if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
+if (req_mgr_h->q_free_slots < total_seq_len) {
/* This situation should never occur. Maybe indicating problem
* with resuming power. Set the free slot count to 0 and hope
* for the best.
@@ -429,7 +427,7 @@ int send_request_init(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
total_seq_len);
-if (unlikely(rc))
+if (rc)
return rc;
set_queue_last_ind(&desc[(len - 1)]);
@@ -489,7 +487,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
request_mgr_handle->axi_completed--;
/* Dequeue request */
-if (unlikely(*head == *tail)) {
+if (*head == *tail) {
/* We are supposed to handle a completion but our
* queue is empty. This is not normal. Return and
* hope for the best.
@@ -518,7 +516,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
}
#endif /* COMPLETION_DELAY */
-if (likely(ssi_req->user_cb))
+if (ssi_req->user_cb)
ssi_req->user_cb(dev, ssi_req->user_arg);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
...
@@ -75,12 +75,12 @@ ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
struct device *dev = drvdata_to_dev(drvdata);
ssi_sram_addr_t p;
-if (unlikely((size & 0x3))) {
+if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
-if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
+if (size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
...
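cc_sram_alloc above behaves as a simple bump allocator over the device SRAM: it rejects sizes that are not 4-byte aligned (size & 0x3) or that would overflow the remaining window, then advances the free offset. A self-contained sketch of the same shape, with hypothetical names and sizes:

#define EXAMPLE_SRAM_SIZE 4096u

static unsigned int example_free_offset;	/* next unused byte of SRAM */

static long example_sram_alloc(unsigned int size)
{
	unsigned int p;

	if (size & 0x3)		/* e.g. size = 6: 6 & 3 = 2, not 4-byte aligned */
		return -1;
	if (size > EXAMPLE_SRAM_SIZE - example_free_offset)
		return -1;	/* would overflow the remaining window */
	p = example_free_offset;
	example_free_offset += size;	/* bump the offset; there is no free() */
	return p;
}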