Commit 9db83b4e authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: remove unproven likely/unlikely

The ccree code made heavy use of likely/unlikely annotations without
any measurements proving a benefit. Remove them all until we can see
which ones, if any, are justified.
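
For context, likely() and unlikely() are thin wrappers around GCC's __builtin_expect()
(see include/linux/compiler.h) that tell the compiler which way a branch is expected
to go, so it can lay out the predicted path as the fall-through. A wrong or unproven
hint can do the opposite of what was intended and push a frequently taken path out of
line. Below is a minimal, standalone sketch of the before/after pattern this patch
applies; send_request_stub() is only an illustrative stand-in, not a driver function:

#include <stdio.h>

/* Same shape as the kernel's macros: hint the expected truth value
 * of the condition so the compiler can optimize branch layout.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative stand-in for a call that usually succeeds. */
static int send_request_stub(int fail)
{
	return fail ? -1 : 0;
}

int main(void)
{
	int rc;

	/* Annotated form, as the driver read before this patch. */
	rc = send_request_stub(0);
	if (unlikely(rc))
		printf("send_request() failed (rc=%d)\n", rc);

	/* Plain form, as the driver reads after this patch. Without
	 * profiling data there is no evidence the hint is correct,
	 * so the unannotated branch is kept.
	 */
	rc = send_request_stub(0);
	if (rc)
		printf("send_request() failed (rc=%d)\n", rc);

	return 0;
}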
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8977c824
@@ -251,7 +251,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req)
err = -EBADMSG;
}
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented)) {
if (areq_ctx->is_icv_fragmented) {
u32 skip = areq->cryptlen + areq_ctx->dst_offset;
cc_copy_sg_portion(dev, areq_ctx->mac_buf,
@@ -412,7 +412,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
}
/* Check cipher key size */
if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
@@ -465,10 +465,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
hashmode = DRV_HASH_HW_SHA256;
}
if (likely(keylen != 0)) {
if (keylen != 0) {
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -547,10 +547,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (unlikely(rc))
if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
if (likely(key_dma_addr))
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
return rc;
@@ -607,7 +607,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
rc = validate_keys_sizes(ctx);
if (unlikely(rc))
if (rc)
goto badkey;
/* STAT_PHASE_1: Copy key to ctx */
@@ -646,7 +646,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
@@ -818,7 +818,7 @@ ssi_aead_process_authenc_data_desc(
ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
if (likely(areq_ctx->is_single_pass)) {
if (areq_ctx->is_single_pass) {
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
mlli_addr = areq_ctx->dst.sram_addr;
mlli_nents = areq_ctx->dst.mlli_nents;
@@ -1202,10 +1202,9 @@ static void ssi_aead_load_mlli_to_sram(
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (unlikely(
req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
!req_ctx->is_single_pass)) {
if (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
!req_ctx->is_single_pass) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1231,17 +1230,17 @@ static enum cc_flow_mode ssi_aead_get_data_flow_mode(
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
if (setup_flow_mode == S_DIN_to_AES)
data_flow_mode = likely(is_single_pass) ?
data_flow_mode = is_single_pass ?
AES_to_HASH_and_DOUT : DIN_AES_DOUT;
else
data_flow_mode = likely(is_single_pass) ?
data_flow_mode = is_single_pass ?
DES_to_HASH_and_DOUT : DIN_DES_DOUT;
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
data_flow_mode = likely(is_single_pass) ?
data_flow_mode = is_single_pass ?
AES_and_HASH : DIN_AES_DOUT;
else
data_flow_mode = likely(is_single_pass) ?
data_flow_mode = is_single_pass ?
DES_and_HASH : DIN_DES_DOUT;
}
@@ -1367,16 +1366,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
if (unlikely(direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
req->cryptlen < ctx->authsize))
if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
req->cryptlen < ctx->authsize)
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
switch (ctx->flow_mode) {
case S_DIN_to_AES:
if (unlikely(ctx->cipher_mode == DRV_CIPHER_CBC &&
!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
if (ctx->cipher_mode == DRV_CIPHER_CBC &&
!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1395,9 +1394,9 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
break;
case S_DIN_to_DES:
if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
goto data_size_err;
if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
areq_ctx->is_single_pass = false;
break;
default:
@@ -2024,7 +2023,7 @@ static int ssi_aead_process(struct aead_request *req,
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req))) {
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -2073,7 +2072,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
if (unlikely(rc)) {
if (rc) {
dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
rc);
goto exit;
@@ -2088,7 +2087,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
if (unlikely(rc)) {
if (rc) {
dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
rc);
goto exit;
@@ -2097,7 +2096,7 @@ static int ssi_aead_process(struct aead_request *req,
#endif /*SSI_CC_HAS_AES_GCM*/
rc = cc_map_aead_request(ctx->drvdata, req);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2173,7 +2172,7 @@ static int ssi_aead_process(struct aead_request *req,
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
if (unlikely(rc != -EINPROGRESS)) {
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_aead_request(dev, req);
}
@@ -2829,7 +2828,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
goto fail2;
@@ -76,30 +76,30 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
if (likely(ctx_p->cipher_mode != DRV_CIPHER_XTS &&
ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER))
if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
if (likely(ctx_p->cipher_mode == DRV_CIPHER_XTS ||
ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER))
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
return 0;
break;
default:
break;
}
case S_DIN_to_DES:
if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
if (size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE)
return 0;
break;
#endif
@@ -122,7 +122,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
return 0;
break;
case DRV_CIPHER_CBC_CTS:
if (likely(size >= AES_BLOCK_SIZE))
if (size >= AES_BLOCK_SIZE)
return 0;
break;
case DRV_CIPHER_OFB:
@@ -132,7 +132,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
default:
@@ -140,14 +140,14 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
}
break;
case S_DIN_to_DES:
if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
switch (ctx_p->cipher_mode) {
case DRV_MULTI2_CBC:
if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
if (IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE))
return 0;
break;
case DRV_MULTI2_OFB:
@@ -272,10 +272,10 @@ static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
sizeof(tdes_key->key1)) == 0) ||
(memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
sizeof(tdes_key->key3)) == 0))) {
if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
sizeof(tdes_key->key1)) == 0) ||
(memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
sizeof(tdes_key->key3)) == 0)) {
return -ENOEXEC;
}
@@ -320,7 +320,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
keylen -= 1;
#endif /*SSI_CC_HAS_MULTI2*/
if (unlikely(validate_keys_sizes(ctx_p, keylen))) {
if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -330,13 +330,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* setting HW key slots */
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
if (ctx_p->flow_mode != S_DIN_to_AES) {
dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
if (ctx_p->hw.key1_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key1 number (%d)\n",
hki->hw_key1);
return -EINVAL;
@@ -345,14 +345,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
if (unlikely(hki->hw_key1 == hki->hw_key2)) {
if (hki->hw_key1 == hki->hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
ctx_p->hw.key2_slot =
hw_key_to_cc_hw_key(hki->hw_key2);
if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
if (ctx_p->hw.key2_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key2 number (%d)\n",
hki->hw_key2);
return -EINVAL;
@@ -367,7 +367,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
// verify weak keys
if (ctx_p->flow_mode == S_DIN_to_DES) {
if (unlikely(!des_ekey(tmp, key)) &&
if (!des_ekey(tmp, key) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
@@ -637,7 +637,7 @@ ssi_blkcipher_create_data_desc(
return;
}
/* Process */
if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
if (req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
&sg_dma_address(src), nbytes);
dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -760,7 +760,7 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
if (validate_data_size(ctx_p, nbytes)) {
dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
@@ -806,7 +806,7 @@ static int ssi_blkcipher_process(
rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
req_ctx->iv, src, dst);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -839,7 +839,7 @@ static int ssi_blkcipher_process(
rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len,
(!areq) ? 0 : 1);
if (areq) {
if (unlikely(rc != -EINPROGRESS)) {
if (rc != -EINPROGRESS) {
/* Failed to send the request or request completed
* synchronously
*/
@@ -1364,7 +1364,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
rc = crypto_register_alg(&t_alg->crypto_alg);
dev_dbg(dev, "%s alg registration rc = %x\n",
t_alg->crypto_alg.cra_driver_name, rc);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
@@ -100,7 +100,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
if (unlikely(irr == 0)) { /* Probably shared interrupt line */
if (irr == 0) { /* Probably shared interrupt line */
dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
@@ -111,7 +111,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
if (likely((irr & SSI_COMP_IRQ_MASK))) {
if ((irr & SSI_COMP_IRQ_MASK)) {
/* Mask AXI completion interrupt - will be unmasked in
* Deferred service handler
*/
@@ -121,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#ifdef CC_SUPPORT_FIPS
/* TEE FIPS interrupt */
if (likely((irr & SSI_GPR0_IRQ_MASK))) {
if ((irr & SSI_GPR0_IRQ_MASK)) {
/* Mask interrupt - will be unmasked in Deferred service
* handler
*/
@@ -131,7 +131,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#endif
/* AXI error interrupt */
if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK))) {
if ((irr & SSI_AXI_ERR_IRQ_MASK)) {
u32 axi_err;
/* Read the AXI error ID */
@@ -142,7 +142,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
irr &= ~SSI_AXI_ERR_IRQ_MASK;
}
if (unlikely(irr)) {
if (irr) {
dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
irr);
/* Just warning */
@@ -295,78 +295,78 @@ static int init_cc_resources(struct platform_device *plat_dev)
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "init_cc_regs failed\n");
goto post_clk_err;
}
#ifdef ENABLE_CC_SYSFS
rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "init_stat_db failed\n");
goto post_regs_err;
}
#endif
rc = ssi_fips_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
goto post_sysfs_err;
}
rc = ssi_sram_mgr_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_sram_mgr_init failed\n");
goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
rc = request_mgr_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "request_mgr_init failed\n");
goto post_sram_mgr_err;
}
rc = cc_buffer_mgr_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_power_mgr_init failed\n");
goto post_buf_mgr_err;
}
rc = ssi_ivgen_init(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_ivgen_init failed\n");
goto post_power_mgr_err;
}
/* Allocate crypto algs */
rc = ssi_ablkcipher_alloc(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_ablkcipher_alloc failed\n");
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = ssi_hash_alloc(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_hash_alloc failed\n");
goto post_cipher_err;
}
rc = ssi_aead_alloc(new_drvdata);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "ssi_aead_alloc failed\n");
goto post_hash_err;
}
@@ -143,7 +143,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/* Generate initial pool */
rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
if (unlikely(rc))
if (rc)
return rc;
/* Fire-and-forget */
@@ -115,7 +115,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
#ifdef COMP_IN_WQ
dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
if (unlikely(!req_mgr_h->workq)) {
if (!req_mgr_h->workq) {
dev_err(dev, "Failed creating work queue\n");
rc = -ENOMEM;
goto req_mgr_init_err;
@@ -214,27 +214,25 @@ static int request_mgr_queues_status_check(
* be chaned during the poll because the spinlock_bh
* is held by the thread
*/
if (unlikely(((req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail) {
dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
if ((req_mgr_h->q_free_slots >= total_seq_len))
return 0;
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
}
if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
if (req_mgr_h->q_free_slots >= total_seq_len) {
/* If there is enough place return */
return 0;
}
@@ -296,7 +294,7 @@ int send_request(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
max_required_seq_len);
if (likely(rc == 0))
if (rc == 0)
/* There is enough place in the queue */
break;
/* something wrong release the spinlock*/
@@ -340,7 +338,7 @@ int send_request(
ssi_req->ivgen_dma_addr_len,
ssi_req->ivgen_size, iv_seq, &iv_seq_len);
if (unlikely(rc)) {
if (rc) {
dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM)
@@ -355,7 +353,7 @@ int send_request(
used_sw_slots = ((req_mgr_h->req_queue_head -
req_mgr_h->req_queue_tail) &
(MAX_REQUEST_QUEUE_SIZE - 1));
if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
if (used_sw_slots > req_mgr_h->max_used_sw_slots)
req_mgr_h->max_used_sw_slots = used_sw_slots;
/* Enqueue request - must be locked with HW lock*/
@@ -381,7 +379,7 @@ int send_request(
enqueue_seq(cc_base, desc, len);
enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
if (req_mgr_h->q_free_slots < total_seq_len) {
/* This situation should never occur. Maybe indicating problem
* with resuming power. Set the free slot count to 0 and hope
* for the best.
@@ -429,7 +427,7 @@ int send_request_init(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
total_seq_len);
if (unlikely(rc))
if (rc)
return rc;
set_queue_last_ind(&desc[(len - 1)]);
@@ -489,7 +487,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
request_mgr_handle->axi_completed--;
/* Dequeue request */
if (unlikely(*head == *tail)) {
if (*head == *tail) {
/* We are supposed to handle a completion but our
* queue is empty. This is not normal. Return and
* hope for the best.
@@ -518,7 +516,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
}
#endif /* COMPLETION_DELAY */
if (likely(ssi_req->user_cb))
if (ssi_req->user_cb)
ssi_req->user_cb(dev, ssi_req->user_arg);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
@@ -75,12 +75,12 @@ ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
struct device *dev = drvdata_to_dev(drvdata);
ssi_sram_addr_t p;
if (unlikely((size & 0x3))) {
if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
if (size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;