Commit b4562b61 authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: remove redundant blank lines

Remove redundant blank lines in brace blocks
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8683e627
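
For illustration only, here is a minimal before/after sketch of the cleanup this patch applies throughout the diff below; the function name is made up and is not taken from the ccree driver. The change drops blank lines that sit directly inside a brace block (typically right after the opening brace), which add no information:

    /* Before: redundant blank line directly after the opening brace
     * (hypothetical example, not code from the driver).
     */
    static int example_validate(int rc)
    {

        if (rc != 0)
            return -EINVAL;
        return 0;
    }

    /* After: the block body starts immediately, matching kernel coding style. */
    static int example_validate(int rc)
    {
        if (rc != 0)
            return -EINVAL;
        return 0;
    }

Each hunk below removes exactly one such blank line, which is why every @@ header shows the new span one line shorter than the old one (for example, -2042,7 +2042,6).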
@@ -2042,7 +2042,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
    /* do we need to generate IV? */
    if (areq_ctx->backup_giv != NULL) {
        /* set the DMA mapped IV address*/
        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
            ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
@@ -2181,7 +2180,6 @@ static int ssi_aead_decrypt(struct aead_request *req)
        req->iv = areq_ctx->backup_iv;
    return rc;
}
#if SSI_CC_HAS_AES_CCM
...
@@ -608,7 +608,6 @@ int ssi_buffer_mgr_map_blkcipher_request(
        rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
        if (unlikely(rc != 0))
            goto ablkcipher_exit;
    }
    SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
@@ -877,7 +876,6 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
    if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
        if (unlikely((mapped_nents + 1) >
                     LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
            SSI_LOG_ERR("CCM case.Too many fragments. "
                        "Current %d max %d\n",
                        (areq_ctx->assoc.nents + 1),
@@ -895,7 +893,6 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
    if (unlikely((do_chain) ||
                 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
        SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
                      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
                      areq_ctx->assoc.nents);
@@ -1178,7 +1175,6 @@ static inline int ssi_buffer_mgr_aead_chain_data(
        //check where the data starts
        while (sg_index <= size_to_skip) {
            offset -= areq_ctx->dstSgl->length;
            areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
            //if have reached the end of the sgl, then this is unexpected
@@ -1450,7 +1446,6 @@ int ssi_buffer_mgr_map_aead_request(
    if (unlikely(
        (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
        (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
        mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
        rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
        if (unlikely(rc != 0)) {
@@ -1528,7 +1523,6 @@ int ssi_buffer_mgr_map_hash_request_final(
        } else {
            areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
        }
    }
    /*build mlli */
@@ -1675,7 +1669,6 @@ int ssi_buffer_mgr_map_hash_request_update(
                mlli_params) != 0)) {
            goto fail_unmap_din;
        }
    }
    areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1771,7 +1764,6 @@ int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
        dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
        kfree(drvdata->buff_mgr_handle);
        drvdata->buff_mgr_handle = NULL;
    }
    return 0;
}
@@ -104,7 +104,6 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
#endif
    default:
        break;
    }
    return -EINVAL;
}
@@ -158,7 +157,6 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
#endif /*SSI_CC_HAS_MULTI2*/
    default:
        break;
    }
    return -EINVAL;
}
@@ -498,7 +496,6 @@ ssi_blkcipher_create_setup_desc(
        set_cipher_mode(&desc[*seq_size], cipher_mode);
        set_cipher_config0(&desc[*seq_size], direction);
        if (flow_mode == S_DIN_to_AES) {
            if (ssi_is_hw_key(tfm)) {
                set_hw_crypto_key(&desc[*seq_size],
                                  ctx_p->hw.key1_slot);
@@ -616,7 +613,6 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
    set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
    set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
    (*seq_size)++;
}
#endif /*SSI_CC_HAS_MULTI2*/
@@ -782,7 +778,6 @@ static int ssi_blkcipher_process(
    }
    /*For CTS in case of data size aligned to 16 use CBC mode*/
    if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
        ctx_p->cipher_mode = DRV_CIPHER_CBC;
        cts_restore_flag = 1;
    }
...
@@ -444,7 +444,6 @@ void fini_cc_regs(struct ssi_drvdata *drvdata)
    /* Mask all interrupts */
    WRITE_REGISTER(drvdata->cc_base +
                   CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
}
static void cleanup_cc_resources(struct platform_device *plat_dev)
...
@@ -1323,7 +1323,6 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
    }
    ctx->key_params.keylen = 0;
}
@@ -2365,7 +2364,6 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
    struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
    if (hash_handle != NULL) {
        list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
            crypto_unregister_ahash(&t_hash_alg->ahash_alg);
            list_del(&t_hash_alg->entry);
...
@@ -115,7 +115,6 @@ int ssi_power_mgr_runtime_put_suspend(struct device *dev)
        BUG();
    }
    return rc;
}
#endif
...
@@ -549,7 +549,6 @@ static void comp_handler(unsigned long devarg)
            request_mgr_handle->axi_completed +=
                cc_axi_comp_count(cc_base);
        }
    }
    /* after verifing that there is nothing to do, Unmask AXI completion interrupt */
    CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
...