Commit c804f359 authored by Gilad Ben-Yossef's avatar Gilad Ben-Yossef Committed by Greg Kroah-Hartman

staging: ccree: use local vars for readability

Refactor cc_map_aead_request() to use local vars for addresses
for better readability of code.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d59c6d9c
...@@ -1259,7 +1259,7 @@ int cc_map_aead_request( ...@@ -1259,7 +1259,7 @@ int cc_map_aead_request(
int rc = 0; int rc = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
bool is_gcm4543 = areq_ctx->is_gcm4543; bool is_gcm4543 = areq_ctx->is_gcm4543;
dma_addr_t dma_addr;
u32 mapped_nents = 0; u32 mapped_nents = 0;
u32 dummy = 0; /*used for the assoc data fragments */ u32 dummy = 0; /*used for the assoc data fragments */
u32 size_to_map = 0; u32 size_to_map = 0;
...@@ -1281,32 +1281,31 @@ int cc_map_aead_request( ...@@ -1281,32 +1281,31 @@ int cc_map_aead_request(
req->cryptlen : req->cryptlen :
(req->cryptlen - authsize); (req->cryptlen - authsize);
areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf, dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
MAX_MAC_SIZE,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) { if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf); MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->mac_buf_dma_addr = dma_addr;
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
areq_ctx->ccm_iv0_dma_addr = void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
dma_map_single(dev, (areq_ctx->ccm_config +
CCM_CTR_COUNT_0_OFFSET),
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
areq_ctx->ccm_iv0_dma_addr))) { DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, AES_BLOCK_SIZE, addr);
(areq_ctx->ccm_config +
CCM_CTR_COUNT_0_OFFSET));
areq_ctx->ccm_iv0_dma_addr = 0; areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->ccm_iv0_dma_addr = dma_addr;
if (ssi_aead_handle_config_buf(dev, areq_ctx, if (ssi_aead_handle_config_buf(dev, areq_ctx,
areq_ctx->ccm_config, &sg_data, areq_ctx->ccm_config, &sg_data,
req->assoclen)) { req->assoclen)) {
...@@ -1317,54 +1316,49 @@ int cc_map_aead_request( ...@@ -1317,54 +1316,49 @@ int cc_map_aead_request(
#if SSI_CC_HAS_AES_GCM #if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
areq_ctx->hkey_dma_addr = dma_map_single(dev, dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
areq_ctx->hkey,
AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, if (unlikely(dma_mapping_error(dev, dma_addr))) {
areq_ctx->hkey_dma_addr))) {
dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey); AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->hkey_dma_addr = dma_addr;
areq_ctx->gcm_block_len_dma_addr = dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE); AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, if (unlikely(dma_mapping_error(dev, dma_addr))) {
areq_ctx->gcm_block_len_dma_addr))) {
dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->gcm_block_len_dma_addr = dma_addr;
areq_ctx->gcm_iv_inc1_dma_addr = dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
dma_map_single(dev, areq_ctx->gcm_iv_inc1,
AES_BLOCK_SIZE, DMA_TO_DEVICE); AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, if (unlikely(dma_mapping_error(dev, dma_addr))) {
areq_ctx->gcm_iv_inc1_dma_addr))) {
dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0; areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
dma_map_single(dev, areq_ctx->gcm_iv_inc2,
AES_BLOCK_SIZE, DMA_TO_DEVICE); AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, if (unlikely(dma_mapping_error(dev, dma_addr))) {
areq_ctx->gcm_iv_inc2_dma_addr))) {
dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n", dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0; areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM; rc = -ENOMEM;
goto aead_map_failure; goto aead_map_failure;
} }
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
} }
#endif /*SSI_CC_HAS_AES_GCM*/ #endif /*SSI_CC_HAS_AES_GCM*/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment