Commit e1cb54fd authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: make long func call sites readable

The driver was using a function naming scheme with long
common prefixes for driver-global functions, based on the
code module they came from.

The combination of long names and long common prefixes
made call sites too long for a human to parse.

Switch to a simpler, shorter function naming scheme.
Where required, realign parameters and add parentheses
for better code readability.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 988b9ea9
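
To illustrate the scheme, here is a typical call site before and after the rename (both versions are taken verbatim from the diff below; the surrounding context is abridged):

        /* Before: the module prefix pushes arguments past 80 columns */
        rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
                                                  ivsize, nbytes, req_ctx->iv,
                                                  src, dst);

        /* After: short cc_ prefix, arguments realigned to the open paren */
        rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
                                      req_ctx->iv, src, dst);

Where a single argument now spans multiple lines, the diff also wraps the expression in parentheses (see the cc_copy_sg_portion() call below) so the grouping stays visible across the line break.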
@@ -233,7 +233,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
         int err = 0;
 
-        ssi_buffer_mgr_unmap_aead_request(dev, areq);
+        cc_unmap_aead_request(dev, areq);
 
         /* Restore ordinary iv pointer */
         areq->iv = areq_ctx->backup_iv;
@@ -246,17 +246,20 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
                         /* In case of payload authentication failure, MUST NOT
                          * revealed the decrypted message --> zero its memory.
                          */
-                        ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+                        cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
                         err = -EBADMSG;
                 }
         } else { /*ENCRYPT*/
-                if (unlikely(areq_ctx->is_icv_fragmented))
-                        ssi_buffer_mgr_copy_scatterlist_portion(
-                                dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
-                                areq->cryptlen + areq_ctx->dst_offset,
-                                (areq->cryptlen + areq_ctx->dst_offset +
-                                 ctx->authsize),
-                                SSI_SG_FROM_BUF);
+                if (unlikely(areq_ctx->is_icv_fragmented)) {
+                        cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+                                           areq_ctx->dst_sgl,
+                                           (areq->cryptlen +
+                                            areq_ctx->dst_offset),
+                                           (areq->cryptlen +
+                                            areq_ctx->dst_offset +
+                                            ctx->authsize),
+                                           SSI_SG_FROM_BUF);
+                }
 
         /* If an IV was generated, copy it back to the user provided buffer. */
         if (areq_ctx->backup_giv) {
@@ -2053,7 +2056,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
         }
 #endif /*SSI_CC_HAS_AES_GCM*/
 
-        rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
+        rc = cc_map_aead_request(ctx->drvdata, req);
         if (unlikely(rc != 0)) {
                 dev_err(dev, "map_request() failed\n");
                 goto exit;
@@ -2112,7 +2115,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 #endif
         default:
                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-                ssi_buffer_mgr_unmap_aead_request(dev, req);
+                cc_unmap_aead_request(dev, req);
                 rc = -ENOTSUPP;
                 goto exit;
         }
@@ -2123,7 +2126,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 
         if (unlikely(rc != -EINPROGRESS)) {
                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-                ssi_buffer_mgr_unmap_aead_request(dev, req);
+                cc_unmap_aead_request(dev, req);
         }
 
 exit:
@@ -2753,8 +2756,9 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
         INIT_LIST_HEAD(&aead_handle->aead_list);
         drvdata->aead_handle = aead_handle;
 
-        aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
-                drvdata, MAX_HMAC_DIGEST_SIZE);
+        aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+                                                         MAX_HMAC_DIGEST_SIZE);
         if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
                 dev_err(dev, "SRAM pool exhausted\n");
                 rc = -ENOMEM;
@@ -50,42 +50,39 @@ struct mlli_params {
         u32 mlli_len;
 };
 
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_init(struct ssi_drvdata *drvdata);
 
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata);
 
-int ssi_buffer_mgr_map_blkcipher_request(
-        struct ssi_drvdata *drvdata,
-        void *ctx,
-        unsigned int ivsize,
-        unsigned int nbytes,
-        void *info,
-        struct scatterlist *src,
-        struct scatterlist *dst);
+int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
+                             unsigned int ivsize, unsigned int nbytes,
+                             void *info, struct scatterlist *src,
+                             struct scatterlist *dst);
 
-void ssi_buffer_mgr_unmap_blkcipher_request(
-        struct device *dev,
-        void *ctx,
-        unsigned int ivsize,
-        struct scatterlist *src,
-        struct scatterlist *dst);
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+                                unsigned int ivsize,
+                                struct scatterlist *src,
+                                struct scatterlist *dst);
 
-int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
+int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
 
-void ssi_buffer_mgr_unmap_aead_request(struct device *dev, struct aead_request *req);
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
 
-int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
+int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
+                              struct scatterlist *src, unsigned int nbytes,
+                              bool do_update);
 
-int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
+int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
+                               struct scatterlist *src, unsigned int nbytes,
+                               unsigned int block_size);
 
-void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+                           struct scatterlist *src, bool do_revert);
 
-void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
-                                             struct scatterlist *sg,
-                                             u32 to_skip, u32 end,
-                                             enum ssi_sg_cpy_direct direct);
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+                        u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
 
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
 
 #endif /*__BUFFER_MGR_H__*/
@@ -694,7 +694,7 @@ static int ssi_blkcipher_complete(struct device *dev,
         int completion_error = 0;
         struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
 
-        ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+        cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
         kfree(req_ctx->iv);
 
         if (areq) {
@@ -786,9 +786,8 @@ static int ssi_blkcipher_process(
 
         /* STAT_PHASE_1: Map buffers */
 
-        rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
-                                                  ivsize, nbytes, req_ctx->iv,
-                                                  src, dst);
+        rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+                                      req_ctx->iv, src, dst);
         if (unlikely(rc != 0)) {
                 dev_err(dev, "map_request() failed\n");
                 goto exit_process;
@@ -823,12 +822,14 @@ static int ssi_blkcipher_process(
 
         if (areq) {
                 if (unlikely(rc != -EINPROGRESS)) {
                         /* Failed to send the request or request completed synchronously */
-                        ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+                        cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src,
+                                                   dst);
                 }
         } else {
                 if (rc != 0) {
-                        ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+                        cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src,
+                                                   dst);
                 } else {
                         rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
                                                     src, ivsize, NULL,
@@ -317,7 +317,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
         }
 
         new_drvdata->mlli_sram_addr =
-                ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+                cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
         if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
                 dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
                 rc = -ENOMEM;
@@ -330,15 +330,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
                 goto post_sram_mgr_err;
         }
 
-        rc = ssi_buffer_mgr_init(new_drvdata);
+        rc = cc_buffer_mgr_init(new_drvdata);
         if (unlikely(rc != 0)) {
                 dev_err(dev, "buffer_mgr_init failed\n");
                 goto post_req_mgr_err;
         }
 
-        rc = ssi_power_mgr_init(new_drvdata);
+        rc = cc_pm_init(new_drvdata);
         if (unlikely(rc != 0)) {
-                dev_err(dev, "ssi_power_mgr_init failed\n");
+                dev_err(dev, "cc_pm_init failed\n");
                 goto post_buf_mgr_err;
         }
@@ -383,9 +383,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 post_ivgen_err:
         ssi_ivgen_fini(new_drvdata);
 post_power_mgr_err:
-        ssi_power_mgr_fini(new_drvdata);
+        cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
-        ssi_buffer_mgr_fini(new_drvdata);
+        cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
         request_mgr_fini(new_drvdata);
 post_sram_mgr_err:
@@ -418,8 +418,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
         ssi_hash_free(drvdata);
         ssi_ablkcipher_free(drvdata);
         ssi_ivgen_fini(drvdata);
-        ssi_power_mgr_fini(drvdata);
-        ssi_buffer_mgr_fini(drvdata);
+        cc_pm_fini(drvdata);
+        cc_buffer_mgr_fini(drvdata);
         request_mgr_fini(drvdata);
         ssi_sram_mgr_fini(drvdata);
         ssi_fips_fini(drvdata);
@@ -500,7 +500,7 @@ static int cc7x_remove(struct platform_device *plat_dev)
 
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 static const struct dev_pm_ops arm_cc7x_driver_pm = {
-        SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
+        SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
 };
 #endif
@@ -209,7 +209,7 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
                 goto out;
         }
 
         /* Allocate IV pool in SRAM */
-        ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
+        ivgen_ctx->pool = cc_sram_alloc(drvdata, SSI_IVPOOL_SIZE);
         if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
                 dev_err(device, "SRAM pool exhausted\n");
                 rc = -ENOMEM;
@@ -34,7 +34,7 @@
 #define POWER_DOWN_ENABLE 0x01
 #define POWER_DOWN_DISABLE 0x00
 
-int ssi_power_mgr_runtime_suspend(struct device *dev)
+int cc_pm_suspend(struct device *dev)
 {
         struct ssi_drvdata *drvdata =
                 (struct ssi_drvdata *)dev_get_drvdata(dev);
@@ -42,9 +42,9 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
         dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
         cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 
-        rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
+        rc = cc_suspend_req_queue(drvdata);
         if (rc != 0) {
-                dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
+                dev_err(dev, "cc_suspend_req_queue (%x)\n",
                         rc);
                 return rc;
         }
@@ -53,7 +53,7 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
         return 0;
 }
 
-int ssi_power_mgr_runtime_resume(struct device *dev)
+int cc_pm_resume(struct device *dev)
 {
         int rc;
         struct ssi_drvdata *drvdata =
@@ -74,9 +74,9 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
                 return rc;
         }
 
-        rc = ssi_request_mgr_runtime_resume_queue(drvdata);
+        rc = cc_resume_req_queue(drvdata);
         if (rc != 0) {
-                dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
+                dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
                 return rc;
         }
@@ -87,12 +87,11 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
         return 0;
 }
 
-int ssi_power_mgr_runtime_get(struct device *dev)
+int cc_pm_get(struct device *dev)
 {
         int rc = 0;
 
-        if (ssi_request_mgr_is_queue_runtime_suspend(
-                                (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+        if (cc_req_queue_suspended((struct ssi_drvdata *)dev_get_drvdata(dev))) {
                 rc = pm_runtime_get_sync(dev);
         } else {
                 pm_runtime_get_noresume(dev);
@@ -100,12 +99,11 @@ int ssi_power_mgr_runtime_get(struct device *dev)
         return rc;
 }
 
-int ssi_power_mgr_runtime_put_suspend(struct device *dev)
+int cc_pm_put_suspend(struct device *dev)
 {
         int rc = 0;
 
-        if (!ssi_request_mgr_is_queue_runtime_suspend(
-                                (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+        if (!cc_req_queue_suspended((struct ssi_drvdata *)dev_get_drvdata(dev))) {
                 pm_runtime_mark_last_busy(dev);
                 rc = pm_runtime_put_autosuspend(dev);
         } else {
@@ -118,7 +116,7 @@ int ssi_power_mgr_runtime_put_suspend(struct device *dev)
 #endif
 
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
+int cc_pm_init(struct ssi_drvdata *drvdata)
 {
         int rc = 0;
 
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
@@ -137,7 +135,7 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
         return rc;
 }
 
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
+void cc_pm_fini(struct ssi_drvdata *drvdata)
 {
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
         pm_runtime_disable(drvdata_to_dev(drvdata));
@@ -25,18 +25,18 @@
 #define SSI_SUSPEND_TIMEOUT 3000
 
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
+int cc_pm_init(struct ssi_drvdata *drvdata);
 
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
+void cc_pm_fini(struct ssi_drvdata *drvdata);
 
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_power_mgr_runtime_suspend(struct device *dev);
+int cc_pm_suspend(struct device *dev);
 
-int ssi_power_mgr_runtime_resume(struct device *dev);
+int cc_pm_resume(struct device *dev);
 
-int ssi_power_mgr_runtime_get(struct device *dev);
+int cc_pm_get(struct device *dev);
 
-int ssi_power_mgr_runtime_put_suspend(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
 #endif
 
 #endif /*__POWER_MGR_H__*/
@@ -274,9 +274,9 @@ int send_request(
                       (!is_dout ? 1 : 0));
 
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-        rc = ssi_power_mgr_runtime_get(dev);
+        rc = cc_pm_get(dev);
         if (rc != 0) {
-                dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+                dev_err(dev, "cc_pm_get returned %x\n", rc);
                 return rc;
         }
 #endif
@@ -301,7 +301,7 @@ int send_request(
                  * (SW queue is full)
                  */
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-                ssi_power_mgr_runtime_put_suspend(dev);
+                cc_pm_put_suspend(dev);
 #endif
                 return rc;
         }
@@ -337,7 +337,7 @@ int send_request(
                         dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
                         spin_unlock_bh(&req_mgr_h->hw_lock);
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-                        ssi_power_mgr_runtime_put_suspend(dev);
+                        cc_pm_put_suspend(dev);
 #endif
                         return rc;
                 }
@@ -499,7 +499,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
                 dev_dbg(dev, "Request completed. axi_completed=%d\n",
                         request_mgr_handle->axi_completed);
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-                rc = ssi_power_mgr_runtime_put_suspend(dev);
+                rc = cc_pm_put_suspend(dev);
                 if (rc != 0)
                         dev_err(dev, "Failed to set runtime suspension %d\n",
                                 rc);
@@ -565,7 +565,7 @@ static void comp_handler(unsigned long devarg)
  * the spin lock protection
  */
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
+int cc_resume_req_queue(struct ssi_drvdata *drvdata)
 {
         struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;
@@ -580,7 +580,7 @@ int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
  * suspend the queue configuration. Since it is used for the runtime suspend
  * only verify that the queue can be suspended.
  */
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
+int cc_suspend_req_queue(struct ssi_drvdata *drvdata)
 {
         struct ssi_request_mgr_handle *request_mgr_handle =
                                                 drvdata->request_mgr_handle;
@@ -598,7 +598,7 @@ int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
         return 0;
 }
 
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
+bool cc_req_queue_suspended(struct ssi_drvdata *drvdata)
 {
         struct ssi_request_mgr_handle *request_mgr_handle =
                                                 drvdata->request_mgr_handle;
@@ -50,11 +50,11 @@ void complete_request(struct ssi_drvdata *drvdata);
 void request_mgr_fini(struct ssi_drvdata *drvdata);
 
 #if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
+int cc_resume_req_queue(struct ssi_drvdata *drvdata);
 
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
+int cc_suspend_req_queue(struct ssi_drvdata *drvdata);
 
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata);
+bool cc_req_queue_suspended(struct ssi_drvdata *drvdata);
 #endif
 
 #endif /*__REQUEST_MGR_H__*/
@@ -69,7 +69,7 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
  * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
+ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
 {
         struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
         struct device *dev = drvdata_to_dev(drvdata);
@@ -93,7 +93,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
 }
 
 /**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create const descriptors sequence to
  *        set values in given array into SRAM.
  * Note: each const value can't exceed word size.
 *
@@ -103,10 +103,9 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
  * @seq: A pointer to the given IN/OUT descriptor sequence
  * @seq_len: A pointer to the given IN/OUT sequence length
  */
-void ssi_sram_mgr_const2sram_desc(
-        const u32 *src, ssi_sram_addr_t dst,
-        unsigned int nelement,
-        struct cc_hw_desc *seq, unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, ssi_sram_addr_t dst,
+                      unsigned int nelement, struct cc_hw_desc *seq,
+                      unsigned int *seq_len)
 {
         u32 i;
         unsigned int idx = *seq_len;
@@ -58,10 +58,10 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
  * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
+ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size);
 
 /**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create const descriptors sequence to
  *        set values in given array into SRAM.
  * Note: each const value can't exceed word size.
 *
@@ -71,7 +71,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
  * @seq: A pointer to the given IN/OUT descriptor sequence
  * @seq_len: A pointer to the given IN/OUT sequence length
  */
-void ssi_sram_mgr_const2sram_desc(
+void cc_set_sram_desc(
         const u32 *src, ssi_sram_addr_t dst,
         unsigned int nelement,
         struct cc_hw_desc *seq, unsigned int *seq_len);
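
For reference, a minimal usage sketch composing the renamed SRAM helpers (signatures as in the diff above; it assumes a live struct ssi_drvdata *drvdata is in scope, and the constant values are purely illustrative):

        u32 vals[2] = { 0x01234567, 0x89abcdef };  /* illustrative words */
        struct cc_hw_desc seq[2];
        unsigned int seq_len = 0;
        ssi_sram_addr_t dst = cc_sram_alloc(drvdata, sizeof(vals));

        if (dst != NULL_SRAM_ADDR)
                /* one descriptor per 32-bit word, written into SRAM at dst */
                cc_set_sram_desc(vals, dst, ARRAY_SIZE(vals), seq, &seq_len);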