Commit a8739962 authored by Ryder Lee, committed by Herbert Xu

crypto: mediatek - move HW control data to transformation context

This patch moves the hardware control block members from
mtk_*_rec to the transformation context and refines the
related definitions. This lets each operational context
manage its own control information for every DMA transfer.
Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e183914a
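
For orientation before the diff: after this patch, the per-transformation AES
context owns both hardware control blocks and their DMA handles. The layout
below is assembled from the hunks that follow; the field comments are
explanatory additions, not from the source:

	struct mtk_aes_ctx {
		struct mtk_cryp *cryp;
		u32 keylen;

		struct mtk_aes_ct ct;	/* command token: engine instructions */
		dma_addr_t ct_dma;
		struct mtk_aes_tfm tfm;	/* transform state: key and IV words */
		dma_addr_t tfm_dma;

		__le32 ct_hdr;		/* command token control field */
		u32 ct_size;		/* command token size in words */
	};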
@@ -20,23 +20,25 @@
 #define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))

-/* AES command token */
+/* AES command token size */
 #define AES_CT_SIZE_ECB		2
 #define AES_CT_SIZE_CBC		3
 #define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
-#define AES_COMMAND0		cpu_to_le32(0x05000000)
-#define AES_COMMAND1		cpu_to_le32(0x2d060000)
-#define AES_COMMAND2		cpu_to_le32(0xe4a63806)
-
-/* AES transform information */
-#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
-#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
-#define AES_TFM_DECRYPT		cpu_to_le32(0x5 << 0)
-#define AES_TFM_ENCRYPT		cpu_to_le32(0x4 << 0)
+/* AES-CBC/ECB command token */
+#define AES_CMD0		cpu_to_le32(0x05000000)
+#define AES_CMD1		cpu_to_le32(0x2d060000)
+#define AES_CMD2		cpu_to_le32(0xe4a63806)
+
+/* AES transform information word 0 fields */
+#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
+#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
 #define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
 #define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
 #define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
 #define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
+
+/* AES transform information word 1 fields */
+#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
+#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
 #define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)

 /* AES flags */
@@ -47,47 +49,41 @@
 #define AES_FLAGS_BUSY		BIT(3)

 /**
- * mtk_aes_ct is a set of hardware instructions(command token)
- * that are used to control engine's processing flow of AES.
+ * Command token(CT) is a set of hardware instructions that
+ * are used to control engine's processing flow of AES.
+ *
+ * Transform information(TFM) is used to define AES state and
+ * contains all keys and initial vectors.
+ *
+ * The engine requires CT and TFM to do:
+ * - Commands decoding and control of the engine's data path.
+ * - Coordinating hardware data fetch and store operations.
+ * - Result token construction and output.
  */
 struct mtk_aes_ct {
-	__le32 ct_ctrl0;
-	__le32 ct_ctrl1;
-	__le32 ct_ctrl2;
+	__le32 cmd[AES_CT_SIZE_CBC];
 };

-/**
- * mtk_aes_tfm is used to define AES transform state
- * and contains all keys and initial vectors.
- */
 struct mtk_aes_tfm {
-	__le32 tfm_ctrl0;
-	__le32 tfm_ctrl1;
+	__le32 ctrl[2];
 	__le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE)];
 };

-/**
- * mtk_aes_info consists of command token and transform state of AES,
- * which should be encapsulated in command and result descriptors.
- *
- * The engine requires this information to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
- */
-struct mtk_aes_info {
-	struct mtk_aes_ct ct;
-	struct mtk_aes_tfm tfm;
-};
-
 struct mtk_aes_reqctx {
 	u64 mode;
 };

 struct mtk_aes_ctx {
 	struct mtk_cryp *cryp;
-	struct mtk_aes_info info;
 	u32 keylen;
+
+	struct mtk_aes_ct ct;
+	dma_addr_t ct_dma;
+	struct mtk_aes_tfm tfm;
+	dma_addr_t tfm_dma;
+
+	__le32 ct_hdr;
+	u32 ct_size;
 };

 struct mtk_aes_drv {
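
The struct hunk above is a pure re-layout: cmd[] spans the same three words as
the old ct_ctrl0..ct_ctrl2, and ctrl[] the same two as tfm_ctrl0/tfm_ctrl1, so
the blob the engine reads is unchanged. A hypothetical compile-time check of
that invariant, written here with C11 static_assert (not part of the patch):

	/* The engine consumes raw little-endian words; renaming fields
	 * into arrays must not change the token's footprint. */
	static_assert(sizeof(struct mtk_aes_ct) ==
		      AES_CT_SIZE_CBC * sizeof(__le32),
		      "AES command token must stay three 32-bit words");
	static_assert(sizeof(struct mtk_aes_tfm) ==
		      (2 + SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE)) *
		      sizeof(__le32),
		      "AES transform state layout must be stable");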
@@ -174,57 +170,57 @@ static int mtk_aes_info_map(struct mtk_cryp *cryp,
			    struct mtk_aes_rec *aes,
			    size_t len)
 {
-	struct mtk_aes_ctx *ctx = crypto_ablkcipher_ctx(
-			crypto_ablkcipher_reqtfm(aes->req));
-	struct mtk_aes_info *info = aes->info;
-	struct mtk_aes_ct *ct = &info->ct;
-	struct mtk_aes_tfm *tfm = &info->tfm;
+	struct mtk_aes_ctx *ctx = aes->ctx;

-	aes->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
+	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
+	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
+	ctx->ct.cmd[1] = AES_CMD1;

 	if (aes->flags & AES_FLAGS_ENCRYPT)
-		tfm->tfm_ctrl0 = AES_TFM_ENCRYPT;
+		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
 	else
-		tfm->tfm_ctrl0 = AES_TFM_DECRYPT;
+		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;

 	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
-		tfm->tfm_ctrl0 |= AES_TFM_128BITS;
+		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
 	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
-		tfm->tfm_ctrl0 |= AES_TFM_256BITS;
+		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
 	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_192))
-		tfm->tfm_ctrl0 |= AES_TFM_192BITS;
-
-	ct->ct_ctrl0 = AES_COMMAND0 | cpu_to_le32(len);
-	ct->ct_ctrl1 = AES_COMMAND1;
+		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;

 	if (aes->flags & AES_FLAGS_CBC) {
 		const u32 *iv = (const u32 *)aes->req->info;
-		u32 *iv_state = tfm->state + ctx->keylen;
+		u32 *iv_state = ctx->tfm.state + ctx->keylen;
 		int i;

-		aes->ct_size = AES_CT_SIZE_CBC;
-		ct->ct_ctrl2 = AES_COMMAND2;
-
-		tfm->tfm_ctrl0 |= AES_TFM_SIZE(ctx->keylen +
-				  SIZE_IN_WORDS(AES_BLOCK_SIZE));
-		tfm->tfm_ctrl1 = AES_TFM_CBC | AES_TFM_FULL_IV;
+		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
+				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
+		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;

 		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
 			iv_state[i] = cpu_to_le32(iv[i]);

+		ctx->ct.cmd[2] = AES_CMD2;
+		ctx->ct_size = AES_CT_SIZE_CBC;
 	} else if (aes->flags & AES_FLAGS_ECB) {
-		aes->ct_size = AES_CT_SIZE_ECB;
-		tfm->tfm_ctrl0 |= AES_TFM_SIZE(ctx->keylen);
-		tfm->tfm_ctrl1 = AES_TFM_ECB;
+		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
+		ctx->tfm.ctrl[1] = AES_TFM_ECB;
+		ctx->ct_size = AES_CT_SIZE_ECB;
 	}

-	aes->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
-				     DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(cryp->dev, aes->ct_dma))) {
-		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
+	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
+				     DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
+		return -EINVAL;
+
+	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
+				      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma))) {
+		dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+				 DMA_TO_DEVICE);
 		return -EINVAL;
 	}
-	aes->tfm_dma = aes->ct_dma + sizeof(*ct);

 	return 0;
 }
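
With the context owning ct and tfm separately, the hunk above replaces one
combined struct mtk_aes_info mapping (plus the tfm_dma = ct_dma + sizeof(*ct)
pointer arithmetic) with two independent dma_map_single() calls. A minimal,
self-contained sketch of that two-step map/rollback pattern against the
generic DMA API; the function and parameter names here are placeholders, not
driver symbols:

	#include <linux/dma-mapping.h>

	static int map_two_blocks(struct device *dev,
				  void *ct, size_t ct_len, dma_addr_t *ct_dma,
				  void *tfm, size_t tfm_len, dma_addr_t *tfm_dma)
	{
		*ct_dma = dma_map_single(dev, ct, ct_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *ct_dma))
			return -EINVAL;

		*tfm_dma = dma_map_single(dev, tfm, tfm_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *tfm_dma)) {
			/* Roll back the mapping that did succeed. */
			dma_unmap_single(dev, *ct_dma, ct_len, DMA_TO_DEVICE);
			return -EINVAL;
		}

		return 0;
	}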
@@ -253,10 +249,10 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	if (nents == 0) {
 		res->hdr |= MTK_DESC_FIRST;
 		cmd->hdr |= MTK_DESC_FIRST |
-			    MTK_DESC_CT_LEN(aes->ct_size);
-		cmd->ct = cpu_to_le32(aes->ct_dma);
-		cmd->ct_hdr = aes->ct_hdr;
-		cmd->tfm = cpu_to_le32(aes->tfm_dma);
+			    MTK_DESC_CT_LEN(aes->ctx->ct_size);
+		cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
+		cmd->ct_hdr = aes->ctx->ct_hdr;
+		cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
 	}

 	if (++ring->pos == MTK_DESC_NUM)
@@ -396,7 +392,7 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
 	rctx->mode &= AES_FLAGS_MODE_MSK;
 	/* Assign new request to device */
 	aes->req = req;
-	aes->info = &ctx->info;
+	aes->ctx = ctx;
 	aes->flags = (aes->flags & ~AES_FLAGS_MODE_MSK) | rctx->mode;

 	err = mtk_aes_map(cryp, aes);
@@ -408,8 +404,12 @@
 static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 {
-	dma_unmap_single(cryp->dev, aes->ct_dma,
-			 sizeof(struct mtk_aes_info), DMA_TO_DEVICE);
+	struct mtk_aes_ctx *ctx = aes->ctx;
+
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+			 DMA_TO_DEVICE);

 	if (aes->src.sg == aes->dst.sg) {
 		dma_unmap_sg(cryp->dev, aes->src.sg,
@@ -454,7 +454,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
 {
 	struct mtk_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 	const u32 *key_tmp = (const u32 *)key;
-	u32 *key_state = ctx->info.tfm.state;
+	u32 *key_state = ctx->tfm.state;
 	int i;

 	if (keylen != AES_KEYSIZE_128 &&
...
@@ -113,22 +113,20 @@ struct mtk_aes_dma {
 	u32 sg_len;
 };

+struct mtk_aes_ctx;
+
 /**
  * struct mtk_aes_rec - AES operation record
  * @queue:	crypto request queue
  * @req:	pointer to ablkcipher request
  * @task:	the tasklet is use in AES interrupt
+ * @ctx:	pointer to current context
  * @src:	the structure that holds source sg list info
  * @dst:	the structure that holds destination sg list info
  * @aligned_sg:	the scatter list is use to alignment
  * @real_dst:	pointer to the destination sg list
  * @total:	request buffer length
  * @buf:	pointer to page buffer
- * @info:	pointer to AES transform state and command token
- * @ct_hdr:	AES command token control field
- * @ct_size:	size of AES command token
- * @ct_dma:	DMA address of AES command token
- * @tfm_dma:	DMA address of AES transform state
  * @id:		record identification
  * @flags:	it's describing AES operation state
  * @lock:	the ablkcipher queue lock
@@ -139,6 +137,7 @@ struct mtk_aes_rec {
 	struct crypto_queue queue;
 	struct ablkcipher_request *req;
 	struct tasklet_struct task;
+	struct mtk_aes_ctx *ctx;
 	struct mtk_aes_dma src;
 	struct mtk_aes_dma dst;
@@ -148,12 +147,6 @@ struct mtk_aes_rec {
 	size_t total;
 	void *buf;

-	void *info;
-	__le32 ct_hdr;
-	u32 ct_size;
-	dma_addr_t ct_dma;
-	dma_addr_t tfm_dma;
-
 	u8 id;
 	unsigned long flags;
 	/* queue lock */
@@ -165,11 +158,6 @@ struct mtk_aes_rec {
  * @queue:	crypto request queue
  * @req:	pointer to ahash request
  * @task:	the tasklet is use in SHA interrupt
- * @info:	pointer to SHA transform state and command token
- * @ct_hdr:	SHA command token control field
- * @ct_size:	size of SHA command token
- * @ct_dma:	DMA address of SHA command token
- * @tfm_dma:	DMA address of SHA transform state
  * @id:		record identification
  * @flags:	it's describing SHA operation state
  * @lock:	the ablkcipher queue lock
@@ -181,12 +169,6 @@ struct mtk_sha_rec {
 	struct ahash_request *req;
 	struct tasklet_struct task;

-	void *info;
-	__le32 ct_hdr;
-	u32 ct_size;
-	dma_addr_t ct_dma;
-	dma_addr_t tfm_dma;
-
 	u8 id;
 	unsigned long flags;
 	/* queue lock */
...
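
In the header hunks above, "struct mtk_aes_ctx;" is only a forward
declaration: the record stores a bare pointer, and an incomplete type is
sufficient for that. Only mtk-aes.c, which dereferences the pointer, needs the
full definition. The idiom in isolation (names invented for illustration):

	struct ctx;		/* incomplete type: layout unknown here */

	struct rec {
		struct ctx *ctx;	/* OK: a pointer to an incomplete type */
	};

	/* Storing and passing rec->ctx compiles here; dereferencing it
	 * requires the full definition of struct ctx in one .c file. */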
@@ -28,9 +28,9 @@
 /* SHA command token */
 #define SHA_CT_SIZE		5
 #define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
-#define SHA_COMMAND0		cpu_to_le32(0x03020000)
-#define SHA_COMMAND1		cpu_to_le32(0x21060000)
-#define SHA_COMMAND2		cpu_to_le32(0xe0e63802)
+#define SHA_CMD0		cpu_to_le32(0x03020000)
+#define SHA_CMD1		cpu_to_le32(0x21060000)
+#define SHA_CMD2		cpu_to_le32(0xe0e63802)

 /* SHA transform information */
 #define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
@@ -66,11 +66,8 @@
  * and it contains the first two words of transform state.
  */
 struct mtk_sha_ct {
-	__le32 tfm_ctrl0;
-	__le32 tfm_ctrl1;
-	__le32 ct_ctrl0;
-	__le32 ct_ctrl1;
-	__le32 ct_ctrl2;
+	__le32 ctrl[2];
+	__le32 cmd[3];
 };

 /**
@@ -78,8 +75,7 @@ struct mtk_sha_ct {
  * and store result digest that produced by engine.
  */
 struct mtk_sha_tfm {
-	__le32 tfm_ctrl0;
-	__le32 tfm_ctrl1;
+	__le32 ctrl[2];
 	__le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
 };

@@ -102,6 +98,11 @@ struct mtk_sha_reqctx {
 	size_t bufcnt;
 	dma_addr_t dma_addr;

+	__le32 ct_hdr;
+	u32 ct_size;
+	dma_addr_t ct_dma;
+	dma_addr_t tfm_dma;
+
 	/* Walk state */
 	struct scatterlist *sg;
 	u32 offset;	/* Offset in current sg */
@@ -270,34 +271,32 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
 }

 /* Initialize basic transform information of SHA */
-static void mtk_sha_info_init(struct mtk_sha_rec *sha,
-			      struct mtk_sha_reqctx *ctx)
+static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
 {
-	struct mtk_sha_info *info = sha->info;
-	struct mtk_sha_ct *ct = &info->ct;
-	struct mtk_sha_tfm *tfm = &info->tfm;
+	struct mtk_sha_ct *ct = &ctx->info.ct;
+	struct mtk_sha_tfm *tfm = &ctx->info.tfm;

-	sha->ct_hdr = SHA_CT_CTRL_HDR;
-	sha->ct_size = SHA_CT_SIZE;
+	ctx->ct_hdr = SHA_CT_CTRL_HDR;
+	ctx->ct_size = SHA_CT_SIZE;

-	tfm->tfm_ctrl0 = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
-			 SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
+	tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
+		       SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

 	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
 	case SHA_FLAGS_SHA1:
-		tfm->tfm_ctrl0 |= SHA_TFM_SHA1;
+		tfm->ctrl[0] |= SHA_TFM_SHA1;
 		break;
 	case SHA_FLAGS_SHA224:
-		tfm->tfm_ctrl0 |= SHA_TFM_SHA224;
+		tfm->ctrl[0] |= SHA_TFM_SHA224;
 		break;
 	case SHA_FLAGS_SHA256:
-		tfm->tfm_ctrl0 |= SHA_TFM_SHA256;
+		tfm->ctrl[0] |= SHA_TFM_SHA256;
 		break;
 	case SHA_FLAGS_SHA384:
-		tfm->tfm_ctrl0 |= SHA_TFM_SHA384;
+		tfm->ctrl[0] |= SHA_TFM_SHA384;
 		break;
 	case SHA_FLAGS_SHA512:
-		tfm->tfm_ctrl0 |= SHA_TFM_SHA512;
+		tfm->ctrl[0] |= SHA_TFM_SHA512;
 		break;
 	default:
@@ -305,13 +304,13 @@ static void mtk_sha_info_init(struct mtk_sha_rec *sha,
 		return;
 	}

-	tfm->tfm_ctrl1 = SHA_TFM_HASH_STORE;
-	ct->tfm_ctrl0 = tfm->tfm_ctrl0 | SHA_TFM_CONTINUE | SHA_TFM_START;
-	ct->tfm_ctrl1 = tfm->tfm_ctrl1;
+	tfm->ctrl[1] = SHA_TFM_HASH_STORE;
+	ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
+	ct->ctrl[1] = tfm->ctrl[1];

-	ct->ct_ctrl0 = SHA_COMMAND0;
-	ct->ct_ctrl1 = SHA_COMMAND1;
-	ct->ct_ctrl2 = SHA_COMMAND2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
+	ct->cmd[0] = SHA_CMD0;
+	ct->cmd[1] = SHA_CMD1;
+	ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
 }

 /*
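
A worked example for the size fields above (my arithmetic, not from the
source): for SHA-256, ctx->ds is SHA256_DIGEST_SIZE = 32 bytes, so
SIZE_IN_WORDS(ctx->ds) evaluates to 8; SHA_TFM_SIZE(8) in tfm->ctrl[0] and
SHA_TFM_DIGEST(8) in ct->cmd[2] then both advertise an eight-word inner
digest to the engine:

	/* Assuming SIZE_IN_WORDS() converts bytes to 32-bit words:
	 * SIZE_IN_WORDS(SHA256_DIGEST_SIZE) == 32 / 4 == 8 */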
@@ -323,28 +322,28 @@ static int mtk_sha_info_map(struct mtk_cryp *cryp,
			    size_t len)
 {
 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
-	struct mtk_sha_info *info = sha->info;
+	struct mtk_sha_info *info = &ctx->info;
 	struct mtk_sha_ct *ct = &info->ct;

 	if (ctx->start)
 		ctx->start = false;
 	else
-		ct->tfm_ctrl0 &= ~SHA_TFM_START;
+		ct->ctrl[0] &= ~SHA_TFM_START;

-	sha->ct_hdr &= ~SHA_DATA_LEN_MSK;
-	sha->ct_hdr |= cpu_to_le32(len);
-	ct->ct_ctrl0 &= ~SHA_DATA_LEN_MSK;
-	ct->ct_ctrl0 |= cpu_to_le32(len);
+	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
+	ctx->ct_hdr |= cpu_to_le32(len);
+	ct->cmd[0] &= ~SHA_DATA_LEN_MSK;
+	ct->cmd[0] |= cpu_to_le32(len);

 	ctx->digcnt += len;

-	sha->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
-				     DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(cryp->dev, sha->ct_dma))) {
+	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
+				     DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
 		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
 		return -EINVAL;
 	}
-	sha->tfm_dma = sha->ct_dma + sizeof(*ct);
+	ctx->tfm_dma = ctx->ct_dma + sizeof(*ct);

 	return 0;
 }
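
Unlike the AES hunk earlier, the SHA path above still keeps the command token
and transform state adjacent inside struct mtk_sha_info, maps them with a
single dma_map_single(), and derives the transform-state bus address by
offset. A self-contained sketch of that contiguous-mapping idiom; the struct
and function names here are invented for illustration:

	#include <linux/dma-mapping.h>
	#include <linux/stddef.h>

	struct combined_info {
		__le32 token[5];	/* command token first ... */
		__le32 state[16];	/* ... transform state directly after */
	};

	static int map_combined(struct device *dev, struct combined_info *info,
				dma_addr_t *ct_dma, dma_addr_t *tfm_dma)
	{
		*ct_dma = dma_map_single(dev, info, sizeof(*info),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *ct_dma))
			return -EINVAL;

		/* One mapping covers both blocks; the second bus address
		 * is just an offset into the first. */
		*tfm_dma = *ct_dma + offsetof(struct combined_info, state);

		return 0;
	}

This saves a map/unmap pair per transfer, but it ties the two blocks'
lifetimes together, which is exactly what the AES side moved away from.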
@@ -425,6 +424,7 @@ static int mtk_sha_init(struct ahash_request *req)
 static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr, size_t len)
 {
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 	struct mtk_ring *ring = cryp->ring[sha->id];
 	struct mtk_desc *cmd = ring->cmd_base + ring->pos;
 	struct mtk_desc *res = ring->res_base + ring->pos;
@@ -444,12 +444,12 @@ static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
 	cmd->hdr = MTK_DESC_FIRST |
		   MTK_DESC_LAST |
		   MTK_DESC_BUF_LEN(len) |
-		   MTK_DESC_CT_LEN(sha->ct_size);
+		   MTK_DESC_CT_LEN(ctx->ct_size);
 	cmd->buf = cpu_to_le32(addr);
-	cmd->ct = cpu_to_le32(sha->ct_dma);
-	cmd->ct_hdr = sha->ct_hdr;
-	cmd->tfm = cpu_to_le32(sha->tfm_dma);
+	cmd->ct = cpu_to_le32(ctx->ct_dma);
+	cmd->ct_hdr = ctx->ct_hdr;
+	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

 	if (++ring->pos == MTK_DESC_NUM)
 		ring->pos = 0;
@@ -486,11 +486,11 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
 	cmd->hdr = MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_FIRST |
-		   MTK_DESC_CT_LEN(sha->ct_size);
+		   MTK_DESC_CT_LEN(ctx->ct_size);
 	cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
-	cmd->ct = cpu_to_le32(sha->ct_dma);
-	cmd->ct_hdr = sha->ct_hdr;
-	cmd->tfm = cpu_to_le32(sha->tfm_dma);
+	cmd->ct = cpu_to_le32(ctx->ct_dma);
+	cmd->ct_hdr = ctx->ct_hdr;
+	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

 	if (++ring->pos == MTK_DESC_NUM)
 		ring->pos = 0;
@@ -732,9 +732,8 @@ static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
 	ctx = ahash_request_ctx(req);

 	sha->req = req;
-	sha->info = &ctx->info;

-	mtk_sha_info_init(sha, ctx);
+	mtk_sha_info_init(ctx);

 	if (ctx->op == SHA_OP_UPDATE) {
 		err = mtk_sha_update_start(cryp, sha);
@@ -766,8 +765,8 @@ static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
 {
 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

-	dma_unmap_single(cryp->dev, sha->ct_dma,
-			 sizeof(struct mtk_sha_info), DMA_BIDIRECTIONAL);
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
+			 DMA_BIDIRECTIONAL);

 	if (ctx->flags & SHA_FLAGS_SG) {
 		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
...