Commit 0e479300 authored by Yuan Kang, committed by Herbert Xu

crypto: caam - shorter names

"aead_authenc" and "ipsec_esp" changed to "aead,"
except for function "ipsec_esp," which is changed
to "init_aead_job."

Variable name of aead_request structures changed
to "req" and name of aead_givcrypt_request structure
changed to "areq"
Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1c2b4abb
...@@ -94,7 +94,7 @@ struct caam_ctx { ...@@ -94,7 +94,7 @@ struct caam_ctx {
unsigned int authsize; unsigned int authsize;
}; };
static int aead_authenc_setauthsize(struct crypto_aead *authenc, static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize) unsigned int authsize)
{ {
struct caam_ctx *ctx = crypto_aead_ctx(authenc); struct caam_ctx *ctx = crypto_aead_ctx(authenc);
...@@ -286,7 +286,7 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx) ...@@ -286,7 +286,7 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
return 0; return 0;
} }
static int aead_authenc_setkey(struct crypto_aead *aead, static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
...@@ -379,7 +379,7 @@ struct link_tbl_entry { ...@@ -379,7 +379,7 @@ struct link_tbl_entry {
}; };
/* /*
* ipsec_esp_edesc - s/w-extended ipsec_esp descriptor * aead_edesc - s/w-extended ipsec_esp descriptor
* @src_nents: number of segments in input scatterlist * @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist * @dst_nents: number of segments in output scatterlist
* @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
...@@ -388,7 +388,7 @@ struct link_tbl_entry { ...@@ -388,7 +388,7 @@ struct link_tbl_entry {
* @link_tbl_dma: bus physical mapped address of h/w link table * @link_tbl_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables * @hw_desc: the h/w job descriptor followed by any referenced link tables
*/ */
struct ipsec_esp_edesc { struct aead_edesc {
int assoc_nents; int assoc_nents;
int src_nents; int src_nents;
int dst_nents; int dst_nents;
...@@ -398,19 +398,19 @@ struct ipsec_esp_edesc { ...@@ -398,19 +398,19 @@ struct ipsec_esp_edesc {
u32 hw_desc[0]; u32 hw_desc[0];
}; };
static void ipsec_esp_unmap(struct device *dev, static void aead_unmap(struct device *dev,
struct ipsec_esp_edesc *edesc, struct aead_edesc *edesc,
struct aead_request *areq) struct aead_request *req)
{ {
dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
if (unlikely(areq->dst != areq->src)) { if (unlikely(req->dst != req->src)) {
dma_unmap_sg(dev, areq->src, edesc->src_nents, dma_unmap_sg(dev, req->src, edesc->src_nents,
DMA_TO_DEVICE); DMA_TO_DEVICE);
dma_unmap_sg(dev, areq->dst, edesc->dst_nents, dma_unmap_sg(dev, req->dst, edesc->dst_nents,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} else { } else {
dma_unmap_sg(dev, areq->src, edesc->src_nents, dma_unmap_sg(dev, req->src, edesc->src_nents,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} }
...@@ -423,20 +423,20 @@ static void ipsec_esp_unmap(struct device *dev, ...@@ -423,20 +423,20 @@ static void ipsec_esp_unmap(struct device *dev,
/* /*
* ipsec_esp descriptor callbacks * ipsec_esp descriptor callbacks
*/ */
static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context)
{ {
struct aead_request *areq = context; struct aead_request *req = context;
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
#ifdef DEBUG #ifdef DEBUG
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
int ivsize = crypto_aead_ivsize(aead); int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif #endif
edesc = (struct ipsec_esp_edesc *)((char *)desc - edesc = (struct aead_edesc *)((char *)desc -
offsetof(struct ipsec_esp_edesc, hw_desc)); offsetof(struct aead_edesc, hw_desc));
if (err) { if (err) {
char tmp[CAAM_ERROR_STR_MAX]; char tmp[CAAM_ERROR_STR_MAX];
...@@ -444,39 +444,39 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -444,39 +444,39 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
} }
ipsec_esp_unmap(jrdev, edesc, areq); aead_unmap(jrdev, edesc, req);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
areq->assoclen , 1); req->assoclen , 1);
print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1); edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : areq->cryptlen + edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1); ctx->authsize + 4, 1);
#endif #endif
kfree(edesc); kfree(edesc);
aead_request_complete(areq, err); aead_request_complete(req, err);
} }
static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context)
{ {
struct aead_request *areq = context; struct aead_request *req = context;
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
#ifdef DEBUG #ifdef DEBUG
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif #endif
edesc = (struct ipsec_esp_edesc *)((char *)desc - edesc = (struct aead_edesc *)((char *)desc -
offsetof(struct ipsec_esp_edesc, hw_desc)); offsetof(struct aead_edesc, hw_desc));
if (err) { if (err) {
char tmp[CAAM_ERROR_STR_MAX]; char tmp[CAAM_ERROR_STR_MAX];
...@@ -484,7 +484,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -484,7 +484,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
} }
ipsec_esp_unmap(jrdev, edesc, areq); aead_unmap(jrdev, edesc, req);
/* /*
* verify hw auth check passed else return -EBADMSG * verify hw auth check passed else return -EBADMSG
...@@ -495,12 +495,12 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -495,12 +495,12 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, DUMP_PREFIX_ADDRESS, 16, 4,
((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)), ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
sizeof(struct iphdr) + areq->assoclen + sizeof(struct iphdr) + req->assoclen +
((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) + ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1); ctx->authsize + 36, 1);
if (!err && edesc->link_tbl_bytes) { if (!err && edesc->link_tbl_bytes) {
struct scatterlist *sg = sg_last(areq->src, edesc->src_nents); struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1); sg->length + ctx->authsize + 16, 1);
...@@ -508,7 +508,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -508,7 +508,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif #endif
kfree(edesc); kfree(edesc);
aead_request_complete(areq, err); aead_request_complete(req, err);
} }
/* /*
...@@ -537,12 +537,12 @@ static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, ...@@ -537,12 +537,12 @@ static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/* /*
* fill in and submit ipsec_esp job descriptor * fill in and submit ipsec_esp job descriptor
*/ */
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, static int init_aead_job(struct aead_edesc *edesc, struct aead_request *req,
u32 encrypt, u32 encrypt,
void (*callback) (struct device *dev, u32 *desc, void (*callback) (struct device *dev, u32 *desc,
u32 err, void *context)) u32 err, void *context))
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
u32 *desc = edesc->hw_desc, options; u32 *desc = edesc->hw_desc, options;
...@@ -554,27 +554,27 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, ...@@ -554,27 +554,27 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
u32 *sh_desc = ctx->sh_desc; u32 *sh_desc = ctx->sh_desc;
debug("assoclen %d cryptlen %d authsize %d\n", debug("assoclen %d cryptlen %d authsize %d\n",
areq->assoclen, areq->cryptlen, authsize); req->assoclen, req->cryptlen, authsize);
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
areq->assoclen , 1); req->assoclen , 1);
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1); edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : areq->cryptlen + authsize, 1); edesc->src_nents ? 100 : req->cryptlen + authsize, 1);
print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1); desc_bytes(sh_desc), 1);
#endif #endif
assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1, assoc_sg_count = dma_map_sg(jrdev, req->assoc, edesc->assoc_nents ?: 1,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (areq->src == areq->dst) if (req->src == req->dst)
sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, sg_count = dma_map_sg(jrdev, req->src, edesc->src_nents ? : 1,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
else else
sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, sg_count = dma_map_sg(jrdev, req->src, edesc->src_nents ? : 1,
DMA_TO_DEVICE); DMA_TO_DEVICE);
/* start auth operation */ /* start auth operation */
...@@ -584,14 +584,14 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, ...@@ -584,14 +584,14 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
/* Load FIFO with data for Class 2 CHA */ /* Load FIFO with data for Class 2 CHA */
options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG; options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
if (!edesc->assoc_nents) { if (!edesc->assoc_nents) {
ptr = sg_dma_address(areq->assoc); ptr = sg_dma_address(req->assoc);
} else { } else {
sg_to_link_tbl(areq->assoc, edesc->assoc_nents, sg_to_link_tbl(req->assoc, edesc->assoc_nents,
edesc->link_tbl, 0); edesc->link_tbl, 0);
ptr = edesc->link_tbl_dma; ptr = edesc->link_tbl_dma;
options |= LDST_SGF; options |= LDST_SGF;
} }
append_fifo_load(desc, ptr, areq->assoclen, options); append_fifo_load(desc, ptr, req->assoclen, options);
/* copy iv from cipher/class1 input context to class2 infifo */ /* copy iv from cipher/class1 input context to class2 infifo */
append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
...@@ -621,31 +621,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, ...@@ -621,31 +621,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
/* load payload & instruct to class2 to snoop class 1 if encrypting */ /* load payload & instruct to class2 to snoop class 1 if encrypting */
options = 0; options = 0;
if (!edesc->src_nents) { if (!edesc->src_nents) {
src_dma = sg_dma_address(areq->src); src_dma = sg_dma_address(req->src);
} else { } else {
sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl + sg_to_link_tbl(req->src, edesc->src_nents, edesc->link_tbl +
edesc->assoc_nents, 0); edesc->assoc_nents, 0);
src_dma = edesc->link_tbl_dma + edesc->assoc_nents * src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
sizeof(struct link_tbl_entry); sizeof(struct link_tbl_entry);
options |= LDST_SGF; options |= LDST_SGF;
} }
append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options); append_seq_in_ptr(desc, src_dma, req->cryptlen + authsize, options);
append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH | append_seq_fifo_load(desc, req->cryptlen, FIFOLD_CLASS_BOTH |
FIFOLD_TYPE_LASTBOTH | FIFOLD_TYPE_LASTBOTH |
(encrypt ? FIFOLD_TYPE_MSG1OUT2 (encrypt ? FIFOLD_TYPE_MSG1OUT2
: FIFOLD_TYPE_MSG)); : FIFOLD_TYPE_MSG));
/* specify destination */ /* specify destination */
if (areq->src == areq->dst) { if (req->src == req->dst) {
dst_dma = src_dma; dst_dma = src_dma;
} else { } else {
sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1, sg_count = dma_map_sg(jrdev, req->dst, edesc->dst_nents ? : 1,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (!edesc->dst_nents) { if (!edesc->dst_nents) {
dst_dma = sg_dma_address(areq->dst); dst_dma = sg_dma_address(req->dst);
options = 0; options = 0;
} else { } else {
sg_to_link_tbl(areq->dst, edesc->dst_nents, sg_to_link_tbl(req->dst, edesc->dst_nents,
edesc->link_tbl + edesc->assoc_nents + edesc->link_tbl + edesc->assoc_nents +
edesc->src_nents, 0); edesc->src_nents, 0);
dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents + dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
...@@ -654,8 +654,8 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, ...@@ -654,8 +654,8 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
options = LDST_SGF; options = LDST_SGF;
} }
} }
append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options); append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, options);
append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA); append_seq_fifo_store(desc, req->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
/* ICV */ /* ICV */
if (encrypt) if (encrypt)
...@@ -674,11 +674,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, ...@@ -674,11 +674,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
edesc->link_tbl_bytes, 1); edesc->link_tbl_bytes, 1);
#endif #endif
ret = caam_jr_enqueue(jrdev, desc, callback, areq); ret = caam_jr_enqueue(jrdev, desc, callback, req);
if (!ret) if (!ret)
ret = -EINPROGRESS; ret = -EINPROGRESS;
else { else {
ipsec_esp_unmap(jrdev, edesc, areq); aead_unmap(jrdev, edesc, req);
kfree(edesc); kfree(edesc);
} }
...@@ -708,30 +708,30 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) ...@@ -708,30 +708,30 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
/* /*
* allocate and map the ipsec_esp extended descriptor * allocate and map the ipsec_esp extended descriptor
*/ */
static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int desc_bytes) int desc_bytes)
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC; GFP_ATOMIC;
int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); assoc_nents = sg_count(req->assoc, req->assoclen, &chained);
BUG_ON(chained); BUG_ON(chained);
if (likely(assoc_nents == 1)) if (likely(assoc_nents == 1))
assoc_nents = 0; assoc_nents = 0;
src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, src_nents = sg_count(req->src, req->cryptlen + ctx->authsize,
&chained); &chained);
BUG_ON(chained); BUG_ON(chained);
if (src_nents == 1) if (src_nents == 1)
src_nents = 0; src_nents = 0;
if (unlikely(areq->dst != areq->src)) { if (unlikely(req->dst != req->src)) {
dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
&chained); &chained);
BUG_ON(chained); BUG_ON(chained);
if (dst_nents == 1) if (dst_nents == 1)
...@@ -743,7 +743,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, ...@@ -743,7 +743,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
debug("link_tbl_bytes %d\n", link_tbl_bytes); debug("link_tbl_bytes %d\n", link_tbl_bytes);
/* allocate space for base edesc and hw desc commands, link tables */ /* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
link_tbl_bytes, GFP_DMA | flags); link_tbl_bytes, GFP_DMA | flags);
if (!edesc) { if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n"); dev_err(jrdev, "could not allocate extended descriptor\n");
...@@ -753,7 +753,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, ...@@ -753,7 +753,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
edesc->assoc_nents = assoc_nents; edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents; edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents; edesc->dst_nents = dst_nents;
edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes; desc_bytes;
edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
link_tbl_bytes, DMA_TO_DEVICE); link_tbl_bytes, DMA_TO_DEVICE);
...@@ -762,10 +762,10 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, ...@@ -762,10 +762,10 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
return edesc; return edesc;
} }
static int aead_authenc_encrypt(struct aead_request *areq) static int aead_encrypt(struct aead_request *req)
{ {
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
int ivsize = crypto_aead_ivsize(aead); int ivsize = crypto_aead_ivsize(aead);
...@@ -773,7 +773,7 @@ static int aead_authenc_encrypt(struct aead_request *areq) ...@@ -773,7 +773,7 @@ static int aead_authenc_encrypt(struct aead_request *areq)
dma_addr_t iv_dma; dma_addr_t iv_dma;
/* allocate extended descriptor */ /* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN * edesc = aead_edesc_alloc(req, DESC_AEAD_ENCRYPT_TEXT_LEN *
CAAM_CMD_SZ); CAAM_CMD_SZ);
if (IS_ERR(edesc)) if (IS_ERR(edesc))
return PTR_ERR(edesc); return PTR_ERR(edesc);
...@@ -784,29 +784,29 @@ static int aead_authenc_encrypt(struct aead_request *areq) ...@@ -784,29 +784,29 @@ static int aead_authenc_encrypt(struct aead_request *areq)
init_job_desc_shared(desc, ctx->shared_desc_phys, init_job_desc_shared(desc, ctx->shared_desc_phys,
desc_len(ctx->sh_desc), HDR_SHARE_DEFER); desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
/* check dma error */ /* check dma error */
append_load(desc, iv_dma, ivsize, append_load(desc, iv_dma, ivsize,
LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); return init_aead_job(edesc, req, OP_ALG_ENCRYPT, aead_encrypt_done);
} }
static int aead_authenc_decrypt(struct aead_request *req) static int aead_decrypt(struct aead_request *req)
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aead *aead = crypto_aead_reqtfm(req);
int ivsize = crypto_aead_ivsize(aead); int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
u32 *desc; u32 *desc;
dma_addr_t iv_dma; dma_addr_t iv_dma;
req->cryptlen -= ctx->authsize; req->cryptlen -= ctx->authsize;
/* allocate extended descriptor */ /* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN * edesc = aead_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
CAAM_CMD_SZ); CAAM_CMD_SZ);
if (IS_ERR(edesc)) if (IS_ERR(edesc))
return PTR_ERR(edesc); return PTR_ERR(edesc);
...@@ -823,26 +823,26 @@ static int aead_authenc_decrypt(struct aead_request *req) ...@@ -823,26 +823,26 @@ static int aead_authenc_decrypt(struct aead_request *req)
append_load(desc, iv_dma, ivsize, append_load(desc, iv_dma, ivsize,
LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); return init_aead_job(edesc, req, !OP_ALG_ENCRYPT, aead_decrypt_done);
} }
static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) static int aead_givencrypt(struct aead_givcrypt_request *areq)
{ {
struct aead_request *areq = &req->areq; struct aead_request *req = &areq->areq;
struct ipsec_esp_edesc *edesc; struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
int ivsize = crypto_aead_ivsize(aead); int ivsize = crypto_aead_ivsize(aead);
dma_addr_t iv_dma; dma_addr_t iv_dma;
u32 *desc; u32 *desc;
iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); iv_dma = dma_map_single(jrdev, areq->giv, ivsize, DMA_FROM_DEVICE);
debug("%s: giv %p\n", __func__, req->giv); debug("%s: giv %p\n", __func__, areq->giv);
/* allocate extended descriptor */ /* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN * edesc = aead_edesc_alloc(req, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
CAAM_CMD_SZ); CAAM_CMD_SZ);
if (IS_ERR(edesc)) if (IS_ERR(edesc))
return PTR_ERR(edesc); return PTR_ERR(edesc);
...@@ -881,7 +881,7 @@ static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) ...@@ -881,7 +881,7 @@ static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); return init_aead_job(edesc, req, OP_ALG_ENCRYPT, aead_encrypt_done);
} }
struct caam_alg_template { struct caam_alg_template {
...@@ -901,11 +901,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -901,11 +901,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-aes-caam", .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE, .blocksize = AES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE, .maxauthsize = SHA1_DIGEST_SIZE,
...@@ -919,11 +919,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -919,11 +919,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-aes-caam", .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE, .blocksize = AES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE, .maxauthsize = SHA256_DIGEST_SIZE,
...@@ -938,11 +938,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -938,11 +938,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha512-cbc-aes-caam", .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE, .blocksize = AES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE, .maxauthsize = SHA512_DIGEST_SIZE,
...@@ -957,11 +957,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -957,11 +957,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE, .blocksize = DES3_EDE_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE, .maxauthsize = SHA1_DIGEST_SIZE,
...@@ -975,11 +975,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -975,11 +975,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE, .blocksize = DES3_EDE_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE, .maxauthsize = SHA256_DIGEST_SIZE,
...@@ -994,11 +994,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -994,11 +994,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE, .blocksize = DES3_EDE_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE, .maxauthsize = SHA512_DIGEST_SIZE,
...@@ -1013,11 +1013,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -1013,11 +1013,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des-caam", .driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE, .blocksize = DES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE, .maxauthsize = SHA1_DIGEST_SIZE,
...@@ -1031,11 +1031,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -1031,11 +1031,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des-caam", .driver_name = "authenc-hmac-sha256-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE, .blocksize = DES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE, .maxauthsize = SHA256_DIGEST_SIZE,
...@@ -1050,11 +1050,11 @@ static struct caam_alg_template driver_algs[] = { ...@@ -1050,11 +1050,11 @@ static struct caam_alg_template driver_algs[] = {
.driver_name = "authenc-hmac-sha512-cbc-des-caam", .driver_name = "authenc-hmac-sha512-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE, .blocksize = DES_BLOCK_SIZE,
.aead = { .aead = {
.setkey = aead_authenc_setkey, .setkey = aead_setkey,
.setauthsize = aead_authenc_setauthsize, .setauthsize = aead_setauthsize,
.encrypt = aead_authenc_encrypt, .encrypt = aead_encrypt,
.decrypt = aead_authenc_decrypt, .decrypt = aead_decrypt,
.givencrypt = aead_authenc_givencrypt, .givencrypt = aead_givencrypt,
.geniv = "<built-in>", .geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE, .maxauthsize = SHA512_DIGEST_SIZE,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment