Commit 2956f36c authored by Harsh Jain's avatar Harsh Jain Committed by Herbert Xu

crypto: chelsio - Remove allocation of sg list to implement 2K limit of dsgl header

Update the DMA address index instead of allocating a new sg list to impose the 2K size limit for each entry.
Signed-off-by: default avatarHarsh Jain <harsh@chelsio.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent de1a00ac
...@@ -118,6 +118,21 @@ static inline unsigned int sgl_len(unsigned int n) ...@@ -118,6 +118,21 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2; return (3 * n) / 2 + (n & 1) + 2;
} }
/*
 * dstsg_2k - count destination DSGL entries needed for @reqlen bytes,
 * where each hardware entry may cover at most CHCR_SG_SIZE (2K) bytes.
 *
 * Walks @sgl, clamping each scatterlist segment to the bytes still
 * required, and accumulates how many CHCR_SG_SIZE-sized pieces that
 * segment contributes. Stops when either the list or @reqlen runs out.
 *
 * Returns the total number of 2K-bounded entries.
 */
static int dstsg_2k(struct scatterlist *sgl, unsigned int reqlen)
{
	unsigned int chunk;
	int entries = 0;

	for (; sgl && reqlen; sgl = sg_next(sgl)) {
		/* bytes of this segment actually consumed by the request */
		chunk = min(reqlen, sgl->length);
		entries += DIV_ROUND_UP(chunk, CHCR_SG_SIZE);
		reqlen -= chunk;
	}
	return entries;
}
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{ {
u8 temp[SHA512_DIGEST_SIZE]; u8 temp[SHA512_DIGEST_SIZE];
...@@ -167,8 +182,6 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, ...@@ -167,8 +182,6 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
kfree_skb(ctx_req.ctx.reqctx->skb); kfree_skb(ctx_req.ctx.reqctx->skb);
ctx_req.ctx.reqctx->skb = NULL; ctx_req.ctx.reqctx->skb = NULL;
} }
free_new_sg(ctx_req.ctx.reqctx->newdstsg);
ctx_req.ctx.reqctx->newdstsg = NULL;
if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
chcr_verify_tag(ctx_req.req.aead_req, input, chcr_verify_tag(ctx_req.req.aead_req, input,
&err); &err);
...@@ -389,31 +402,41 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, ...@@ -389,31 +402,41 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
{ {
struct phys_sge_pairs *to; struct phys_sge_pairs *to;
unsigned int len = 0, left_size = sg_param->obsize; unsigned int len = 0, left_size = sg_param->obsize;
unsigned int nents = sg_param->nents, i, j = 0; unsigned int j = 0;
int offset, ent_len;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
sizeof(struct cpl_rx_phys_dsgl));
while (left_size && sg) {
len = min_t(u32, left_size, sg_dma_len(sg));
offset = 0;
while (len) {
ent_len = min_t(u32, len, CHCR_SG_SIZE);
to->len[j % 8] = htons(ent_len);
to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
offset);
offset += ent_len;
len -= ent_len;
j++;
if ((j % 8) == 0)
to++;
}
left_size -= min(left_size, sg_dma_len(sg));
sg = sg_next(sg);
}
phys_cpl->pcirlxorder_to_noofsgentr = phys_cpl->pcirlxorder_to_noofsgentr =
htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) | CPL_RX_PHYS_DSGL_DCAID_V(0) |
CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); CPL_RX_PHYS_DSGL_NOOFSGENTR_V(j));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
phys_cpl->rss_hdr_int.hash_val = 0; phys_cpl->rss_hdr_int.hash_val = 0;
to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents && left_size; to++) {
for (j = 0; j < 8 && nents && left_size; j++, nents--) {
len = min(left_size, sg_dma_len(sg));
to->len[j] = htons(len);
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
left_size -= len;
sg = sg_next(sg);
}
}
} }
static inline int map_writesg_phys_cpl(struct device *dev, static inline int map_writesg_phys_cpl(struct device *dev,
...@@ -524,31 +547,33 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx, ...@@ -524,31 +547,33 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
static int chcr_sg_ent_in_wr(struct scatterlist *src, static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst, struct scatterlist *dst,
unsigned int minsg, unsigned int minsg,
unsigned int space, unsigned int space)
short int *sent,
short int *dent)
{ {
int srclen = 0, dstlen = 0; int srclen = 0, dstlen = 0;
int srcsg = minsg, dstsg = 0; int srcsg = minsg, dstsg = 0;
int offset = 0, less;
*sent = 0;
*dent = 0;
while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) && while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
srclen += src->length; srclen += src->length;
srcsg++; srcsg++;
offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
if (srclen <= dstlen) if (srclen <= dstlen)
break; break;
dstlen += dst->length; less = min_t(unsigned int, dst->length - offset,
CHCR_SG_SIZE);
dstlen += less;
offset += less;
if (offset == dst->length) {
dst = sg_next(dst); dst = sg_next(dst);
offset = 0;
}
dstsg++; dstsg++;
} }
src = sg_next(src); src = sg_next(src);
} }
*sent = srcsg - minsg;
*dent = dstsg;
return min(srclen, dstlen); return min(srclen, dstlen);
} }
...@@ -632,13 +657,15 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) ...@@ -632,13 +657,15 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
struct phys_sge_parm sg_param; struct phys_sge_parm sg_param;
unsigned int frags = 0, transhdr_len, phys_dsgl; unsigned int frags = 0, transhdr_len, phys_dsgl;
int error; int error;
int nents;
unsigned int ivsize = AES_BLOCK_SIZE, kctx_len; unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC; GFP_KERNEL : GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(ctx->dev);
phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); reqctx->dst_nents = sg_nents_for_len(reqctx->dst, wrparam->bytes);
nents = dstsg_2k(reqctx->dst, wrparam->bytes);
phys_dsgl = get_space_for_phys_dsgl(nents);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
...@@ -1021,8 +1048,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, ...@@ -1021,8 +1048,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
goto complete; goto complete;
} }
bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1, bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
SPACE_LEFT(ablkctx->enckey_len), SPACE_LEFT(ablkctx->enckey_len));
&wrparam.snent, &reqctx->dst_nents);
if ((bytes + reqctx->processed) >= req->nbytes) if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed; bytes = req->nbytes - reqctx->processed;
else else
...@@ -1061,8 +1087,6 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, ...@@ -1061,8 +1087,6 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
chcr_send_wr(skb); chcr_send_wr(skb);
return 0; return 0;
complete: complete:
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
req->base.complete(&req->base, err); req->base.complete(&req->base, err);
return err; return err;
} }
...@@ -1078,9 +1102,8 @@ static int process_cipher(struct ablkcipher_request *req, ...@@ -1078,9 +1102,8 @@ static int process_cipher(struct ablkcipher_request *req,
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx); struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct cipher_wr_param wrparam; struct cipher_wr_param wrparam;
int bytes, nents, err = -EINVAL; int bytes, err = -EINVAL;
reqctx->newdstsg = NULL;
reqctx->processed = 0; reqctx->processed = 0;
if (!req->info) if (!req->info)
goto error; goto error;
...@@ -1092,18 +1115,9 @@ static int process_cipher(struct ablkcipher_request *req, ...@@ -1092,18 +1115,9 @@ static int process_cipher(struct ablkcipher_request *req,
goto error; goto error;
} }
wrparam.srcsg = req->src; wrparam.srcsg = req->src;
if (is_newsg(req->dst, &nents)) {
reqctx->newdstsg = alloc_new_sg(req->dst, nents);
if (IS_ERR(reqctx->newdstsg))
return PTR_ERR(reqctx->newdstsg);
reqctx->dstsg = reqctx->newdstsg;
} else {
reqctx->dstsg = req->dst; reqctx->dstsg = req->dst;
}
bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG, bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len), SPACE_LEFT(ablkctx->enckey_len));
&wrparam.snent,
&reqctx->dst_nents);
if ((bytes + reqctx->processed) >= req->nbytes) if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed; bytes = req->nbytes - reqctx->processed;
else else
...@@ -1153,8 +1167,6 @@ static int process_cipher(struct ablkcipher_request *req, ...@@ -1153,8 +1167,6 @@ static int process_cipher(struct ablkcipher_request *req,
return 0; return 0;
error: error:
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return err; return err;
} }
...@@ -1825,63 +1837,6 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) ...@@ -1825,63 +1837,6 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
} }
} }
/*
 * is_newsg - decide whether @sgl must be re-split into <= CHCR_SG_SIZE
 * chunks, and report how many entries the split list would need.
 *
 * @sgl:     scatterlist to inspect (walked to its end)
 * @newents: out-param; set to the entry count after 2K splitting
 *
 * Returns 1 if any segment exceeds CHCR_SG_SIZE (a new list is needed),
 * 0 otherwise. *newents is always written, even when no split is needed.
 */
static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
{
	int needs_split = 0;
	int total = 0;

	for (; sgl; sgl = sg_next(sgl)) {
		if (sgl->length > CHCR_SG_SIZE)
			needs_split = 1;
		total += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
	}
	*newents = total;
	return needs_split;
}
/*
 * free_new_sg - release a scatterlist allocated by alloc_new_sg().
 * kfree(NULL) is a no-op, so callers may pass NULL unconditionally.
 */
static inline void free_new_sg(struct scatterlist *sgl)
{
	kfree(sgl);
}
/*
 * alloc_new_sg - build a new scatterlist equivalent to @sgl but with
 * every entry limited to CHCR_SG_SIZE bytes.
 *
 * @sgl:   source scatterlist to split
 * @nents: number of entries the new list needs (as computed by is_newsg())
 *
 * Ownership of the returned list passes to the caller, who must release
 * it with free_new_sg(). Returns ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): GFP_KERNEL is used unconditionally here; if this can run
 * from atomic context the flag would need to follow the request's
 * CRYPTO_TFM_REQ_MAY_SLEEP — confirm against callers.
 */
static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
					unsigned int nents)
{
	struct scatterlist *newsg, *sg;
	int i, len, processed = 0;
	struct page *spage;
	int offset;

	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
	if (!newsg)
		return ERR_PTR(-ENOMEM);
	sg = newsg;
	sg_init_table(sg, nents);
	/* start at the first source segment's page and intra-page offset */
	offset = sgl->offset;
	spage = sg_page(sgl);
	for (i = 0; i < nents; i++) {
		/* carve off at most CHCR_SG_SIZE of the current segment */
		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
		sg_set_page(sg, spage, len, offset);
		processed += len;
		offset += len;
		/*
		 * When the offset crosses a page boundary, advance to the
		 * next struct page. NOTE(review): spage++ assumes the
		 * segment's pages are physically contiguous (true for
		 * higher-order allocations backing a single sg entry) —
		 * verify this holds for all sources feeding this driver.
		 */
		if (offset >= PAGE_SIZE) {
			offset = offset % PAGE_SIZE;
			spage++;
		}
		/* source segment fully consumed: move to the next one */
		if (processed == sgl->length) {
			processed = 0;
			sgl = sg_next(sgl);
			if (!sgl)
				break;
			spage = sg_page(sgl);
			offset = sgl->offset;
		}
		sg = sg_next(sg);
	}
	return newsg;
}
static int chcr_copy_assoc(struct aead_request *req, static int chcr_copy_assoc(struct aead_request *req,
struct chcr_aead_ctx *ctx) struct chcr_aead_ctx *ctx)
{ {
...@@ -1954,7 +1909,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -1954,7 +1909,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
GFP_ATOMIC; GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(ctx->dev);
reqctx->newdstsg = NULL;
dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
authsize); authsize);
if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0)) if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
...@@ -1966,24 +1920,13 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -1966,24 +1920,13 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (src_nent < 0) if (src_nent < 0)
goto err; goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
if (req->src != req->dst) { if (req->src != req->dst) {
error = chcr_copy_assoc(req, aeadctx); error = chcr_copy_assoc(req, aeadctx);
if (error) if (error)
return ERR_PTR(error); return ERR_PTR(error);
} reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
if (dst_size && is_newsg(req->dst, &nents)) { req->assoclen);
reqctx->newdstsg = alloc_new_sg(req->dst, nents);
if (IS_ERR(reqctx->newdstsg))
return ERR_CAST(reqctx->newdstsg);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
reqctx->newdstsg, req->assoclen);
} else {
if (req->src == req->dst)
reqctx->dst = src;
else
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
req->dst, req->assoclen);
} }
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
null = 1; null = 1;
...@@ -1996,7 +1939,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -1996,7 +1939,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
error = -EINVAL; error = -EINVAL;
goto err; goto err;
} }
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize)) : 0;
dst_size = get_space_for_phys_dsgl(nents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx); - sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
...@@ -2005,8 +1950,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -2005,8 +1950,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
op_type)) { op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
...@@ -2089,8 +2032,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -2089,8 +2032,6 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
/* ivmap_fail: */ /* ivmap_fail: */
kfree_skb(skb); kfree_skb(skb);
err: err:
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(error); return ERR_PTR(error);
} }
...@@ -2308,7 +2249,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2308,7 +2249,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
authsize); authsize);
reqctx->newdstsg = NULL;
if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err; goto err;
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
...@@ -2317,25 +2257,15 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2317,25 +2257,15 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm); sub_type = get_aead_subtype(tfm);
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
if (req->src != req->dst) { if (req->src != req->dst) {
error = chcr_copy_assoc(req, aeadctx); error = chcr_copy_assoc(req, aeadctx);
if (error) { if (error) {
pr_err("AAD copy to destination buffer fails\n"); pr_err("AAD copy to destination buffer fails\n");
return ERR_PTR(error); return ERR_PTR(error);
} }
} reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
if (dst_size && is_newsg(req->dst, &nents)) { req->assoclen);
reqctx->newdstsg = alloc_new_sg(req->dst, nents);
if (IS_ERR(reqctx->newdstsg))
return ERR_CAST(reqctx->newdstsg);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
reqctx->newdstsg, req->assoclen);
} else {
if (req->src == req->dst)
reqctx->dst = src;
else
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
req->dst, req->assoclen);
} }
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize)); (op_type ? -authsize : authsize));
...@@ -2347,8 +2277,9 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2347,8 +2277,9 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error) if (error)
goto err; goto err;
nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen +
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); (op_type ? -authsize : authsize)) : 0;
dst_size = get_space_for_phys_dsgl(nents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG, if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
...@@ -2356,8 +2287,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2356,8 +2287,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
op_type)) { op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
...@@ -2403,8 +2332,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2403,8 +2332,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
dstmap_fail: dstmap_fail:
kfree_skb(skb); kfree_skb(skb);
err: err:
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(error); return ERR_PTR(error);
} }
...@@ -2433,7 +2360,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2433,7 +2360,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
GFP_ATOMIC; GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(ctx->dev);
reqctx->newdstsg = NULL;
dst_size = assoclen + req->cryptlen + (op_type ? -authsize : dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
authsize); authsize);
/* validate key size */ /* validate key size */
...@@ -2447,26 +2373,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2447,26 +2373,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
goto err; goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
reqctx->dst = src;
if (req->src != req->dst) { if (req->src != req->dst) {
error = chcr_copy_assoc(req, aeadctx); error = chcr_copy_assoc(req, aeadctx);
if (error) if (error)
return ERR_PTR(error); return ERR_PTR(error);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
req->assoclen);
} }
if (dst_size && is_newsg(req->dst, &nents)) {
reqctx->newdstsg = alloc_new_sg(req->dst, nents);
if (IS_ERR(reqctx->newdstsg))
return ERR_CAST(reqctx->newdstsg);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
reqctx->newdstsg, assoclen);
} else {
if (req->src == req->dst)
reqctx->dst = src;
else
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
req->dst, assoclen);
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize)); (op_type ? -authsize : authsize));
if (reqctx->dst_nents < 0) { if (reqctx->dst_nents < 0) {
...@@ -2475,8 +2389,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2475,8 +2389,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
goto err; goto err;
} }
nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen +
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); (op_type ? -authsize : authsize)) : 0;
dst_size = get_space_for_phys_dsgl(nents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE; AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
...@@ -2485,8 +2400,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2485,8 +2400,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
op_type)) { op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
...@@ -2564,8 +2477,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2564,8 +2477,6 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
/* ivmap_fail: */ /* ivmap_fail: */
kfree_skb(skb); kfree_skb(skb);
err: err:
free_new_sg(reqctx->newdstsg);
reqctx->newdstsg = NULL;
return ERR_PTR(error); return ERR_PTR(error);
} }
......
...@@ -221,7 +221,7 @@ ...@@ -221,7 +221,7 @@
#define MAX_WR_SIZE 512 #define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32 #define MAX_DSGL_ENT 32
#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2) #define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 1)
#define MIN_CIPHER_SG 1 /* IV */ #define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 2 /*IV + AAD*/ #define MIN_AUTH_SG 2 /*IV + AAD*/
#define MIN_GCM_SG 2 /* IV + AAD*/ #define MIN_GCM_SG 2 /* IV + AAD*/
...@@ -261,7 +261,6 @@ struct cipher_wr_param { ...@@ -261,7 +261,6 @@ struct cipher_wr_param {
struct scatterlist *srcsg; struct scatterlist *srcsg;
char *iv; char *iv;
int bytes; int bytes;
short int snent;
unsigned short qid; unsigned short qid;
}; };
enum { enum {
......
...@@ -89,7 +89,7 @@ struct uld_ctx { ...@@ -89,7 +89,7 @@ struct uld_ctx {
struct chcr_dev *dev; struct chcr_dev *dev;
}; };
struct uld_ctx * assign_chcr_device(void); struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb); int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void); int start_crypto(void);
int stop_crypto(void); int stop_crypto(void);
......
...@@ -166,7 +166,6 @@ struct ablk_ctx { ...@@ -166,7 +166,6 @@ struct ablk_ctx {
struct chcr_aead_reqctx { struct chcr_aead_reqctx {
struct sk_buff *skb; struct sk_buff *skb;
struct scatterlist *dst; struct scatterlist *dst;
struct scatterlist *newdstsg;
struct scatterlist srcffwd[2]; struct scatterlist srcffwd[2];
struct scatterlist dstffwd[2]; struct scatterlist dstffwd[2];
short int dst_nents; short int dst_nents;
...@@ -245,7 +244,6 @@ struct chcr_blkcipher_req_ctx { ...@@ -245,7 +244,6 @@ struct chcr_blkcipher_req_ctx {
struct scatterlist dstffwd[2]; struct scatterlist dstffwd[2];
struct scatterlist *dstsg; struct scatterlist *dstsg;
struct scatterlist *dst; struct scatterlist *dst;
struct scatterlist *newdstsg;
unsigned int processed; unsigned int processed;
unsigned int last_req_len; unsigned int last_req_len;
unsigned int op; unsigned int op;
...@@ -291,10 +289,6 @@ static int chcr_aead_op(struct aead_request *req_base, ...@@ -291,10 +289,6 @@ static int chcr_aead_op(struct aead_request *req_base,
int size, int size,
create_wr_t create_wr_fn); create_wr_t create_wr_fn);
static inline int get_aead_subtype(struct crypto_aead *aead); static inline int get_aead_subtype(struct crypto_aead *aead);
static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
unsigned int nents);
static inline void free_new_sg(struct scatterlist *sgl);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req, static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err); unsigned char *input, int err);
#endif /* __CHCR_CRYPTO_H__ */ #endif /* __CHCR_CRYPTO_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment