Commit 2f47d580 authored by Harsh Jain, committed by Herbert Xu

crypto: chelsio - Move DMA un/mapping to chcr from lld cxgb4 driver

Allow chcr to do the DMA mapping/unmapping itself instead of the
lld cxgb4 driver. This moves the "Copy AAD to dst buffer"
requirement from the driver to firmware.
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 2956f36c
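
To illustrate the skip-aware scatter/gather walk that this patch introduces (sg_nents_xlen() and the dsgl/ulptx walk helpers below), here is a rough user-space sketch of the entry-counting logic only. It is not kernel code: the seg struct stands in for the kernel scatterlist, nents_xlen() and main() are illustrative names, and entlen plays the role of CHCR_SRC_SG_SIZE/CHCR_DST_SG_SIZE.

#include <stdio.h>

/* Stand-in for the kernel scatterlist: one DMA segment per node. */
struct seg {
	unsigned int len;	/* sg_dma_len() equivalent */
	struct seg *next;	/* sg_next() equivalent */
};

/*
 * Count how many hardware SG entries are needed to cover reqlen bytes,
 * starting skip bytes into the list, with at most entlen bytes per entry.
 * Mirrors the counting logic of sg_nents_xlen() in this patch.
 */
static int nents_xlen(struct seg *sg, unsigned int reqlen,
		      unsigned int entlen, unsigned int skip)
{
	unsigned int skip_len = 0, less;
	int nents = 0;

	while (sg && skip) {		/* burn off whole, then partial, skip */
		if (sg->len <= skip) {
			skip -= sg->len;
			sg = sg->next;
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	while (sg && reqlen) {		/* count entries capped at entlen bytes */
		less = reqlen < sg->len - skip_len ? reqlen : sg->len - skip_len;
		nents += (less + entlen - 1) / entlen;	/* DIV_ROUND_UP */
		reqlen -= less;
		skip_len = 0;
		sg = sg->next;
	}
	return nents;
}

int main(void)
{
	struct seg b = { 4096, NULL };
	struct seg a = { 1024, &b };

	/* 3000 bytes, starting 512 bytes in, 2048-byte entries -> prints 3 */
	printf("%d\n", nents_xlen(&a, 3000, 2048, 512));
	return 0;
}

Counting entries this way lets chcr size the work request before building the DSGL/ULPTX lists, since each hardware entry is capped and a request may resume part-way into an already DMA-mapped source list.
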
@@ -71,6 +71,8 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
#define IV AES_BLOCK_SIZE
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -103,7 +105,7 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+ return (skb->len <= SGE_MAX_WR_LEN);
}
/*
@@ -118,21 +120,92 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
- static int dstsg_2k(struct scatterlist *sgl, unsigned int reqlen)
+ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+ unsigned int entlen,
+ unsigned int skip)
{
int nents = 0;
unsigned int less;
+ unsigned int skip_len = 0;
- while (sgl && reqlen) {
- less = min(reqlen, sgl->length);
- nents += DIV_ROUND_UP(less, CHCR_SG_SIZE);
- reqlen -= less;
- sgl = sg_next(sgl);
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
}
while (sg && reqlen) {
less = min(reqlen, sg_dma_len(sg) - skip_len);
nents += DIV_ROUND_UP(less, entlen);
reqlen -= less;
skip_len = 0;
sg = sg_next(sg);
}
return nents;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
unsigned char *input,
int err)
{
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
int digestsize, updated_digestsize;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
if (input == NULL)
goto out;
reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
if (reqctx->dma_addr)
dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
reqctx->dma_len, DMA_TO_DEVICE);
reqctx->dma_addr = 0;
updated_digestsize = digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
if (reqctx->result == 1) {
reqctx->result = 0;
memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
digestsize);
} else {
memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
out:
req->base.complete(&req->base, err);
}
static inline void chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
if (reqctx->b0_dma)
dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
reqctx->b0_len, DMA_BIDIRECTIONAL);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
}
req->base.complete(&req->base, err);
}
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
@@ -167,27 +240,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_req_ctx ctx_req;
unsigned int digestsize, updated_digestsize;
struct adapter *adap = padap(ctx->dev);
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- ctx_req.req.aead_req = aead_request_cast(req);
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.reqctx->skb) {
kfree_skb(ctx_req.ctx.reqctx->skb);
ctx_req.ctx.reqctx->skb = NULL;
}
if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
chcr_verify_tag(ctx_req.req.aead_req, input,
&err);
ctx_req.ctx.reqctx->verify = VERIFY_HW;
}
ctx_req.req.aead_req->base.complete(req, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
@@ -196,60 +253,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
break;
case CRYPTO_ALG_TYPE_AHASH:
- ctx_req.req.ahash_req = ahash_request_cast(req);
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
ctx_req.ctx.ahash_ctx =
ahash_request_ctx(ctx_req.req.ahash_req);
digestsize =
crypto_ahash_digestsize(crypto_ahash_reqtfm(
ctx_req.req.ahash_req));
updated_digestsize = digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
if (ctx_req.ctx.ahash_ctx->skb) {
kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
}
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
sizeof(struct cpl_fw6_pld),
digestsize);
} else {
memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
ctx_req.req.ahash_req->base.complete(req, err);
break;
}
atomic_inc(&adap->chcr_stats.complete);
return err;
}
- /*
+ static void get_aes_decrypt_key(unsigned char *dec_key,
* calc_tx_flits_ofld - calculate # of flits for an offload packet
* @skb: the packet
* Returns the number of flits needed for the given offload packet.
* These packets are already fully constructed and no additional headers
* will be added.
*/
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
unsigned int flits, cnt;
if (is_ofld_imm(skb))
return DIV_ROUND_UP(skb->len, 8);
flits = skb_transport_offset(skb) / 8; /* headers */
cnt = skb_shinfo(skb)->nr_frags;
if (skb_tail_pointer(skb) != skb_transport_header(skb))
cnt++;
return flits + sgl_len(cnt);
}
static inline void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
{
@@ -396,64 +406,193 @@ static inline int is_hmac(struct crypto_tfm *tfm)
return 0;
}
- static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+ static inline void dsgl_walk_init(struct dsgl_walk *walk,
+ struct cpl_rx_phys_dsgl *dsgl)
{
- struct phys_sge_pairs *to;
- unsigned int len = 0, left_size = sg_param->obsize;
- unsigned int j = 0;
- int offset, ent_len;
+ walk->dsgl = dsgl;
+ walk->nents = 0;
+ walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+ }
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
struct cpl_rx_phys_dsgl *phys_cpl;
phys_cpl = walk->dsgl;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
- to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
- sizeof(struct cpl_rx_phys_dsgl));
+ phys_cpl->pcirlxorder_to_noofsgentr =
+ htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
size_t size,
dma_addr_t *addr)
{
int j;
if (!size)
return;
j = walk->nents;
walk->to->len[j % 8] = htons(size);
walk->to->addr[j % 8] = cpu_to_be64(*addr);
j++;
if ((j % 8) == 0)
walk->to++;
walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
struct scatterlist *sg,
unsigned int slen,
unsigned int skip)
{
int skip_len = 0;
unsigned int left_size = slen, len = 0;
unsigned int j = walk->nents;
int offset, ent_len;
if (!slen)
return;
while (sg && skip) {
if (sg_dma_len(sg) <= skip) {
skip -= sg_dma_len(sg);
skip_len = 0;
sg = sg_next(sg);
} else {
skip_len = skip;
skip = 0;
}
}
while (left_size && sg) {
- len = min_t(u32, left_size, sg_dma_len(sg));
+ len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
offset = 0;
while (len) {
- ent_len = min_t(u32, len, CHCR_SG_SIZE);
- to->len[j % 8] = htons(ent_len);
- to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
- offset);
+ ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
+ walk->to->len[j % 8] = htons(ent_len);
+ walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+ offset + skip_len);
offset += ent_len;
len -= ent_len;
j++;
if ((j % 8) == 0)
- to++;
+ walk->to++;
}
- left_size -= min(left_size, sg_dma_len(sg));
+ walk->last_sg = sg;
+ walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+ skip_len) + skip_len;
+ left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ skip_len = 0;
sg = sg_next(sg);
}
- phys_cpl->pcirlxorder_to_noofsgentr =
- htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
+ walk->nents = j;
+ }
CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
CPL_RX_PHYS_DSGL_NOOFSGENTR_V(j));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
phys_cpl->rss_hdr_int.hash_val = 0;
static inline void ulptx_walk_init(struct ulptx_walk *walk,
struct ulptx_sgl *ulp)
{
walk->sgl = ulp;
walk->nents = 0;
walk->pair_idx = 0;
walk->pair = ulp->sge;
walk->last_sg = NULL;
walk->last_sg_len = 0;
}
- static inline int map_writesg_phys_cpl(struct device *dev,
- struct cpl_rx_phys_dsgl *phys_cpl,
+ static inline void ulptx_walk_end(struct ulptx_walk *walk)
+ {
walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE_V(walk->nents));
}
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
size_t size,
dma_addr_t *addr)
{
if (!size)
return;
if (walk->nents == 0) {
walk->sgl->len0 = cpu_to_be32(size);
walk->sgl->addr0 = cpu_to_be64(*addr);
} else {
walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
walk->pair_idx = !walk->pair_idx;
if (!walk->pair_idx)
walk->pair++;
}
walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+ unsigned int len,
+ unsigned int skip)
{
- if (!sg || !sg_param->nents)
- return -EINVAL;
- sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
- if (sg_param->nents == 0) {
- pr_err("CHCR : DMA mapping failed\n");
- return -EINVAL;
+ int small;
+ int skip_len = 0;
+ unsigned int sgmin;
+ if (!len)
+ return;
+ while (sg && skip) {
if (sg_dma_len(sg) <= skip) {
skip -= sg_dma_len(sg);
skip_len = 0;
sg = sg_next(sg);
} else {
skip_len = skip;
skip = 0;
}
}
if (walk->nents == 0) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
walk->nents++;
len -= sgmin;
walk->last_sg = sg;
walk->last_sg_len = sgmin + skip_len;
skip_len += sgmin;
if (sg_dma_len(sg) == skip_len) {
sg = sg_next(sg);
skip_len = 0;
}
}
while (sg && len) {
small = min(sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
walk->pair->addr[walk->pair_idx] =
cpu_to_be64(sg_dma_address(sg) + skip_len);
walk->pair_idx = !walk->pair_idx;
walk->nents++;
if (!walk->pair_idx)
walk->pair++;
len -= sgmin;
skip_len += sgmin;
walk->last_sg = sg;
walk->last_sg_len = skip_len;
if (sg_dma_len(sg) == skip_len) {
sg = sg_next(sg);
skip_len = 0;
}
}
- write_phys_cpl(phys_cpl, sg, sg_param);
- return 0;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
@@ -473,45 +612,6 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static inline void write_buffer_to_skb(struct sk_buff *skb,
unsigned int *frags,
char *bfr,
u8 bfr_len)
{
skb->len += bfr_len;
skb->data_len += bfr_len;
skb->truesize += bfr_len;
get_page(virt_to_page(bfr));
skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
offset_in_page(bfr), bfr_len);
(*frags)++;
}
static inline void
write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
unsigned int page_len;
skb->len += count;
skb->data_len += count;
skb->truesize += count;
while (count > 0) {
if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
page_len = min(sg->length, count);
skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
(*frags)++;
count -= page_len;
sg = sg_next(sg);
}
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
struct adapter *adap = netdev2adap(dev);
@@ -547,32 +647,46 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
- unsigned int space)
+ unsigned int space,
+ unsigned int srcskip,
+ unsigned int dstskip)
{
int srclen = 0, dstlen = 0;
- int srcsg = minsg, dstsg = 0;
+ int srcsg = minsg, dstsg = minsg;
int offset = 0, less;
- while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+ if (sg_dma_len(src) == srcskip) {
src = sg_next(src);
srcskip = 0;
}
if (sg_dma_len(dst) == dstskip) {
dst = sg_next(dst);
dstskip = 0;
}
while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += src->length;
+ srclen += (sg_dma_len(src) - srcskip);
srcsg++;
offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
if (srclen <= dstlen)
break;
- less = min_t(unsigned int, dst->length - offset,
- CHCR_SG_SIZE);
+ less = min_t(unsigned int, sg_dma_len(dst) - offset -
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
- if (offset == dst->length) {
+ if (offset == sg_dma_len(dst)) {
dst = sg_next(dst);
offset = 0;
}
dstsg++;
+ dstskip = 0;
}
src = sg_next(src);
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -602,24 +716,22 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
}
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
- void *req, struct sk_buff *skb,
+ struct crypto_async_request *req,
+ unsigned int imm,
int hash_sz,
+ unsigned int len16,
unsigned int sc_len,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
unsigned int immdatalen = 0;
if (is_ofld_imm(skb))
immdatalen = skb->data_len;
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
chcr_req->wreq.len16_pkd =
- htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
- (calc_tx_flits_ofld(skb) * 8), 16)));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
@@ -627,13 +739,12 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
- chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+ ((sizeof(chcr_req->wreq)) >> 4)));
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
- sizeof(chcr_req->key_ctx) +
- sc_len + immdatalen);
+ sizeof(chcr_req->key_ctx) + sc_len);
}
/**
@@ -646,49 +757,52 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
struct chcr_blkcipher_req_ctx *reqctx =
ablkcipher_request_ctx(wrparam->req);
- struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl;
+ unsigned int temp = 0, transhdr_len, dst_size;
int error;
int nents;
- unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+ unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, wrparam->bytes);
- nents = dstsg_2k(reqctx->dst, wrparam->bytes);
- phys_dsgl = get_space_for_phys_dsgl(nents);
+ nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+ reqctx->dst_ofst);
+ dst_size = get_space_for_phys_dsgl(nents + 1);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
CHCR_SRC_SG_SIZE, reqctx->src_ofst);
temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
* 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
transhdr_len += temp;
transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
- chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1);
+ 0, 0, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 1, phys_dsgl);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -713,26 +827,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = wrparam->bytes;
- sg_param.qid = wrparam->qid;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+ chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
reqctx->dst, &sg_param);
if (error)
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, 0,
- sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl + kctx_len,
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
+ +(reqctx->imm ? (IV + wrparam->bytes) : 0);
+ create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+ transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
- skb_get(skb);
return skb;
- map_fail1:
- kfree_skb(skb);
err:
return ERR_PTR(error);
}
@@ -757,8 +863,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -776,8 +881,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -809,8 +913,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -841,8 +944,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -909,8 +1011,7 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct crypto_cipher *cipher;
int ret, i;
@@ -927,8 +1028,7 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
goto out;
/*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/
crypto_cipher_encrypt_one(cipher, iv, iv);
for (i = 0; i < round8; i++)
gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
@@ -1006,64 +1106,60 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
int bytes;
dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
DMA_FROM_DEVICE);
if (reqctx->skb) {
kfree_skb(reqctx->skb);
reqctx->skb = NULL;
}
if (err)
- goto complete;
+ goto unmap;
if (req->nbytes == reqctx->processed) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
err = chcr_final_cipher_iv(req, fw6_pld, req->info);
goto complete;
}
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
err = -EBUSY;
- goto complete;
+ goto unmap;
}
}
- wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
- reqctx->processed);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
- reqctx->processed);
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+ SPACE_LEFT(ablkctx->enckey_len),
+ reqctx->src_ofst, reqctx->dst_ofst);
if (!wrparam.srcsg || !reqctx->dst) {
pr_err("Input sg list length less that nbytes\n");
err = -EINVAL;
goto complete;
}
bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
SPACE_LEFT(ablkctx->enckey_len));
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
} else {
/*CTR mode counter overfloa*/
bytes = req->nbytes - reqctx->processed;
}
dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
if (err)
- goto complete;
+ goto unmap;
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
- wrparam.srcsg,
- reqctx->dst,
- req->nbytes - reqctx->processed,
- reqctx->iv,
+ req->src,
+ req->dst,
+ req->nbytes,
+ req->info,
reqctx->op);
goto complete;
}
@@ -1071,21 +1167,23 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- reqctx->processed += bytes;
- reqctx->last_req_len = bytes;
- wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
- goto complete;
+ goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
reqctx->last_req_len = bytes;
reqctx->processed += bytes;
return 0; return 0;
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
req->base.complete(&req->base, err);
return err;
@@ -1099,8 +1197,7 @@ static int process_cipher(struct ablkcipher_request *req,
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
@@ -1114,16 +1211,41 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- wrparam.srcsg = req->src;
- reqctx->dstsg = req->dst;
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len));
+ chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
/*Min dsgl size*/
32))) {
/* Can be sent as Imm*/
unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
dnents = sg_nents_xlen(req->dst, req->nbytes,
CHCR_DST_SG_SIZE, 0);
dnents += 1; // IV
phys_dsgl = get_space_for_phys_dsgl(dnents);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
reqctx->imm = (transhdr_len + IV + req->nbytes) <=
SGE_MAX_WR_LEN;
bytes = IV + req->nbytes;
} else {
reqctx->imm = 0;
}
if (!reqctx->imm) {
bytes = chcr_sg_ent_in_wr(req->src, req->dst,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
- if (unlikely(bytes > req->nbytes))
+ } else {
bytes = req->nbytes;
+ }
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
@@ -1140,9 +1262,11 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, ivsize);
+ memcpy(reqctx->iv, req->info, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1152,20 +1276,25 @@ static int process_cipher(struct ablkcipher_request *req,
op_type);
goto error;
}
reqctx->processed = bytes;
reqctx->last_req_len = bytes;
reqctx->dst = reqctx->dstsg;
reqctx->op = op_type;
reqctx->srcsg = req->src;
reqctx->dstsg = req->dst;
reqctx->src_ofst = 0;
reqctx->dst_ofst = 0;
wrparam.qid = qid;
wrparam.req = req;
wrparam.bytes = bytes;
*skb = create_cipher_wr(&wrparam);
if (IS_ERR(*skb)) {
err = PTR_ERR(*skb);
- goto error;
+ goto unmap;
}
reqctx->processed = bytes;
reqctx->last_req_len = bytes;
return 0;
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
return err;
}
@@ -1173,23 +1302,22 @@ static int process_cipher(struct ablkcipher_request *req,
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct sk_buff *skb = NULL;
int err;
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_ENCRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1197,23 +1325,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
int err;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1361,17 +1488,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
struct sk_buff *skb = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
- unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0;
+ unsigned int kctx_len = 0, temp = 0;
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(h_ctx(tfm)->dev);
+ int error = 0;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1383,15 +1512,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
SGE_MAX_WR_LEN;
nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
nents += param->bfr_len ? 1 : 0;
transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
param->sg_len), 16) * 16) :
(sgl_len(nents) * 8);
transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb)
- return skb;
+ return ERR_PTR(-ENOMEM);
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1420,37 +1556,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
((kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
- skb_set_transport_header(skb, transhdr_len);
- if (param->bfr_len != 0)
- write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
- param->bfr_len);
- if (param->sg_len != 0)
- write_sg_to_skb(skb, &frags, req->src, param->sg_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ DUMMY_BYTES);
+ if (param->bfr_len != 0) {
+ req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
+ req_ctx->reqbfr, param->bfr_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
req_ctx->dma_addr)) {
error = -ENOMEM;
goto err;
}
req_ctx->dma_len = param->bfr_len;
} else {
req_ctx->dma_addr = 0;
}
chcr_add_hash_src_ent(req, ulptx, param);
/* Request upto max wr size */
temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
+ param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, hash_size_in_response,
- DUMMY_BYTES + kctx_len, 0);
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
+ hash_size_in_response, transhdr_len,
+ temp, 0);
req_ctx->skb = skb;
- skb_get(skb);
return skb;
err:
kfree_skb(skb);
return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1464,7 +1615,9 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
@@ -1475,8 +1628,10 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
goto unmap;
}
if (remainder) {
u8 *temp;
@@ -1490,10 +1645,13 @@ static int chcr_ahash_update(struct ahash_request *req)
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
@@ -1510,13 +1668,12 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1543,7 +1700,7 @@ static int chcr_ahash_final(struct ahash_request *req)
return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1552,17 +1709,17 @@ static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1588,34 +1745,41 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1624,6 +1788,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
params.opad_needed = 1;
else
params.opad_needed = 0;
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
params.last = 0;
params.more = 0;
@@ -1641,13 +1808,17 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
@@ -1657,6 +1828,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
state->is_sg_map = 0;
state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1672,6 +1845,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
req_ctx->is_sg_map = 0;
req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1681,8 +1856,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
@@ -1735,8 +1909,7 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int key_len)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned short context_size = 0;
int err;
@@ -1775,6 +1948,7 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
return 0;
}
@@ -1790,8 +1964,7 @@ static int chcr_hmac_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
unsigned int digestsize = crypto_ahash_digestsize(rtfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
...@@ -1837,29 +2010,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) ...@@ -1837,29 +2010,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
} }
} }
static int chcr_copy_assoc(struct aead_request *req, static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *ctx) unsigned short op_type)
{ {
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
skcipher_request_set_tfm(skreq, ctx->null); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
skcipher_request_set_callback(skreq, aead_request_flags(req), int error = -EINVAL;
NULL, NULL); unsigned int dst_size;
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, unsigned int authsize = crypto_aead_authsize(tfm);
NULL);
return crypto_skcipher_encrypt(skreq); dst_size = req->assoclen + req->cryptlen + (op_type ?
-authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
if (op_type && req->cryptlen < authsize)
goto err;
error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
op_type);
if (error) {
error = -ENOMEM;
goto err;
}
reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
CHCR_SRC_SG_SIZE, 0);
reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
CHCR_SRC_SG_SIZE, req->assoclen);
return 0;
err:
return error;
} }
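
The common init path above derives the destination length from the request: encryption appends the authentication tag, decryption consumes it, and a decrypt request shorter than the tag is rejected before any DMA mapping happens. A minimal standalone sketch of that length arithmetic, with purely illustrative values (this is not the driver's code):

#include <stdio.h>

static unsigned int aead_dst_len(unsigned int assoclen, unsigned int cryptlen,
				 unsigned int authsize, int decrypt)
{
	/* encrypt: tag is appended; decrypt: tag is stripped from the input */
	return assoclen + cryptlen + (decrypt ? -authsize : authsize);
}

int main(void)
{
	/* e.g. 32 bytes of AAD, 1024-byte plaintext, 16-byte tag */
	printf("encrypt dst = %u\n", aead_dst_len(32, 1024, 16, 0));	/* 1072 */
	printf("decrypt dst = %u\n", aead_dst_len(32, 1040, 16, 1));	/* 1056 */
	return 0;
}
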
static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
int aadmax, int wrlen, int aadmax, int wrlen,
unsigned short op_type) unsigned short op_type)
{ {
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
dst_nents > MAX_DSGL_ENT ||
(req->assoclen > aadmax) || (req->assoclen > aadmax) ||
(src_nent > MAX_SKB_FRAGS) || (wrlen > SGE_MAX_WR_LEN))
(wrlen > MAX_WR_SIZE))
return 1; return 1;
return 0; return 0;
} }
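
The reworked fallback check keys off the destination DSGL entry count and the total work-request length instead of the source fragment count. A hedged sketch of the same decision; the two limits below are placeholders, not the hardware values:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DSGL_ENT_HYP	32	/* illustrative stand-in for MAX_DSGL_ENT */
#define WR_LEN_LIMIT_HYP	512	/* illustrative stand-in for SGE_MAX_WR_LEN */

static bool need_fallback(unsigned int cryptlen, unsigned int authsize,
			  bool decrypt, int dst_nents, unsigned int assoclen,
			  unsigned int aadmax, unsigned int wrlen)
{
	return (cryptlen - (decrypt ? authsize : 0)) == 0 ||
	       dst_nents > MAX_DSGL_ENT_HYP ||
	       assoclen > aadmax ||
	       wrlen > WR_LEN_LIMIT_HYP;
}

int main(void)
{
	printf("%d\n", need_fallback(1024, 16, false, 8, 32, 511, 480));	/* 0 */
	printf("%d\n", need_fallback(1024, 16, false, 64, 32, 511, 480));	/* 1 */
	return 0;
}
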
...@@ -1867,8 +2059,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent, ...@@ -1867,8 +2059,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct aead_request *subreq = aead_request_ctx(req); struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher); aead_request_set_tfm(subreq, aeadctx->sw_cipher);
...@@ -1887,84 +2078,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -1887,84 +2078,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short op_type) unsigned short op_type)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req; struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl; struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param; struct ulptx_sgl *ulptx;
struct scatterlist *src; unsigned int transhdr_len;
unsigned int frags = 0, transhdr_len; unsigned int dst_size = 0, temp;
unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; unsigned int kctx_len = 0, dnents;
unsigned int kctx_len = 0, nents;
unsigned short stop_offset = 0;
unsigned int assoclen = req->assoclen; unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm); unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL, src_nent; int error = -EINVAL;
int null = 0; int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC; GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(a_ctx(tfm)->dev);
dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : if (req->cryptlen == 0)
authsize); return NULL;
if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
goto err;
if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) reqctx->b0_dma = 0;
goto err;
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
if (req->src != req->dst) {
error = chcr_copy_assoc(req, aeadctx);
if (error)
return ERR_PTR(error);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
req->assoclen);
}
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
null = 1; null = 1;
assoclen = 0; assoclen = 0;
} }
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
(op_type ? -authsize : authsize)); authsize);
if (reqctx->dst_nents < 0) { error = chcr_aead_common_init(req, op_type);
pr_err("AUTHENC:Invalid Destination sg entries\n"); if (error)
error = -EINVAL; return ERR_PTR(error);
goto err; if (dst_size) {
dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
dnents += sg_nents_xlen(req->dst, req->cryptlen +
(op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
req->assoclen);
dnents += MIN_AUTH_SG; // For IV
} else {
dnents = 0;
} }
nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize)) : 0; dst_size = get_space_for_phys_dsgl(dnents);
dst_size = get_space_for_phys_dsgl(nents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx); - sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG, reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
T6_MAX_AAD_SIZE, SGE_MAX_WR_LEN;
transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
op_type)) { * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ MIN_GCM_SG) * 8);
transhdr_len += temp;
transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) { if (!skb) {
error = -ENOMEM; error = -ENOMEM;
goto err; goto err;
} }
/* LLD is going to write the sge hdr. */
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
/* Write WR */
chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req = __skb_put_zero(skb, transhdr_len);
stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
/* /*
* Input order is AAD,IV and Payload. where IV should be included as * Input order is AAD,IV and Payload. where IV should be included as
...@@ -1972,24 +2154,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -1972,24 +2154,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec * to the hardware spec
*/ */
chcr_req->sec_cpl.op_ivinsrtofst = chcr_req->sec_cpl.op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
(ivsize ? (assoclen + 1) : 0)); assoclen + 1);
chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen); chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen, assoclen ? 1 : 0, assoclen,
assoclen + ivsize + 1, assoclen + IV + 1,
(stop_offset & 0x1F0) >> 4); (temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
stop_offset & 0xF, temp & 0xF,
null ? 0 : assoclen + ivsize + 1, null ? 0 : assoclen + IV + 1,
stop_offset, stop_offset); temp, temp);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0, (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_CBC, CHCR_SCMD_CIPHER_MODE_AES_CBC,
actx->auth_mode, aeadctx->hmac_ctrl, actx->auth_mode, aeadctx->hmac_ctrl,
ivsize >> 1); IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 1, dst_size); 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
if (op_type == CHCR_ENCRYPT_OP) if (op_type == CHCR_ENCRYPT_OP)
...@@ -2002,39 +2184,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, ...@@ -2002,39 +2184,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len - 4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
memcpy(reqctx->iv, req->iv, IV);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
sg_param.nents = reqctx->dst_nents; ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
sg_param.qid = qid; chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
reqctx->dst, &sg_param);
if (error)
goto dstmap_fail;
skb_set_transport_header(skb, transhdr_len);
if (assoclen) {
/* AAD buffer in */
write_sg_to_skb(skb, &frags, req->src, assoclen);
}
write_buffer_to_skb(skb, &frags, req->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
atomic_inc(&adap->chcr_stats.cipher_rqst); atomic_inc(&adap->chcr_stats.cipher_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, size, temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len, 0); kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb; reqctx->skb = skb;
skb_get(skb); reqctx->op = op_type;
return skb; return skb;
dstmap_fail:
/* ivmap_fail: */
kfree_skb(skb);
err: err:
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
op_type);
return ERR_PTR(error); return ERR_PTR(error);
} }
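
create_authenc_wr() now sizes the work request up front: when the transport header plus AAD, IV and payload fits under the WR limit the data goes inline (padded to a 16-byte multiple), otherwise space for a ULPTX SGL is reserved, and the final length is again rounded to 16 bytes. A small sketch of that sizing under an assumed limit (the driver's sgl_len() works in flits; plain bytes are used here for brevity):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define WR_LIMIT_HYP		512	/* stand-in for the real SGE limit */
#define IV_LEN			16

static unsigned int wr_len(unsigned int transhdr_len, unsigned int assoclen,
			   unsigned int cryptlen, unsigned int sgl_bytes)
{
	unsigned int payload = assoclen + IV_LEN + cryptlen;
	int imm = (transhdr_len + payload) < WR_LIMIT_HYP;
	unsigned int extra = imm ? DIV_ROUND_UP(payload, 16) * 16 : sgl_bytes;

	return DIV_ROUND_UP(transhdr_len + extra, 16) * 16;
}

int main(void)
{
	printf("small request: %u bytes\n", wr_len(160, 16, 64, 120));	/* inline */
	printf("large request: %u bytes\n", wr_len(160, 16, 4096, 120));/* SGL */
	return 0;
}
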
static int chcr_aead_dma_map(struct device *dev,
struct aead_request *req,
unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm);
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
-authsize : authsize);
if (!req->cryptlen || !dst_size)
return 0;
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, reqctx->iv_dma))
return -ENOMEM;
if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
goto err;
error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
DMA_FROM_DEVICE);
if (!error) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
goto err;
}
}
return 0;
err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
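
chcr_aead_dma_map() maps the IV buffer first, then the source scatterlist (bidirectionally when src == dst, otherwise as a TO_DEVICE/FROM_DEVICE pair with the destination), and unwinds whatever was already mapped if a later step fails; dma_map_sg() signals failure by returning 0, hence the if (!error) checks. A userspace sketch of the same unwind order with stubbed mapping calls (purely illustrative):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for dma_map_single()/dma_map_sg(); 0 means failure */
static int map_iv(void)	 { return 1; }
static int map_src(void) { return 1; }
static int map_dst(void) { return 0; }	/* force a failure to show the unwind */
static void unmap_iv(void)  { puts("unmap iv"); }
static void unmap_src(void) { puts("unmap src"); }

static int aead_map(bool in_place)
{
	if (!map_iv())
		return -1;
	if (!map_src())
		goto err_iv;
	if (!in_place && !map_dst())
		goto err_src;
	return 0;

err_src:
	unmap_src();
err_iv:
	unmap_iv();
	return -1;
}

int main(void)
{
	printf("map: %d\n", aead_map(false));	/* dst fails -> src and iv unwound */
	return 0;
}
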
static void chcr_aead_dma_unmap(struct device *dev,
struct aead_request *req,
unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm);
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
-authsize : authsize);
if (!req->cryptlen || !dst_size)
return;
dma_unmap_single(dev, reqctx->iv_dma, IV,
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_FROM_DEVICE);
}
}
static inline void chcr_add_aead_src_ent(struct aead_request *req,
struct ulptx_sgl *ulptx,
unsigned int assoclen,
unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
if (reqctx->imm) {
u8 *buf = (u8 *)ulptx;
if (reqctx->b0_dma) {
memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
buf += reqctx->b0_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
buf, assoclen, 0);
buf += assoclen;
memcpy(buf, reqctx->iv, IV);
buf += IV;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
buf, req->cryptlen, req->assoclen);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (reqctx->b0_dma)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
&reqctx->b0_dma);
ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
req->assoclen);
ulptx_walk_end(&ulp_walk);
}
}
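
In the immediate case chcr_add_aead_src_ent() lays the request out flat as (optional B0) | AAD | IV | payload; in the SGL case the same pieces are emitted as ULPTX entries in that order. A standalone sketch of the flat layout using plain buffers instead of scatterlists (names and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define IV_LEN 16

static size_t pack_imm(unsigned char *out,
		       const unsigned char *aad, size_t aadlen,
		       const unsigned char *iv,
		       const unsigned char *payload, size_t plen)
{
	unsigned char *p = out;

	memcpy(p, aad, aadlen);		p += aadlen;	/* AAD first */
	memcpy(p, iv, IV_LEN);		p += IV_LEN;	/* then the IV */
	memcpy(p, payload, plen);	p += plen;	/* then the payload */
	return p - out;
}

int main(void)
{
	unsigned char aad[8] = {0}, iv[IV_LEN] = {0}, data[32] = {0};
	unsigned char buf[8 + IV_LEN + 32];

	printf("packed %zu bytes\n",
	       pack_imm(buf, aad, sizeof(aad), iv, data, sizeof(data)));
	return 0;
}
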
static inline void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned int assoclen,
unsigned short op_type,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
if (reqctx->b0_dma)
dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
temp = req->cryptlen + (op_type ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
dsgl_walk_end(&dsgl_walk, qid);
}
static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
struct ulptx_sgl *ulptx,
struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
if (reqctx->imm) {
u8 *buf = (u8 *)ulptx;
memcpy(buf, reqctx->iv, IV);
buf += IV;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
buf, wrparam->bytes, reqctx->processed);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
reqctx->src_ofst);
reqctx->srcsg = ulp_walk.last_sg;
reqctx->src_ofst = ulp_walk.last_sg_len;
ulptx_walk_end(&ulp_walk);
}
}
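
For ciphers the ULPTX walk records where it stopped (last_sg/last_sg_len are copied back into srcsg/src_ofst), so a request split across several work requests resumes from the same byte offset. A minimal sketch of that resume bookkeeping over a flat byte range (illustrative only):

#include <stdio.h>

struct walk_state {
	size_t off;	/* resume offset, analogous to src_ofst */
};

static size_t consume(struct walk_state *w, size_t total, size_t chunk)
{
	size_t left = total - w->off;
	size_t n = left < chunk ? left : chunk;

	/* ...SGL entries for bytes [w->off, w->off + n) would be emitted here... */
	w->off += n;
	return n;
}

int main(void)
{
	struct walk_state w = { .off = 0 };
	size_t total = 100;

	while (w.off < total) {
		size_t n = consume(&w, total, 48);

		printf("sent %zu, now at %zu\n", n, w.off);
	}
	return 0;
}
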
static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
dsgl_walk_end(&dsgl_walk, qid);
}
static inline void chcr_add_hash_src_ent(struct ahash_request *req,
struct ulptx_sgl *ulptx,
struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
if (reqctx->imm) {
u8 *buf = (u8 *)ulptx;
if (param->bfr_len) {
memcpy(buf, reqctx->reqbfr, param->bfr_len);
buf += param->bfr_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
buf, param->sg_len, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
0);
// reqctx->srcsg = ulp_walk.last_sg;
// reqctx->src_ofst = ulp_walk.last_sg_len;
ulptx_walk_end(&ulp_walk);
}
}
static inline int chcr_hash_dma_map(struct device *dev,
struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
if (!req->nbytes)
return 0;
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
return error;
req_ctx->is_sg_map = 1;
return 0;
}
static inline void chcr_hash_dma_unmap(struct device *dev,
struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
if (!req->nbytes)
return;
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
req_ctx->is_sg_map = 0;
}
static int chcr_cipher_dma_map(struct device *dev,
struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, reqctx->iv_dma))
return -ENOMEM;
if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
goto err;
error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
DMA_FROM_DEVICE);
if (!error) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
goto err;
}
}
return 0;
err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
static void chcr_cipher_dma_unmap(struct device *dev,
struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
dma_unmap_single(dev, reqctx->iv_dma, IV,
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_FROM_DEVICE);
}
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize) static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{ {
__be32 data; __be32 data;
...@@ -2119,15 +2574,13 @@ static int ccm_format_packet(struct aead_request *req, ...@@ -2119,15 +2574,13 @@ static int ccm_format_packet(struct aead_request *req,
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int dst_size, unsigned int dst_size,
struct aead_request *req, struct aead_request *req,
unsigned short op_type, unsigned short op_type)
struct chcr_context *chcrctx)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int ivsize = AES_BLOCK_SIZE;
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = chcrctx->dev->rx_channel_id; unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra; unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0; unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen; unsigned int assoclen;
...@@ -2140,7 +2593,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, ...@@ -2140,7 +2593,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0); ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ? auth_offset = req->cryptlen ?
(assoclen + ivsize + 1 + ccm_xtra) : 0; (assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) { if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen) if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm); tag_offset = crypto_aead_authsize(tfm);
...@@ -2150,14 +2603,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, ...@@ -2150,14 +2603,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2, (ivsize ? (assoclen + 1) : 0) + 2, assoclen + 1 + ccm_xtra);
ccm_xtra);
sec_cpl->pldlen = sec_cpl->pldlen =
htonl(assoclen + ivsize + req->cryptlen + ccm_xtra); htonl(assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there will be b0 always. So AAD start will be 1 always */ /* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1, assoclen + ccm_xtra, assoclen 1, assoclen + ccm_xtra, assoclen
+ ivsize + 1 + ccm_xtra, 0); + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset, auth_offset, tag_offset,
...@@ -2166,10 +2618,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, ...@@ -2166,10 +2618,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1, (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
cipher_mode, mac_mode, cipher_mode, mac_mode,
aeadctx->hmac_ctrl, ivsize >> 1); aeadctx->hmac_ctrl, IV >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1, dst_size); 0, dst_size);
} }
int aead_ccm_validate_input(unsigned short op_type, int aead_ccm_validate_input(unsigned short op_type,
...@@ -2189,119 +2641,83 @@ int aead_ccm_validate_input(unsigned short op_type, ...@@ -2189,119 +2641,83 @@ int aead_ccm_validate_input(unsigned short op_type,
return -EINVAL; return -EINVAL;
} }
} }
if (aeadctx->enckey_len == 0) {
pr_err("CCM: Encryption key not set\n");
return -EINVAL;
}
return 0; return 0;
} }
unsigned int fill_aead_req_fields(struct sk_buff *skb,
struct aead_request *req,
struct scatterlist *src,
unsigned int ivsize,
struct chcr_aead_ctx *aeadctx)
{
unsigned int frags = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
/* b0 and aad length(if available) */
write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
(req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
if (req->assoclen) {
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
write_sg_to_skb(skb, &frags, req->src,
req->assoclen - 8);
else
write_sg_to_skb(skb, &frags, req->src, req->assoclen);
}
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
if (req->cryptlen)
write_sg_to_skb(skb, &frags, src, req->cryptlen);
return frags;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned short qid, unsigned short qid,
int size, int size,
unsigned short op_type) unsigned short op_type)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req; struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl; struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param; struct ulptx_sgl *ulptx;
struct scatterlist *src; unsigned int transhdr_len;
unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; unsigned int dst_size = 0, kctx_len, dnents, temp;
unsigned int dst_size = 0, kctx_len, nents; unsigned int sub_type, assoclen = req->assoclen;
unsigned int sub_type;
unsigned int authsize = crypto_aead_authsize(tfm); unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL, src_nent; int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC; GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(a_ctx(tfm)->dev);
dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
authsize);
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
reqctx->b0_dma = 0;
sub_type = get_aead_subtype(tfm); sub_type = get_aead_subtype(tfm);
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
reqctx->dst = src; assoclen -= 8;
if (req->src != req->dst) { dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
error = chcr_copy_assoc(req, aeadctx); authsize);
if (error) { error = chcr_aead_common_init(req, op_type);
pr_err("AAD copy to destination buffer fails\n"); if (error)
return ERR_PTR(error); return ERR_PTR(error);
}
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
req->assoclen); reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents < 0) {
pr_err("CCM:Invalid Destination sg entries\n");
error = -EINVAL;
goto err;
}
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error) if (error)
goto err; goto err;
nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen + if (dst_size) {
(op_type ? -authsize : authsize)) : 0; dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(nents); dnents += sg_nents_xlen(req->dst, req->cryptlen
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_CCM_SG; // For IV and B0
} else {
dnents = 0;
}
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG, reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
T6_MAX_AAD_SIZE - 18, reqctx->b0_len) <= SGE_MAX_WR_LEN;
transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
op_type)) { reqctx->b0_len), 16) * 16) :
(sgl_len(reqctx->src_nents + reqctx->aad_nents +
MIN_CCM_SG) * 8);
transhdr_len += temp;
transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
reqctx->b0_len, transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb) { if (!skb) {
error = -ENOMEM; error = -ENOMEM;
goto err; goto err;
} }
skb_reserve(skb, sizeof(struct sge_opaque_hdr)); chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
chcr_req = __skb_put_zero(skb, transhdr_len); fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
...@@ -2309,29 +2725,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ...@@ -2309,29 +2725,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
16), aeadctx->key, aeadctx->enckey_len); 16), aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
error = ccm_format_packet(req, aeadctx, sub_type, op_type); error = ccm_format_packet(req, aeadctx, sub_type, op_type);
if (error) if (error)
goto dstmap_fail; goto dstmap_fail;
sg_param.nents = reqctx->dst_nents; reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); &reqctx->scratch_pad, reqctx->b0_len,
sg_param.qid = qid; DMA_BIDIRECTIONAL);
error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
reqctx->dst, &sg_param); reqctx->b0_dma)) {
if (error) error = -ENOMEM;
goto dstmap_fail; goto dstmap_fail;
}
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
skb_set_transport_header(skb, transhdr_len);
frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
atomic_inc(&adap->chcr_stats.aead_rqst); atomic_inc(&adap->chcr_stats.aead_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, 0, temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len, 0); kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
reqctx->b0_len) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
reqctx->skb = skb; reqctx->skb = skb;
skb_get(skb); reqctx->op = op_type;
return skb; return skb;
dstmap_fail: dstmap_fail:
kfree_skb(skb); kfree_skb(skb);
err: err:
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error); return ERR_PTR(error);
} }
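
For CCM the B0 block (plus, when AAD is present, its length field) is built in the scratch pad and DMA-mapped on its own, which is why the destination budget reserves MIN_CCM_SG entries for IV and B0. A tiny sketch of the b0_len computation, assuming the usual 16-byte B0 and a 2-byte AAD length field (both sizes are assumptions, not quoted from the driver):

#include <stdio.h>

#define CCM_B0_SIZE_HYP		16	/* one AES block, assumed */
#define CCM_AAD_FIELD_HYP	2	/* 16-bit AAD length prefix, assumed */

static unsigned int ccm_b0_len(unsigned int assoclen)
{
	return CCM_B0_SIZE_HYP + (assoclen ? CCM_AAD_FIELD_HYP : 0);
}

int main(void)
{
	printf("no AAD  : %u\n", ccm_b0_len(0));	/* 16 */
	printf("with AAD: %u\n", ccm_b0_len(24));	/* 18 */
	return 0;
}
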
...@@ -2341,101 +2765,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2341,101 +2765,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned short op_type) unsigned short op_type)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req; struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl; struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param; struct ulptx_sgl *ulptx;
struct scatterlist *src; unsigned int transhdr_len, dnents = 0;
unsigned int frags = 0, transhdr_len; unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int ivsize = AES_BLOCK_SIZE;
unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
unsigned char tag_offset = 0;
unsigned int authsize = crypto_aead_authsize(tfm); unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL, src_nent; int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC; GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev); struct adapter *adap = padap(a_ctx(tfm)->dev);
dst_size = assoclen + req->cryptlen + (op_type ? -authsize : if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
authsize); assoclen = req->assoclen - 8;
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); reqctx->b0_dma = 0;
reqctx->dst = src; dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
if (req->src != req->dst) { error = chcr_aead_common_init(req, op_type);
error = chcr_copy_assoc(req, aeadctx);
if (error) if (error)
return ERR_PTR(error); return ERR_PTR(error);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, if (dst_size) {
req->assoclen); dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
} dnents += sg_nents_xlen(req->dst,
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + req->cryptlen + (op_type ? -authsize : authsize),
(op_type ? -authsize : authsize)); CHCR_DST_SG_SIZE, req->assoclen);
if (reqctx->dst_nents < 0) { dnents += MIN_GCM_SG; // For IV
pr_err("GCM:Invalid Destination sg entries\n"); } else {
error = -EINVAL; dnents = 0;
goto err;
} }
dst_size = get_space_for_phys_dsgl(dnents);
nents = dst_size ? dstsg_2k(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize)) : 0;
dst_size = get_space_for_phys_dsgl(nents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE; AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG, reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
T6_MAX_AAD_SIZE, SGE_MAX_WR_LEN;
transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
op_type)) { req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
reqctx->aad_nents + MIN_GCM_SG) * 8);
transhdr_len += temp;
transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback); atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type)); return ERR_PTR(chcr_aead_fallback(req, op_type));
} }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) { if (!skb) {
error = -ENOMEM; error = -ENOMEM;
goto err; goto err;
} }
/* NIC driver is going to write the sge hdr. */
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req = __skb_put_zero(skb, transhdr_len);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) //Offset of tag from end
assoclen = req->assoclen - 8; temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
ctx->dev->rx_channel_id, 2, (ivsize ? a_ctx(tfm)->dev->rx_channel_id, 2,
(assoclen + 1) : 0)); (assoclen + 1));
chcr_req->sec_cpl.pldlen = chcr_req->sec_cpl.pldlen =
htonl(assoclen + ivsize + req->cryptlen); htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen, assoclen ? 1 : 0, assoclen,
assoclen + ivsize + 1, 0); assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert = chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1, FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
tag_offset, tag_offset); temp, temp);
chcr_req->sec_cpl.seqno_numivs = chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0, CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM, CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH, CHCR_SCMD_AUTH_MODE_GHASH,
aeadctx->hmac_ctrl, ivsize >> 1); aeadctx->hmac_ctrl, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 1, dst_size); 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
...@@ -2453,30 +2860,21 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2453,30 +2860,21 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
sg_param.nents = reqctx->dst_nents; ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
sg_param.qid = qid;
error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
reqctx->dst, &sg_param);
if (error)
goto dstmap_fail;
skb_set_transport_header(skb, transhdr_len); chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
write_sg_to_skb(skb, &frags, req->src, assoclen); chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
atomic_inc(&adap->chcr_stats.aead_rqst); atomic_inc(&adap->chcr_stats.aead_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, size, temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len, kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
reqctx->verify); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb; reqctx->skb = skb;
skb_get(skb); reqctx->op = op_type;
return skb; return skb;
dstmap_fail:
/* ivmap_fail: */
kfree_skb(skb);
err: err:
chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error); return ERR_PTR(error);
} }
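
create_gcm_wr() builds the 16-byte IV as a 12-byte nonce followed by a big-endian block counter of one (the htonl(0x01) store at offset 12). A standalone sketch of that layout:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

int main(void)
{
	unsigned char nonce[12] = {0xca, 0xfe, 0xba, 0xbe};	/* remaining bytes zero */
	unsigned char iv[16];
	unsigned int ctr = htonl(0x01);

	memcpy(iv, nonce, 12);		/* 96-bit nonce */
	memcpy(iv + 12, &ctr, 4);	/* counter starts at 1, big-endian */

	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");			/* ends in ...00000001 */
	return 0;
}
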
...@@ -2484,8 +2882,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, ...@@ -2484,8 +2882,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
static int chcr_aead_cra_init(struct crypto_aead *tfm) static int chcr_aead_cra_init(struct crypto_aead *tfm)
{ {
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct aead_alg *alg = crypto_aead_alg(tfm); struct aead_alg *alg = crypto_aead_alg(tfm);
aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
...@@ -2496,25 +2893,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm) ...@@ -2496,25 +2893,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
sizeof(struct aead_request) + sizeof(struct aead_request) +
crypto_aead_reqsize(aeadctx->sw_cipher))); crypto_aead_reqsize(aeadctx->sw_cipher)));
aeadctx->null = crypto_get_default_null_skcipher(); return chcr_device_init(a_ctx(tfm));
if (IS_ERR(aeadctx->null))
return PTR_ERR(aeadctx->null);
return chcr_device_init(ctx);
} }
static void chcr_aead_cra_exit(struct crypto_aead *tfm) static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{ {
struct chcr_context *ctx = crypto_aead_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
crypto_put_default_null_skcipher();
crypto_free_aead(aeadctx->sw_cipher); crypto_free_aead(aeadctx->sw_cipher);
} }
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
unsigned int authsize) unsigned int authsize)
{ {
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW; aeadctx->mayverify = VERIFY_HW;
...@@ -2523,7 +2915,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, ...@@ -2523,7 +2915,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
static int chcr_authenc_setauthsize(struct crypto_aead *tfm, static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize) unsigned int authsize)
{ {
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
u32 maxauth = crypto_aead_maxauthsize(tfm); u32 maxauth = crypto_aead_maxauthsize(tfm);
/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
...@@ -2561,7 +2953,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm, ...@@ -2561,7 +2953,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{ {
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) { switch (authsize) {
case ICV_4: case ICV_4:
...@@ -2601,7 +2993,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) ...@@ -2601,7 +2993,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
unsigned int authsize) unsigned int authsize)
{ {
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) { switch (authsize) {
case ICV_8: case ICV_8:
...@@ -2627,7 +3019,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, ...@@ -2627,7 +3019,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
static int chcr_ccm_setauthsize(struct crypto_aead *tfm, static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize) unsigned int authsize)
{ {
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) { switch (authsize) {
case ICV_4: case ICV_4:
...@@ -2670,8 +3062,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, ...@@ -2670,8 +3062,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(aead); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
unsigned char ck_size, mk_size; unsigned char ck_size, mk_size;
int key_ctx_size = 0; int key_ctx_size = 0;
...@@ -2704,8 +3095,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, ...@@ -2704,8 +3095,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
const u8 *key, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(aead); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
int error; int error;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
...@@ -2723,8 +3113,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, ...@@ -2723,8 +3113,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(aead); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
int error; int error;
if (keylen < 3) { if (keylen < 3) {
...@@ -2750,8 +3139,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -2750,8 +3139,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(aead); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
struct crypto_cipher *cipher; struct crypto_cipher *cipher;
unsigned int ck_size; unsigned int ck_size;
...@@ -2823,8 +3211,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -2823,8 +3211,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(authenc); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/ /* it contains auth and cipher key both*/
struct crypto_authenc_keys keys; struct crypto_authenc_keys keys;
...@@ -2944,8 +3331,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, ...@@ -2944,8 +3331,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
struct chcr_context *ctx = crypto_aead_ctx(authenc); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys; struct crypto_authenc_keys keys;
int err; int err;
...@@ -3017,7 +3403,7 @@ static int chcr_aead_encrypt(struct aead_request *req) ...@@ -3017,7 +3403,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req) static int chcr_aead_decrypt(struct aead_request *req)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size; int size;
...@@ -3050,30 +3436,29 @@ static int chcr_aead_op(struct aead_request *req, ...@@ -3050,30 +3436,29 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn) create_wr_t create_wr_fn)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct uld_ctx *u_ctx; struct uld_ctx *u_ctx;
struct sk_buff *skb; struct sk_buff *skb;
if (!ctx->dev) { if (!a_ctx(tfm)->dev) {
pr_err("chcr : %s : No crypto device.\n", __func__); pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO; return -ENXIO;
} }
u_ctx = ULD_CTX(ctx); u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_qidx)) { a_ctx(tfm)->tx_qidx)) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY; return -EBUSY;
} }
/* Form a WR from req */ /* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size, skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
op_type); op_type);
if (IS_ERR(skb) || !skb) if (IS_ERR(skb) || !skb)
return PTR_ERR(skb); return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0]; skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
chcr_send_wr(skb); chcr_send_wr(skb);
return -EINPROGRESS; return -EINPROGRESS;
} }
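
chcr_aead_op() keeps the usual async-crypto return convention: -EBUSY when the queue is full and the request does not allow backlogging, otherwise the WR is sent and -EINPROGRESS tells the caller that the real status will arrive through the completion callback. A userspace sketch of how a caller typically treats those codes (illustrative, not the kernel crypto API):

#include <errno.h>
#include <stdio.h>

/* stand-in for a submit path whose queue can be full */
static int submit(int queue_full, int may_backlog)
{
	if (queue_full && !may_backlog)
		return -EBUSY;		/* caller should retry later */
	return -EINPROGRESS;		/* completion will carry the final status */
}

int main(void)
{
	int ret = submit(1, 0);

	if (ret == -EBUSY)
		printf("queue full, retry\n");
	else if (ret == -EINPROGRESS)
		printf("submitted, wait for completion\n");
	return 0;
}
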
......
...@@ -214,27 +214,22 @@ ...@@ -214,27 +214,22 @@
calc_tx_flits_ofld(skb) * 8), 16))) calc_tx_flits_ofld(skb) * 8), 16)))
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1)) ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8 #define MAX_NK 8
#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
#define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32 #define MAX_DSGL_ENT 32
#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 1)
#define MIN_CIPHER_SG 1 /* IV */ #define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 2 /*IV + AAD*/ #define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 2 /* IV + AAD*/ #define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/ #define MIN_DIGEST_SG 1 /*Partial Buffer*/
#define MIN_CCM_SG 3 /*IV+AAD+B0*/ #define MIN_CCM_SG 2 /*IV+B0*/
#define SPACE_LEFT(len) \ #define SPACE_LEFT(len) \
((MAX_WR_SIZE - WR_MIN_LEN - (len))) ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
48, 64, 72, 88, 96, 112, 120, 136, 144, 160, 168, 184,
96, 112, 120, 136, 192, 208, 216, 232, 240, 256, 264, 280,
144, 160, 168, 184, 288, 304, 312, 328, 336, 352, 360, 376};
192};
unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
112, 112, 128, 128, 144, 144, 160, 160, 112, 112, 128, 128, 144, 144, 160, 160,
192, 192, 208, 208, 224, 224, 240, 240, 192, 192, 208, 208, 224, 224, 240, 240,
...@@ -258,7 +253,6 @@ struct hash_wr_param { ...@@ -258,7 +253,6 @@ struct hash_wr_param {
struct cipher_wr_param { struct cipher_wr_param {
struct ablkcipher_request *req; struct ablkcipher_request *req;
struct scatterlist *srcsg;
char *iv; char *iv;
int bytes; int bytes;
unsigned short qid; unsigned short qid;
...@@ -298,31 +292,11 @@ enum { ...@@ -298,31 +292,11 @@ enum {
ICV_16 = 16 ICV_16 = 16
}; };
struct hash_op_params {
unsigned char mk_size;
unsigned char pad_align;
unsigned char auth_mode;
char hash_name[MAX_HASH_NAME];
unsigned short block_size;
unsigned short word_size;
unsigned short ipad_size;
};
struct phys_sge_pairs { struct phys_sge_pairs {
__be16 len[8]; __be16 len[8];
__be64 addr[8]; __be64 addr[8];
}; };
struct phys_sge_parm {
unsigned int nents;
unsigned int obsize;
unsigned short qid;
};
struct crypto_result {
struct completion completion;
int err;
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
......
...@@ -149,9 +149,23 @@ ...@@ -149,9 +149,23 @@
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64 #define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128
#define CHCR_SG_SIZE 2048 #define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
#define CHCR_DST_SG_SIZE 2048
/* Aligned to 128 bit boundary */ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
{
return crypto_aead_ctx(tfm);
}
static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_ctx(tfm);
}
static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}
struct ablk_ctx { struct ablk_ctx {
struct crypto_skcipher *sw_cipher; struct crypto_skcipher *sw_cipher;
...@@ -165,15 +179,39 @@ struct ablk_ctx { ...@@ -165,15 +179,39 @@ struct ablk_ctx {
}; };
struct chcr_aead_reqctx { struct chcr_aead_reqctx {
struct sk_buff *skb; struct sk_buff *skb;
struct scatterlist *dst; dma_addr_t iv_dma;
struct scatterlist srcffwd[2]; dma_addr_t b0_dma;
struct scatterlist dstffwd[2]; unsigned int b0_len;
unsigned int op;
short int aad_nents;
short int src_nents;
short int dst_nents; short int dst_nents;
u16 imm;
u16 verify; u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
}; };
struct ulptx_walk {
struct ulptx_sgl *sgl;
unsigned int nents;
unsigned int pair_idx;
unsigned int last_sg_len;
struct scatterlist *last_sg;
struct ulptx_sge_pair *pair;
};
struct dsgl_walk {
unsigned int nents;
unsigned int last_sg_len;
struct scatterlist *last_sg;
struct cpl_rx_phys_dsgl *dsgl;
struct phys_sge_pairs *to;
};
struct chcr_gcm_ctx { struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE]; u8 ghash_h[AEAD_H_SIZE];
}; };
...@@ -194,7 +232,6 @@ struct __aead_ctx { ...@@ -194,7 +232,6 @@ struct __aead_ctx {
struct chcr_aead_ctx { struct chcr_aead_ctx {
__be32 key_ctx_hdr; __be32 key_ctx_hdr;
unsigned int enckey_len; unsigned int enckey_len;
struct crypto_skcipher *null;
struct crypto_aead *sw_cipher; struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT]; u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN]; u8 key[CHCR_AES_MAX_KEY_LEN];
...@@ -230,8 +267,11 @@ struct chcr_ahash_req_ctx { ...@@ -230,8 +267,11 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 *reqbfr; u8 *reqbfr;
u8 *skbfr; u8 *skbfr;
dma_addr_t dma_addr;
u32 dma_len;
u8 reqlen; u8 reqlen;
/* DMA the partial hash in it */ u8 imm;
u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */ u64 data_len; /* Data len till time */
/* SKB which is being sent to the hardware for processing */ /* SKB which is being sent to the hardware for processing */
...@@ -240,14 +280,15 @@ struct chcr_ahash_req_ctx { ...@@ -240,14 +280,15 @@ struct chcr_ahash_req_ctx {
struct chcr_blkcipher_req_ctx { struct chcr_blkcipher_req_ctx {
struct sk_buff *skb; struct sk_buff *skb;
struct scatterlist srcffwd[2];
struct scatterlist dstffwd[2];
struct scatterlist *dstsg; struct scatterlist *dstsg;
struct scatterlist *dst;
unsigned int processed; unsigned int processed;
unsigned int last_req_len; unsigned int last_req_len;
struct scatterlist *srcsg;
unsigned int src_ofst;
unsigned int dst_ofst;
unsigned int op; unsigned int op;
short int dst_nents; dma_addr_t iv_dma;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
}; };
...@@ -261,24 +302,6 @@ struct chcr_alg_template { ...@@ -261,24 +302,6 @@ struct chcr_alg_template {
} alg; } alg;
}; };
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
struct sge_opaque_hdr {
void *dev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid, unsigned short qid,
int size, int size,
...@@ -291,4 +314,37 @@ static int chcr_aead_op(struct aead_request *req_base, ...@@ -291,4 +314,37 @@ static int chcr_aead_op(struct aead_request *req_base,
static inline int get_aead_subtype(struct crypto_aead *aead); static inline int get_aead_subtype(struct crypto_aead *aead);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req, static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err); unsigned char *input, int err);
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
unsigned short op_type);
static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
*req, unsigned short op_type);
static inline void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned int assoclen,
unsigned short op_type,
unsigned short qid);
static inline void chcr_add_aead_src_ent(struct aead_request *req,
struct ulptx_sgl *ulptx,
unsigned int assoclen,
unsigned short op_type);
static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
struct ulptx_sgl *ulptx,
struct cipher_wr_param *wrparam);
static int chcr_cipher_dma_map(struct device *dev,
struct ablkcipher_request *req);
static void chcr_cipher_dma_unmap(struct device *dev,
struct ablkcipher_request *req);
static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
static inline void chcr_add_hash_src_ent(struct ahash_request *req,
struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
static inline int chcr_hash_dma_map(struct device *dev,
struct ahash_request *req);
static inline void chcr_hash_dma_unmap(struct device *dev,
struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */ #endif /* __CHCR_CRYPTO_H__ */
...@@ -1537,6 +1537,12 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) ...@@ -1537,6 +1537,12 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
*/ */
static inline int is_ofld_imm(const struct sk_buff *skb) static inline int is_ofld_imm(const struct sk_buff *skb)
{ {
struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
if (opcode == FW_CRYPTO_LOOKASIDE_WR)
return skb->len <= SGE_MAX_WR_LEN;
else
return skb->len <= MAX_IMM_TX_PKT_LEN; return skb->len <= MAX_IMM_TX_PKT_LEN;
} }
......