Commit ee0863ba authored by Harsh Jain, committed by Herbert Xu

chcr - Add debug counters

Count the types of operations performed by the HW.
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent b8fd1f41
@@ -154,6 +154,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_req_ctx ctx_req;
unsigned int digestsize, updated_digestsize;
struct adapter *adap = padap(ctx->dev);
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
@@ -207,6 +208,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
ctx_req.req.ahash_req->base.complete(req, err);
break;
}
atomic_inc(&adap->chcr_stats.complete);
return err;
}
@@ -639,6 +641,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev);
phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
@@ -701,6 +704,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
skb_set_transport_header(skb, transhdr_len);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
atomic_inc(&adap->chcr_stats.cipher_rqst);
create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
@@ -1337,6 +1341,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev);
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1393,7 +1398,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
param->bfr_len);
if (param->sg_len != 0)
write_sg_to_skb(skb, &frags, req->src, param->sg_len);
atomic_inc(&adap->chcr_stats.digest_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
hash_size_in_response, 0, DUMMY_BYTES, 0);
req_ctx->skb = skb;
@@ -1873,6 +1878,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev);
if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
goto err;
@@ -1911,6 +1917,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
T6_MAX_AAD_SIZE,
transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -1983,6 +1990,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
}
write_buffer_to_skb(skb, &frags, req->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
atomic_inc(&adap->chcr_stats.cipher_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
reqctx->skb = skb;
@@ -2206,6 +2214,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
int error = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev);
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
@@ -2245,6 +2254,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
T6_MAX_AAD_SIZE - 18,
transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
@@ -2282,6 +2292,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
skb_set_transport_header(skb, transhdr_len);
frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
atomic_inc(&adap->chcr_stats.aead_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
reqctx->skb = skb;
@@ -2316,6 +2327,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
int error = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(ctx->dev);
/* validate key size */
if (aeadctx->enckey_len == 0)
@@ -2355,6 +2367,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
T6_MAX_AAD_SIZE,
transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -2421,6 +2434,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
write_sg_to_skb(skb, &frags, req->src, assoclen);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
atomic_inc(&adap->chcr_stats.aead_rqst);
create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
sizeof(struct cpl_rx_phys_dsgl) + dst_size,
reqctx->verify);
...
@@ -100,6 +100,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
struct cpl_fw6_pld *fw6_pld;
u32 ack_err_status = 0;
int error_status = 0;
struct adapter *adap = padap(dev);
fw6_pld = (struct cpl_fw6_pld *)input;
req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -111,6 +112,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
error_status = -EBADMSG;
atomic_inc(&adap->chcr_stats.error);
}
/* call completion callback with failure status */
if (req) {
...
@@ -868,6 +868,7 @@ struct adapter {
/* TC u32 offload */
struct cxgb4_tc_u32_table *tc_u32;
struct chcr_stats_debug chcr_stats;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
...
@@ -3069,6 +3069,40 @@ static const struct file_operations meminfo_fops = {
.llseek = seq_lseek,
.release = single_release,
};
static int chcr_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
seq_printf(seq, "Cipher Ops: %10u \n",
atomic_read(&adap->chcr_stats.cipher_rqst));
seq_printf(seq, "Digest Ops: %10u \n",
atomic_read(&adap->chcr_stats.digest_rqst));
seq_printf(seq, "Aead Ops: %10u \n",
atomic_read(&adap->chcr_stats.aead_rqst));
seq_printf(seq, "Completion: %10u \n",
atomic_read(&adap->chcr_stats.complete));
seq_printf(seq, "Error: %10u \n",
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
return 0;
}
static int chcr_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, chcr_show, inode->i_private);
}
static const struct file_operations chcr_stats_debugfs_fops = {
.owner = THIS_MODULE,
.open = chcr_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -3143,6 +3177,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
{ "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
...
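Once this patch is applied, the counters can be read back from user space through the new debugfs node. Below is a minimal sketch (not part of the patch); it assumes debugfs is mounted at /sys/kernel/debug and that cxgb4 names its per-adapter debugfs directory after the PCI device, so adjust the path if your setup differs.

/*
 * Sketch: dump the "crypto" debugfs node added by this patch.
 * Assumption: the node lives at /sys/kernel/debug/cxgb4/<pci-dev>/crypto.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	char line[128];
	FILE *f;

	/* The adapter's PCI device name is taken from the command line. */
	if (argc != 2) {
		fprintf(stderr, "usage: %s <pci-dev>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/cxgb4/%s/crypto", argv[1]);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* chcr_show() emits a title line plus one "<name>: <count>" line per counter. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

Invoked as, e.g., ./chcr_stats 0000:02:00.4, it simply echoes the header and counter lines produced by chcr_show().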
@@ -275,6 +275,15 @@ struct cxgb4_virt_res { /* virtualized HW resources */
unsigned int ncrypto_fc;
};
struct chcr_stats_debug {
atomic_t cipher_rqst;
atomic_t digest_rqst;
atomic_t aead_rqst;
atomic_t complete;
atomic_t error;
atomic_t fallback;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
...
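The new struct is just a bundle of independent atomic counters: the submission and completion paths bump them with atomic_inc(), and the debugfs handler reads them back with atomic_read(), with no locking and no attempt at a consistent snapshot across counters. A rough user-space analogy of that pattern follows (a sketch only, using C11 <stdatomic.h> rather than the kernel's atomic_t, with hypothetical function names):

/* User-space analogy of the lockless statistics counters, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct chcr_stats_example {
	atomic_uint cipher_rqst;
	atomic_uint digest_rqst;
	atomic_uint aead_rqst;
	atomic_uint complete;
	atomic_uint error;
	atomic_uint fallback;
};

static struct chcr_stats_example stats;

/* Hot path: counterpart of atomic_inc(&adap->chcr_stats.cipher_rqst). */
static void submit_cipher_request(void)
{
	atomic_fetch_add_explicit(&stats.cipher_rqst, 1, memory_order_relaxed);
}

/* Reporting path: counterpart of the chcr_show() debugfs handler. */
static void show_stats(void)
{
	printf("Cipher Ops: %10u\n",
	       atomic_load_explicit(&stats.cipher_rqst, memory_order_relaxed));
	printf("Digest Ops: %10u\n",
	       atomic_load_explicit(&stats.digest_rqst, memory_order_relaxed));
}

int main(void)
{
	submit_cipher_request();
	show_stats();
	return 0;
}

The design choice mirrors the patch: per-event relaxed increments are cheap on the datapath, and the reader tolerates slightly stale or mutually inconsistent values, which is acceptable for debug counters.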