Commit 2e2ee4cd authored by Srujana Challa, committed by David S. Miller

octeontx2-af: Add mailbox for CPT stats

Adds a new mailbox message to get CPT stats, including performance
counters, CPT engine status and RXC status.
Signed-off-by: Narayana Prasad Raju Atherya <pathreya@marvell.com>
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ecad2ce8
@@ -177,6 +177,7 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
cpt_rd_wr_reg_msg) \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
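For context, each M() entry in this table is expanded elsewhere in mbox.h into a message ID and handler wiring; a rough sketch of the existing expansion pattern, with only the CPT_STATS row being new in this patch:

/* Sketch of the existing expansion in mbox.h (not new code): the row
 * M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) yields a
 * message ID and ties it to rvu_mbox_handler_cpt_sts() added below.
 */
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES	/* ..., MBOX_MSG_CPT_STATS = 0xA05, ... */
#undef M
};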
@@ -1257,6 +1258,53 @@ struct cpt_lf_alloc_req_msg {
	int blkaddr;
};

/* Mailbox message request and response format for CPT stats. */
struct cpt_sts_req {
	struct mbox_msghdr hdr;
	u8 blkaddr;
};

struct cpt_sts_rsp {
	struct mbox_msghdr hdr;
	u64 inst_req_pc;
	u64 inst_lat_pc;
	u64 rd_req_pc;
	u64 rd_lat_pc;
	u64 rd_uc_pc;
	u64 active_cycles_pc;
	u64 ctx_mis_pc;
	u64 ctx_hit_pc;
	u64 ctx_aop_pc;
	u64 ctx_aop_lat_pc;
	u64 ctx_ifetch_pc;
	u64 ctx_ifetch_lat_pc;
	u64 ctx_ffetch_pc;
	u64 ctx_ffetch_lat_pc;
	u64 ctx_wback_pc;
	u64 ctx_wback_lat_pc;
	u64 ctx_psh_pc;
	u64 ctx_psh_lat_pc;
	u64 ctx_err;
	u64 ctx_enc_id;
	u64 ctx_flush_timer;
	u64 rxc_time;
	u64 rxc_time_cfg;
	u64 rxc_active_sts;
	u64 rxc_zombie_sts;
	u64 busy_sts_ae;
	u64 free_sts_ae;
	u64 busy_sts_se;
	u64 free_sts_se;
	u64 busy_sts_ie;
	u64 free_sts_ie;
	u64 exe_err_info;
	u64 cptclk_cnt;
	u64 diag;
	u64 rxc_dfrg;
	u64 x2p_link_cfg0;
	u64 x2p_link_cfg1;
};
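As a usage illustration only (not part of this patch), a CPT PF/VF driver could query these stats through the generic otx2 mailbox helpers; a minimal sketch, assuming the usual PF/VF-to-AF mbox plumbing is already set up:

/* Hypothetical sender-side sketch, not part of this patch; assumes the
 * caller owns an already-initialized PF/VF-to-AF otx2_mbox (devid 0).
 */
static int cpt_query_stats(struct otx2_mbox *mbox, struct cpt_sts_rsp *out)
{
	struct mbox_msghdr *rsp_hdr;
	struct cpt_sts_req *req;
	int err;

	req = (struct cpt_sts_req *)
	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct cpt_sts_rsp));
	if (!req)
		return -ENOMEM;

	req->hdr.id = MBOX_MSG_CPT_STATS;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->blkaddr = BLKADDR_CPT0;

	otx2_mbox_msg_send(mbox, 0);
	err = otx2_mbox_wait_for_rsp(mbox, 0);
	if (err)
		return err;

	rsp_hdr = otx2_mbox_get_rsp(mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr))
		return PTR_ERR(rsp_hdr);

	*out = *(struct cpt_sts_rsp *)rsp_hdr;
	return 0;
}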

/* Mailbox message request format to configure reassembly timeout. */
struct cpt_rxc_time_cfg_req {
	struct mbox_msghdr hdr;
......
@@ -15,6 +15,24 @@
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 2

#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
	u64 free_sts = 0, busy_sts = 0; \
	typeof(rsp) _rsp = rsp; \
	u32 e, i; \
\
	for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
		if (reg & 0x1) \
			busy_sts |= 1ULL << i; \
\
		if (reg & 0x2) \
			free_sts |= 1ULL << i; \
	} \
	(_rsp)->busy_sts_##etype = busy_sts; \
	(_rsp)->free_sts_##etype = free_sts; \
})
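The etype argument is token-pasted into the response field names, and the macro intentionally borrows reg, rvu and blkaddr from the calling function's scope. For example, cpt_get_eng_sts(0, max_ses, rsp, se) expands roughly to:

/* Rough expansion for etype = se; per the loop above, EXE_STS bit 0
 * marks a busy engine and bit 1 a free one.
 */
for (e = 0, i = 0; e < max_ses; e++, i++) {
	reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
	if (reg & 0x1)
		busy_sts |= 1ULL << i;
	if (reg & 0x2)
		free_sts |= 1ULL << i;
}
rsp->busy_sts_se = busy_sts;
rsp->free_sts_se = free_sts;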

static int get_cpt_pf_num(struct rvu *rvu)
{
	int i, domain_nr, cpt_pf_num = -1;
@@ -264,6 +282,100 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
	return 0;
}

static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	if (is_rvu_otx2(rvu))
		return;

	rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
	rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
	rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
	rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_AOP_LATENCY_PC);
	rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
	rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_IFETCH_LATENCY_PC);
	rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
	rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_FFETCH_LATENCY_PC);
	rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_WBACK_PC);
	rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_CTX_WBACK_LATENCY_PC);
	rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_PSH_PC);
	rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_PSH_LATENCY_PC);
	rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
	rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
	rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);

	rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
	rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
	rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
	rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
	rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
	rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
	rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}

static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	u16 max_ses, max_ies, max_aes;
	u32 e_min = 0, e_max = 0;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Get AE status */
	e_min = max_ses + max_ies;
	e_max = max_ses + max_ies + max_aes;
	cpt_get_eng_sts(e_min, e_max, rsp, ae);
	/* Get SE status */
	e_min = 0;
	e_max = max_ses;
	cpt_get_eng_sts(e_min, e_max, rsp, se);
	/* Get IE status */
	e_min = max_ses;
	e_max = max_ses + max_ies;
	cpt_get_eng_sts(e_min, e_max, rsp, ie);
}
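Since engines are numbered SEs first, then IEs, then AEs, the three ranges above are cumulative. A worked example with hypothetical counts (real values come from CPT_AF_CONSTANTS1 at runtime): with max_ses = 28, max_ies = 16 and max_aes = 2, SEs are engines 0-27, IEs are 28-43 and AEs are 44-45, so the AE pass reads CPT_AF_EXEX_STS(44..45) and fills bits 0-1 of busy_sts_ae/free_sts_ae.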

int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
			     struct cpt_sts_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	get_ctx_pc(rvu, rsp, blkaddr);

	/* Get CPT engines status */
	get_eng_sts(rvu, rsp, blkaddr);

	/* Read CPT instruction PC registers */
	rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_ACTIVE_CYCLES_PC);
	rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);

	return 0;
}
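On the consumer side, each *_req_pc/*_lat_pc pair reads naturally as a request count plus an accumulated-latency counter; assuming that interpretation (the patch itself does not spell it out), an average could be derived like this:

/* Hypothetical helper, not part of this patch: assumes inst_lat_pc
 * accumulates total latency in CPT clocks across inst_req_pc requests.
 * Needs linux/math64.h for div64_u64().
 */
static u64 cpt_avg_inst_latency(const struct cpt_sts_rsp *rsp)
{
	if (!rsp->inst_req_pc)
		return 0;
	return div64_u64(rsp->inst_lat_pc, rsp->inst_req_pc);
}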
#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
......
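These GENMASK_ULL masks are meant for FIELD_PREP()-style composition of the RXC defragmentation/timeout config handled by CPT_RXC_TIME_CFG in the elided lines; a hypothetical sketch of the pattern (parameter names are illustrative, and any active-limit counterpart defined in the elided lines is omitted):

/* Sketch only, not part of this patch: composing an RXC defrag config
 * value from the masks above. Needs linux/bitfield.h for FIELD_PREP().
 */
static void cpt_rxc_dfrg_cfg(struct rvu *rvu, int blkaddr,
			     u16 zombie_thres, u16 zombie_limit,
			     u16 active_thres)
{
	u64 dfrg_reg;

	dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, zombie_thres);
	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, zombie_limit);
	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, active_thres);

	rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
}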