Commit 3ce5daa2 authored by Kaike Wan, committed by Doug Ledford

IB/hfi1: Add static trace for TID RDMA READ protocol

This patch makes the following changes to the static trace:
1. Adds the decoding of TID RDMA READ packets in IB header trace;
2. Tracks qpriv->s_flags and iow_flags in qpsleepwakeup trace;
3. Adds a new event to track RC ACK receiving;
4. Adds trace events for various stages of the TID RDMA READ
protocol. These events provide fine-grained control for monitoring
and debugging the hfi1 driver in the field.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f1ab4efa
...@@ -121,6 +121,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, ...@@ -121,6 +121,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
bool last_pkt; bool last_pkt;
u32 delta; u32 delta;
trace_hfi1_rsp_make_rc_ack(qp, 0);
lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&qp->s_lock);
/* Don't send an ACK if we aren't supposed to. */ /* Don't send an ACK if we aren't supposed to. */
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
...@@ -349,6 +350,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -349,6 +350,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
int delta; int delta;
struct tid_rdma_flow *flow = NULL; struct tid_rdma_flow *flow = NULL;
trace_hfi1_sender_make_rc_req(qp);
lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp); ps->s_txreq = get_txreq(ps->dev, qp);
if (!ps->s_txreq) if (!ps->s_txreq)
...@@ -596,8 +598,13 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -596,8 +598,13 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
break; break;
case IB_WR_TID_RDMA_READ: case IB_WR_TID_RDMA_READ:
trace_hfi1_tid_read_sender_make_req(qp, newreq);
wpriv = wqe->priv; wpriv = wqe->priv;
req = wqe_to_tid_req(wqe); req = wqe_to_tid_req(wqe);
trace_hfi1_tid_req_make_req_read(qp, newreq,
wqe->wr.opcode,
wqe->psn, wqe->lpsn,
req);
delta = cmp_psn(qp->s_psn, wqe->psn); delta = cmp_psn(qp->s_psn, wqe->psn);
/* /*
...@@ -892,6 +899,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -892,6 +899,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
++qp->s_cur == qp->s_size) ++qp->s_cur == qp->s_size)
qp->s_cur = 0; qp->s_cur = 0;
qp->s_psn = req->s_next_psn; qp->s_psn = req->s_next_psn;
trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
wqe->psn, wqe->lpsn, req);
break; break;
case TID_OP(READ_REQ): case TID_OP(READ_REQ):
req = wqe_to_tid_req(wqe); req = wqe_to_tid_req(wqe);
...@@ -933,6 +942,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -933,6 +942,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
++qp->s_cur == qp->s_size) ++qp->s_cur == qp->s_size)
qp->s_cur = 0; qp->s_cur = 0;
qp->s_psn = req->s_next_psn; qp->s_psn = req->s_next_psn;
trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
wqe->psn, wqe->lpsn, req);
break; break;
} }
qp->s_sending_hpsn = bth2; qp->s_sending_hpsn = bth2;
...@@ -1341,6 +1352,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn) ...@@ -1341,6 +1352,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
qp->s_flags |= RVT_S_WAIT_PSN; qp->s_flags |= RVT_S_WAIT_PSN;
qp->s_flags &= ~HFI1_S_AHG_VALID; qp->s_flags &= ~HFI1_S_AHG_VALID;
trace_hfi1_sender_reset_psn(qp);
} }
/* /*
...@@ -1355,6 +1367,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait) ...@@ -1355,6 +1367,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->r_lock);
lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&qp->s_lock);
trace_hfi1_sender_restart_rc(qp);
if (qp->s_retry == 0) { if (qp->s_retry == 0) {
if (qp->s_mig_state == IB_MIG_ARMED) { if (qp->s_mig_state == IB_MIG_ARMED) {
hfi1_migrate_qp(qp); hfi1_migrate_qp(qp);
...@@ -1558,6 +1571,7 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, ...@@ -1558,6 +1571,7 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
* completion if the SWQE is being resent until the send * completion if the SWQE is being resent until the send
* is finished. * is finished.
*/ */
trace_hfi1_rc_completion(qp, wqe->lpsn);
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last; u32 s_last;
...@@ -1742,6 +1756,8 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, ...@@ -1742,6 +1756,8 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
break; break;
} }
trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
trace_hfi1_sender_do_rc_ack(qp);
switch (aeth >> IB_AETH_NAK_SHIFT) { switch (aeth >> IB_AETH_NAK_SHIFT) {
case 0: /* ACK */ case 0: /* ACK */
this_cpu_inc(*ibp->rvp.rc_acks); this_cpu_inc(*ibp->rvp.rc_acks);
......
...@@ -1688,6 +1688,7 @@ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, ...@@ -1688,6 +1688,7 @@ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
/* This is the IB psn used to send the request */ /* This is the IB psn used to send the request */
*bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
/* TID Entries for TID RDMA READ payload */ /* TID Entries for TID RDMA READ payload */
req_addr = &flow->tid_entry[flow->tid_idx]; req_addr = &flow->tid_entry[flow->tid_idx];
...@@ -1768,6 +1769,8 @@ u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -1768,6 +1769,8 @@ u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
bool retry = true; bool retry = true;
u32 npkts = rvt_div_round_up_mtu(qp, *len); u32 npkts = rvt_div_round_up_mtu(qp, *len);
trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
wqe->lpsn, req);
/* /*
* Check sync conditions. Make sure that there are no pending * Check sync conditions. Make sure that there are no pending
* segments before freeing the flow. * segments before freeing the flow.
...@@ -1883,6 +1886,8 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp, ...@@ -1883,6 +1886,8 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
*/ */
flow->npkts = rvt_div_round_up_mtu(qp, len); flow->npkts = rvt_div_round_up_mtu(qp, len);
for (i = 0; i < flow->tidcnt; i++) { for (i = 0; i < flow->tidcnt; i++) {
trace_hfi1_tid_entry_rcv_read_req(qp, i,
flow->tid_entry[i]);
tlen = EXP_TID_GET(flow->tid_entry[i], LEN); tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
if (!tlen) if (!tlen)
return 1; return 1;
...@@ -1917,6 +1922,7 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp, ...@@ -1917,6 +1922,7 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
flow->flow_state.ib_spsn = psn; flow->flow_state.ib_spsn = psn;
flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
/* Set the initial flow index to the current flow. */ /* Set the initial flow index to the current flow. */
req->flow_idx = req->setup_head; req->flow_idx = req->setup_head;
...@@ -1942,6 +1948,8 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp, ...@@ -1942,6 +1948,8 @@ static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
req->total_segs = 1; req->total_segs = 1;
req->r_flow_psn = e->psn; req->r_flow_psn = e->psn;
trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
req);
return 0; return 0;
} }
...@@ -1957,6 +1965,8 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet, ...@@ -1957,6 +1965,8 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
u8 prev; u8 prev;
bool old_req; bool old_req;
trace_hfi1_rsp_tid_rcv_error(qp, psn);
trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
if (diff > 0) { if (diff > 0) {
/* sequence error */ /* sequence error */
if (!qp->r_nak_state) { if (!qp->r_nak_state) {
...@@ -1977,7 +1987,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet, ...@@ -1977,7 +1987,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
req = ack_to_tid_req(e); req = ack_to_tid_req(e);
req->r_flow_psn = psn; req->r_flow_psn = psn;
trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
if (e->opcode == TID_OP(READ_REQ)) { if (e->opcode == TID_OP(READ_REQ)) {
struct ib_reth *reth; struct ib_reth *reth;
u32 offset; u32 offset;
...@@ -2088,6 +2098,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet) ...@@ -2088,6 +2098,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
is_fecn = process_ecn(qp, packet); is_fecn = process_ecn(qp, packet);
psn = mask_psn(be32_to_cpu(ohdr->bth[2])); psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
rvt_comm_est(qp); rvt_comm_est(qp);
...@@ -2199,6 +2210,9 @@ u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, ...@@ -2199,6 +2210,9 @@ u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
next_offset = flow->tid_offset + *len; next_offset = flow->tid_offset + *len;
last_pkt = (flow->sent >= flow->length); last_pkt = (flow->sent >= flow->length);
trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
rcu_read_lock(); rcu_read_lock();
remote = rcu_dereference(qpriv->tid_rdma.remote); remote = rcu_dereference(qpriv->tid_rdma.remote);
if (!remote) { if (!remote) {
...@@ -2293,6 +2307,7 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet) ...@@ -2293,6 +2307,7 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
unsigned long flags; unsigned long flags;
u32 kpsn, ipsn; u32 kpsn, ipsn;
trace_hfi1_sender_rcv_tid_read_resp(qp);
is_fecn = process_ecn(qp, packet); is_fecn = process_ecn(qp, packet);
kpsn = mask_psn(be32_to_cpu(ohdr->bth[2])); kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth); aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
...@@ -2322,6 +2337,12 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet) ...@@ -2322,6 +2337,12 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
hfi1_schedule_send(qp); hfi1_schedule_send(qp);
} }
trace_hfi1_ack(qp, ipsn);
trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
req->e.swqe->psn, req->e.swqe->lpsn,
req);
trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
/* Release the tid resources */ /* Release the tid resources */
hfi1_kern_exp_rcv_clear(req); hfi1_kern_exp_rcv_clear(req);
...@@ -2671,6 +2692,8 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, ...@@ -2671,6 +2692,8 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
unsigned long flags; unsigned long flags;
bool ret = true; bool ret = true;
trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
packet->rhf);
if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR)) if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
return ret; return ret;
...@@ -2754,12 +2777,20 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -2754,12 +2777,20 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
*bth2 = mask_psn(qp->s_psn); *bth2 = mask_psn(qp->s_psn);
flow = find_flow_ib(req, *bth2, &fidx); flow = find_flow_ib(req, *bth2, &fidx);
if (!flow) if (!flow) {
trace_hfi1_msg_tid_restart_req(/* msg */
qp, "!!!!!! Could not find flow to restart: bth2 ",
(u64)*bth2);
trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
wqe->psn, wqe->lpsn,
req);
return; return;
}
} else { } else {
return; return;
} }
trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
diff = delta_psn(*bth2, flow->flow_state.ib_spsn); diff = delta_psn(*bth2, flow->flow_state.ib_spsn);
flow->sent = 0; flow->sent = 0;
...@@ -2794,6 +2825,9 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -2794,6 +2825,9 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
/* Move flow_idx to correct index */ /* Move flow_idx to correct index */
req->flow_idx = fidx; req->flow_idx = fidx;
trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
wqe->lpsn, req);
req->state = TID_REQUEST_ACTIVE; req->state = TID_REQUEST_ACTIVE;
} }
...@@ -2868,14 +2902,17 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) ...@@ -2868,14 +2902,17 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
} }
/* Does @sge meet the alignment requirements for tid rdma? */ /* Does @sge meet the alignment requirements for tid rdma? */
static inline bool hfi1_check_sge_align(struct rvt_sge *sge, int num_sge) static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
struct rvt_sge *sge, int num_sge)
{ {
int i; int i;
for (i = 0; i < num_sge; i++, sge++) for (i = 0; i < num_sge; i++, sge++) {
trace_hfi1_sge_check_align(qp, i, sge);
if ((u64)sge->vaddr & ~PAGE_MASK || if ((u64)sge->vaddr & ~PAGE_MASK ||
sge->sge_length & ~PAGE_MASK) sge->sge_length & ~PAGE_MASK)
return false; return false;
}
return true; return true;
} }
...@@ -2904,7 +2941,8 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) ...@@ -2904,7 +2941,8 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
goto exit; goto exit;
if (wqe->wr.opcode == IB_WR_RDMA_READ) { if (wqe->wr.opcode == IB_WR_RDMA_READ) {
if (hfi1_check_sge_align(&wqe->sg_list[0], wqe->wr.num_sge)) { if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
wqe->wr.num_sge)) {
new_opcode = IB_WR_TID_RDMA_READ; new_opcode = IB_WR_TID_RDMA_READ;
do_tid_rdma = true; do_tid_rdma = true;
} }
...@@ -2930,6 +2968,9 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) ...@@ -2930,6 +2968,9 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
priv->tid_req.comp_seg = 0; priv->tid_req.comp_seg = 0;
priv->tid_req.ack_seg = 0; priv->tid_req.ack_seg = 0;
priv->tid_req.state = TID_REQUEST_INACTIVE; priv->tid_req.state = TID_REQUEST_INACTIVE;
trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
wqe->psn, wqe->lpsn,
&priv->tid_req);
} }
exit: exit:
rcu_read_unlock(); rcu_read_unlock();
......
...@@ -129,6 +129,10 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2) ...@@ -129,6 +129,10 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define IETH_PRN "ieth rkey:0x%.8x" #define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx" #define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx" #define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_READ_RSP_PRN "verbs_qp 0x%x"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op #define OP(transport, op) IB_OPCODE_## transport ## _ ## op
...@@ -323,6 +327,38 @@ const char *parse_everbs_hdrs( ...@@ -323,6 +327,38 @@ const char *parse_everbs_hdrs(
parse_syndrome(be32_to_cpu(eh->aeth) >> 24), parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
be32_to_cpu(eh->aeth) & IB_MSN_MASK); be32_to_cpu(eh->aeth) & IB_MSN_MASK);
break; break;
case OP(TID_RDMA, READ_REQ):
trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
TID_READ_REQ_PRN,
le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
be32_to_cpu(eh->tid_rdma.r_req.reth.length),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
break;
case OP(TID_RDMA, READ_RESP):
trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
TID_READ_RSP_PRN,
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
parse_syndrome(/* aeth */
be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
>> 24),
(be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
IB_MSN_MASK),
be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
break;
/* aeth + atomicacketh */ /* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE): case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN, trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
......
...@@ -79,6 +79,8 @@ __print_symbolic(opcode, \ ...@@ -79,6 +79,8 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \ ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \ ib_opcode_name(RC_FETCH_ADD), \
ib_opcode_name(TID_RDMA_READ_REQ), \
ib_opcode_name(TID_RDMA_READ_RESP), \
ib_opcode_name(UC_SEND_FIRST), \ ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \ ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \ ib_opcode_name(UC_SEND_LAST), \
......
...@@ -109,6 +109,54 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error, ...@@ -109,6 +109,54 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
TP_ARGS(qp, psn) TP_ARGS(qp, psn)
); );
DEFINE_EVENT(/* event */
hfi1_rc_template, hfi1_rc_completion,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DECLARE_EVENT_CLASS(/* rc_ack */
hfi1_rc_ack_template,
TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
struct rvt_swqe *wqe),
TP_ARGS(qp, aeth, psn, wqe),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u32, aeth)
__field(u32, psn)
__field(u8, opcode)
__field(u32, spsn)
__field(u32, lpsn)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->qpn = qp->ibqp.qp_num;
__entry->aeth = aeth;
__entry->psn = psn;
__entry->opcode = wqe->wr.opcode;
__entry->spsn = wqe->psn;
__entry->lpsn = wqe->lpsn;
),
TP_printk(/* print */
"[%s] qpn 0x%x aeth 0x%x psn 0x%x opcode 0x%x spsn 0x%x lpsn 0x%x",
__get_str(dev),
__entry->qpn,
__entry->aeth,
__entry->psn,
__entry->opcode,
__entry->spsn,
__entry->lpsn
)
);
DEFINE_EVENT(/* do_rc_ack */
hfi1_rc_ack_template, hfi1_rc_ack_do,
TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
struct rvt_swqe *wqe),
TP_ARGS(qp, aeth, psn, wqe)
);
#endif /* __HFI1_TRACE_RC_H */ #endif /* __HFI1_TRACE_RC_H */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
......
...@@ -31,11 +31,41 @@ u16 hfi1_trace_get_tid_idx(u32 ent); ...@@ -31,11 +31,41 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \ #define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
"generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \ "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \
"npagesets %u tnode_cnt %u tidcnt %u length %u" "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \
"tidcnt %u tid_idx %u tid_offset %u length %u sent %u"
#define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \ #define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
"used %u cnt %u" "used %u cnt %u"
#define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
"r_psn 0x%x r_state 0x%x r_flags 0x%x " \
"r_head_ack_queue %u s_tail_ack_queue %u " \
"s_ack_state 0x%x " \
"s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
"iow_flags 0x%lx"
#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
"s_head %u s_acked %u s_last %u s_psn 0x%x " \
"s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \
"iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u"
#define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
"tid_r_comp %u pending_tid_r_segs %u " \
"s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
"hw_flow_index %u generation 0x%x " \
"fpsn 0x%x flow_flags 0x%x"
#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
"cur_seg %u comp_seg %u ack_seg %u " \
"total_segs %u setup_head %u clear_tail %u flow_idx %u " \
"state %u r_flow_psn 0x%x " \
"s_next_psn 0x%x"
#define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
"s_tail_ack_queue %u " \
"r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
" diff %d"
DECLARE_EVENT_CLASS(/* class */ DECLARE_EVENT_CLASS(/* class */
hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg_unreg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
...@@ -340,6 +370,18 @@ DEFINE_EVENT(/* event */ ...@@ -340,6 +370,18 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, msg, more) TP_ARGS(qp, msg, more)
); );
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_tid_restart_req,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DECLARE_EVENT_CLASS(/* tid_flow_page */ DECLARE_EVENT_CLASS(/* tid_flow_page */
hfi1_tid_flow_page_template, hfi1_tid_flow_page_template,
TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index, TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
...@@ -429,10 +471,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */ ...@@ -429,10 +471,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */
__field(u32, fspsn) __field(u32, fspsn)
__field(u32, flpsn) __field(u32, flpsn)
__field(u32, r_next_psn) __field(u32, r_next_psn)
__field(u32, ib_spsn)
__field(u32, ib_lpsn)
__field(u32, npagesets) __field(u32, npagesets)
__field(u32, tnode_cnt) __field(u32, tnode_cnt)
__field(u32, tidcnt) __field(u32, tidcnt)
__field(u32, tid_idx)
__field(u32, tid_offset)
__field(u32, length) __field(u32, length)
__field(u32, sent)
), ),
TP_fast_assign(/* assign */ TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
...@@ -446,10 +493,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */ ...@@ -446,10 +493,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */
__entry->flpsn = full_flow_psn(flow, __entry->flpsn = full_flow_psn(flow,
flow->flow_state.lpsn); flow->flow_state.lpsn);
__entry->r_next_psn = flow->flow_state.r_next_psn; __entry->r_next_psn = flow->flow_state.r_next_psn;
__entry->ib_spsn = flow->flow_state.ib_spsn;
__entry->ib_lpsn = flow->flow_state.ib_lpsn;
__entry->npagesets = flow->npagesets; __entry->npagesets = flow->npagesets;
__entry->tnode_cnt = flow->tnode_cnt; __entry->tnode_cnt = flow->tnode_cnt;
__entry->tidcnt = flow->tidcnt; __entry->tidcnt = flow->tidcnt;
__entry->tid_idx = flow->tid_idx;
__entry->tid_offset = flow->tid_offset;
__entry->length = flow->length; __entry->length = flow->length;
__entry->sent = flow->sent;
), ),
TP_printk(/* print */ TP_printk(/* print */
TID_FLOW_PRN, TID_FLOW_PRN,
...@@ -462,10 +514,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */ ...@@ -462,10 +514,15 @@ DECLARE_EVENT_CLASS(/* tid_fow */
__entry->fspsn, __entry->fspsn,
__entry->flpsn, __entry->flpsn,
__entry->r_next_psn, __entry->r_next_psn,
__entry->ib_spsn,
__entry->ib_lpsn,
__entry->npagesets, __entry->npagesets,
__entry->tnode_cnt, __entry->tnode_cnt,
__entry->tidcnt, __entry->tidcnt,
__entry->length __entry->tid_idx,
__entry->tid_offset,
__entry->length,
__entry->sent
) )
); );
...@@ -475,6 +532,36 @@ DEFINE_EVENT(/* event */ ...@@ -475,6 +532,36 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, index, flow) TP_ARGS(qp, index, flow)
); );
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DECLARE_EVENT_CLASS(/* tid_node */ DECLARE_EVENT_CLASS(/* tid_node */
hfi1_tid_node_template, hfi1_tid_node_template,
TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
...@@ -557,6 +644,443 @@ DEFINE_EVENT(/* event */ ...@@ -557,6 +644,443 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, index, entry) TP_ARGS(qp, index, entry)
); );
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
TP_ARGS(qp, index, ent)
);
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
TP_ARGS(qp, index, ent)
);
DECLARE_EVENT_CLASS(/* rsp_info */
hfi1_responder_info_template,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u8, state)
__field(u8, s_state)
__field(u32, psn)
__field(u32, r_psn)
__field(u8, r_state)
__field(u8, r_flags)
__field(u8, r_head_ack_queue)
__field(u8, s_tail_ack_queue)
__field(u8, s_ack_state)
__field(u8, s_nak_state)
__field(u8, r_nak_state)
__field(u32, s_flags)
__field(u32, ps_flags)
__field(unsigned long, iow_flags)
),
TP_fast_assign(/* assign */
struct hfi1_qp_priv *priv = qp->priv;
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->state = qp->state;
__entry->s_state = qp->s_state;
__entry->psn = psn;
__entry->r_psn = qp->r_psn;
__entry->r_state = qp->r_state;
__entry->r_flags = qp->r_flags;
__entry->r_head_ack_queue = qp->r_head_ack_queue;
__entry->s_tail_ack_queue = qp->s_tail_ack_queue;
__entry->s_ack_state = qp->s_ack_state;
__entry->s_nak_state = qp->s_nak_state;
__entry->s_flags = qp->s_flags;
__entry->ps_flags = priv->s_flags;
__entry->iow_flags = priv->s_iowait.flags;
),
TP_printk(/* print */
RSP_INFO_PRN,
__get_str(dev),
__entry->qpn,
__entry->state,
__entry->s_state,
__entry->psn,
__entry->r_psn,
__entry->r_state,
__entry->r_flags,
__entry->r_head_ack_queue,
__entry->s_tail_ack_queue,
__entry->s_ack_state,
__entry->s_nak_state,
__entry->s_flags,
__entry->ps_flags,
__entry->iow_flags
)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DECLARE_EVENT_CLASS(/* sender_info */
hfi1_sender_info_template,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u8, state)
__field(u32, s_cur)
__field(u32, s_tail)
__field(u32, s_head)
__field(u32, s_acked)
__field(u32, s_last)
__field(u32, s_psn)
__field(u32, s_last_psn)
__field(u32, s_flags)
__field(u32, ps_flags)
__field(unsigned long, iow_flags)
__field(u8, s_state)
__field(u8, s_num_rd)
__field(u8, s_retry)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->qpn = qp->ibqp.qp_num;
__entry->state = qp->state;
__entry->s_cur = qp->s_cur;
__entry->s_tail = qp->s_tail;
__entry->s_head = qp->s_head;
__entry->s_acked = qp->s_acked;
__entry->s_last = qp->s_last;
__entry->s_psn = qp->s_psn;
__entry->s_last_psn = qp->s_last_psn;
__entry->s_flags = qp->s_flags;
__entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
__entry->iow_flags =
((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
__entry->s_state = qp->s_state;
__entry->s_num_rd = qp->s_num_rd_atomic;
__entry->s_retry = qp->s_retry;
),
TP_printk(/* print */
SENDER_INFO_PRN,
__get_str(dev),
__entry->qpn,
__entry->state,
__entry->s_cur,
__entry->s_tail,
__entry->s_head,
__entry->s_acked,
__entry->s_last,
__entry->s_psn,
__entry->s_last_psn,
__entry->s_flags,
__entry->ps_flags,
__entry->iow_flags,
__entry->s_state,
__entry->s_num_rd,
__entry->s_retry
)
);
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_make_rc_req,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_reset_psn,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_restart_rc,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_do_rc_ack,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DECLARE_EVENT_CLASS(/* tid_read_sender */
hfi1_tid_read_sender_template,
TP_PROTO(struct rvt_qp *qp, char newreq),
TP_ARGS(qp, newreq),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(char, newreq)
__field(u32, tid_r_reqs)
__field(u32, tid_r_comp)
__field(u32, pending_tid_r_segs)
__field(u32, s_flags)
__field(u32, ps_flags)
__field(unsigned long, iow_flags)
__field(u32, hw_flow_index)
__field(u32, generation)
__field(u32, fpsn)
__field(u32, flow_flags)
),
TP_fast_assign(/* assign */
struct hfi1_qp_priv *priv = qp->priv;
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->newreq = newreq;
__entry->tid_r_reqs = priv->tid_r_reqs;
__entry->tid_r_comp = priv->tid_r_comp;
__entry->pending_tid_r_segs = priv->pending_tid_r_segs;
__entry->s_flags = qp->s_flags;
__entry->ps_flags = priv->s_flags;
__entry->iow_flags = priv->s_iowait.flags;
__entry->hw_flow_index = priv->flow_state.index;
__entry->generation = priv->flow_state.generation;
__entry->fpsn = priv->flow_state.psn;
__entry->flow_flags = priv->flow_state.flags;
),
TP_printk(/* print */
TID_READ_SENDER_PRN,
__get_str(dev),
__entry->qpn,
__entry->newreq,
__entry->tid_r_reqs,
__entry->tid_r_comp,
__entry->pending_tid_r_segs,
__entry->s_flags,
__entry->ps_flags,
__entry->iow_flags,
__entry->hw_flow_index,
__entry->generation,
__entry->fpsn,
__entry->flow_flags
)
);
DEFINE_EVENT(/* event */
hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
TP_PROTO(struct rvt_qp *qp, char newreq),
TP_ARGS(qp, newreq)
);
/*
 * Event class snapshotting a struct tid_rdma_request: segment progress
 * (cur/comp/ack/total), flow circular-buffer indices (setup_head,
 * clear_tail, flow_idx), request state, and the PSN bookkeeping fields,
 * alongside the caller-supplied opcode and psn/lpsn range.
 */
DECLARE_EVENT_CLASS(/* tid_rdma_request */
hfi1_tid_rdma_request_template,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(char, newreq)
__field(u8, opcode)
__field(u32, psn)
__field(u32, lpsn)
__field(u32, cur_seg)
__field(u32, comp_seg)
__field(u32, ack_seg)
__field(u32, total_segs)
__field(u16, setup_head)
__field(u16, clear_tail)
__field(u16, flow_idx)
__field(u32, state)
__field(u32, r_flow_psn)
__field(u32, s_next_psn)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->newreq = newreq;
__entry->opcode = opcode;
__entry->psn = psn;
__entry->lpsn = lpsn;
__entry->cur_seg = req->cur_seg;
__entry->comp_seg = req->comp_seg;
__entry->ack_seg = req->ack_seg;
__entry->total_segs = req->total_segs;
__entry->setup_head = req->setup_head;
__entry->clear_tail = req->clear_tail;
__entry->flow_idx = req->flow_idx;
__entry->state = req->state;
__entry->r_flow_psn = req->r_flow_psn;
__entry->s_next_psn = req->s_next_psn;
),
TP_printk(/* print */
TID_REQ_PRN,
__get_str(dev),
__entry->qpn,
__entry->newreq,
__entry->opcode,
__entry->psn,
__entry->lpsn,
__entry->cur_seg,
__entry->comp_seg,
__entry->ack_seg,
__entry->total_segs,
__entry->setup_head,
__entry->clear_tail,
__entry->flow_idx,
__entry->state,
__entry->r_flow_psn,
__entry->s_next_psn
)
);
DEFINE_EVENT(/* event: TID READ request state while making an RC request */
hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: request state while building a TID RDMA READ REQ packet */
hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: responder-side state on receipt of a TID RDMA READ REQ */
hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: requester-side state on receipt of a TID RDMA READ RESP */
hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: request state while handling a receive error */
hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: request state when restarting a TID RDMA request */
hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event: request state while setting up a TID work queue entry */
hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
/*
 * Event class for RC receive-error paths: records the QP's s_flags and
 * state, the ACK queue head/tail indices, the offending opcode/psn, the
 * expected r_psn, and the caller-computed psn difference.
 */
DECLARE_EVENT_CLASS(/* rc_rcv_err */
hfi1_rc_rcv_err_template,
TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
TP_ARGS(qp, opcode, psn, diff),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u32, s_flags)
__field(u8, state)
__field(u8, s_tail_ack_queue)
__field(u8, r_head_ack_queue)
__field(u32, opcode)
__field(u32, psn)
__field(u32, r_psn)
__field(int, diff)
),
TP_fast_assign(/* assign */
/*
 * Terminate DD_DEV_ASSIGN() with ';' like every other event class
 * in this file; the bare form only compiles because the legacy
 * __assign_str() expansion carries its own trailing semicolon.
 */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->state = qp->state;
__entry->s_tail_ack_queue = qp->s_tail_ack_queue;
__entry->r_head_ack_queue = qp->r_head_ack_queue;
__entry->opcode = opcode;
__entry->psn = psn;
__entry->r_psn = qp->r_psn;
__entry->diff = diff;
),
TP_printk(/* print */
RCV_ERR_PRN,
__get_str(dev),
__entry->qpn,
__entry->s_flags,
__entry->state,
__entry->s_tail_ack_queue,
__entry->r_head_ack_queue,
__entry->opcode,
__entry->psn,
__entry->r_psn,
__entry->diff
)
);
DEFINE_EVENT(/* event: receive error encountered in the TID RDMA path */
hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
TP_ARGS(qp, opcode, psn, diff)
);
/*
 * Event class recording one rvt_sge of a QP: its index in the SGE list,
 * the virtual address (cast to u64 for the trace buffer), and sge_length.
 */
DECLARE_EVENT_CLASS(/* sge */
hfi1_sge_template,
TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
TP_ARGS(qp, index, sge),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(int, index)
__field(u64, vaddr)
__field(u32, sge_length)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->index = index;
__entry->vaddr = (u64)sge->vaddr;
__entry->sge_length = sge->sge_length;
),
TP_printk(/* print */
"[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
__get_str(dev),
__entry->qpn,
__entry->index,
__entry->vaddr,
__entry->sge_length
)
);
DEFINE_EVENT(/* event: SGE inspected during an alignment check */
hfi1_sge_template, hfi1_sge_check_align,
TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
TP_ARGS(qp, index, sge)
);
#endif /* __HFI1_TRACE_TID_H */
#undef TRACE_INCLUDE_PATH
......
@@ -114,19 +114,27 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
__field(u32, qpn)
__field(u32, flags)
__field(u32, s_flags)
__field(u32, ps_flags)
__field(unsigned long, iow_flags)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->flags = flags;
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->ps_flags =
((struct hfi1_qp_priv *)qp->priv)->s_flags;
__entry->iow_flags =
((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
),
TP_printk(
"[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
__get_str(dev),
__entry->qpn,
__entry->flags,
__entry->s_flags,
__entry->ps_flags,
__entry->iow_flags
)
);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment