Commit b885d5be authored by Kaike Wan, committed by Jason Gunthorpe

IB/hfi1: Unify the software PSN check for TID RDMA READ/WRITE

For expected packet receiving, the hfi1 hardware checks the KDETH PSN
automatically. However, when a sequence error occurs, the hfi1 driver
falls back to checking the PSN in software until the hardware flow
generation is reloaded.

The TID RDMA READ and WRITE protocols implement similar software
checking mechanisms, but they use different flags and different local
variables to store the next expected PSN.

Unify the handling by using a single flag (HFI1_R_TID_SW_PSN) and a
single per-flow variable (flow->flow_state.r_next_psn) for both the
TID RDMA READ and WRITE protocols.
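
The unified scheme leaves one receive-side pattern for both protocols:
while HFI1_R_TID_SW_PSN is set, compare the incoming KDETH PSN against
flow->flow_state.r_next_psn and accept, drop, or NAK accordingly. Below
is a minimal standalone sketch of that pattern, not driver code: the
PSN_MASK/PSN_SHIFT values are believed to match hfi1's 31-bit PSN
space, cmp_psn()/mask_psn() mirror the driver's circular-compare idiom,
and sw_psn_check() with enum psn_action are hypothetical names invented
for this illustration.

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK  0x7FFFFFFFu	/* 31-bit circular PSN space */
#define PSN_SHIFT 1

static uint32_t mask_psn(uint32_t a)
{
	return a & PSN_MASK;
}

/* Circular compare: <0 if a is before b, 0 if equal, >0 if a is after b. */
static int32_t cmp_psn(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << PSN_SHIFT);
}

enum psn_action { PSN_ACCEPT, PSN_DROP, PSN_NAK };

/*
 * Software sequence check applied while the hardware check is bypassed:
 * a PSN ahead of r_next_psn means an earlier packet was lost (NAK and
 * restart), behind means a duplicate or stale packet (drop), and equal
 * means in sequence (accept and advance the expected PSN).
 */
static enum psn_action sw_psn_check(uint32_t *r_next_psn, uint32_t psn)
{
	int32_t diff = cmp_psn(psn, *r_next_psn);

	if (diff > 0)
		return PSN_NAK;
	if (diff < 0)
		return PSN_DROP;
	*r_next_psn = mask_psn(psn + 1);
	return PSN_ACCEPT;
}

int main(void)
{
	uint32_t next = 0x7FFFFFFF;	/* last PSN before the wrap */

	printf("%d\n", sw_psn_check(&next, 0x7FFFFFFF)); /* 0: in sequence */
	printf("0x%x\n", next);                          /* wraps to 0x0 */
	printf("%d\n", sw_psn_check(&next, 0x00000001)); /* 2: gap, NAK */
	printf("%d\n", sw_psn_check(&next, 0x7FFFFFFF)); /* 1: stale, drop */
	return 0;
}

Any C99 compiler runs this; the output (0, 0x0, 2, 1) exercises
acceptance across the PSN wrap, a NAK on a missing packet, and a drop
of a stale one.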
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6a40693a
@@ -67,8 +67,6 @@ static u32 mask_generation(u32 a)
 #define TID_RDMA_DESTQP_FLOW_SHIFT 11
 #define TID_RDMA_DESTQP_FLOW_MASK 0x1f
 
-#define TID_FLOW_SW_PSN BIT(0)
-
 #define TID_OPFN_QP_CTXT_MASK 0xff
 #define TID_OPFN_QP_CTXT_SHIFT 56
 #define TID_OPFN_QP_KDETH_MASK 0xff
@@ -777,7 +775,6 @@ int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
 	rcd->flows[fs->index].generation = fs->generation;
 	fs->generation = kern_setup_hw_flow(rcd, fs->index);
 	fs->psn = 0;
-	fs->flags = 0;
 	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
 	/* get head before dropping lock */
 	fqp = first_qp(rcd, &rcd->flow_queue);
@@ -1808,6 +1805,7 @@ u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			goto done;
 
 		hfi1_kern_clear_hw_flow(req->rcd, qp);
+		qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		req->state = TID_REQUEST_ACTIVE;
 	}
@@ -2476,8 +2474,13 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
 	flow = &req->flows[req->clear_tail];
 	/* When header suppression is disabled */
-	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn))
+	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
+		if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
+			goto ack_done;
+		flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
 		goto ack_done;
+	}
+	flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
 	req->ack_pending--;
 	priv->pending_tid_r_segs--;
 	qp->s_num_rd_atomic--;
@@ -2519,6 +2522,7 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
 	     req->comp_seg == req->cur_seg) ||
 	    priv->tid_r_comp == priv->tid_r_reqs) {
 		hfi1_kern_clear_hw_flow(priv->rcd, qp);
+		priv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		if (req->state == TID_REQUEST_SYNC)
 			req->state = TID_REQUEST_ACTIVE;
 	}
@@ -2768,9 +2772,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 			return ret;
 		}
-		if (priv->flow_state.flags & TID_FLOW_SW_PSN) {
+		if (priv->s_flags & HFI1_R_TID_SW_PSN) {
 			diff = cmp_psn(psn,
-				       priv->flow_state.r_next_psn);
+				       flow->flow_state.r_next_psn);
 			if (diff > 0) {
 				if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
 					restart_tid_rdma_read_req(rcd,
@@ -2806,14 +2810,15 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 						qp->r_flags &=
 							~RVT_R_RDMAR_SEQ;
 				}
-				priv->flow_state.r_next_psn++;
+				flow->flow_state.r_next_psn =
+					mask_psn(psn + 1);
 			} else {
 				u32 last_psn;
 
 				last_psn = read_r_next_psn(dd, rcd->ctxt,
 							   flow->idx);
-				priv->flow_state.r_next_psn = last_psn;
-				priv->flow_state.flags |= TID_FLOW_SW_PSN;
+				flow->flow_state.r_next_psn = last_psn;
+				priv->s_flags |= HFI1_R_TID_SW_PSN;
 				/*
 				 * If no request has been restarted yet,
 				 * restart the current one.
@@ -2878,6 +2883,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	struct rvt_ack_entry *e;
 	struct tid_rdma_request *req;
 	struct tid_rdma_flow *flow;
+	int diff = 0;
 
 	trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
 					   packet->rhf);
@@ -2977,10 +2983,12 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			 * mismatch could be due to packets that were
 			 * already in flight.
 			 */
-			if (psn != flow->flow_state.r_next_psn) {
-				psn = flow->flow_state.r_next_psn;
+			diff = cmp_psn(psn,
+				       flow->flow_state.r_next_psn);
+			if (diff > 0)
 				goto nak_psn;
-			}
+			else if (diff < 0)
+				break;
 
 			qpriv->s_nak_state = 0;
 			/*
@@ -2991,8 +2999,10 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			if (psn == full_flow_psn(flow,
 						 flow->flow_state.lpsn))
 				ret = false;
+			flow->flow_state.r_next_psn =
+				mask_psn(psn + 1);
 			qpriv->r_next_psn_kdeth =
-				++flow->flow_state.r_next_psn;
+				flow->flow_state.r_next_psn;
 		}
 		break;
@@ -3497,8 +3507,10 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
 			/* If all data has been received, clear the flow */
 			if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
-			    !qpriv->alloc_w_segs)
+			    !qpriv->alloc_w_segs) {
 				hfi1_kern_clear_hw_flow(rcd, qp);
+				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+			}
 			break;
 		}
@@ -3524,8 +3536,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
 			hfi1_kern_clear_hw_flow(rcd, qp);
 			qpriv->sync_pt = false;
-			if (qpriv->s_flags & HFI1_R_TID_SW_PSN)
-				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+			qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		}
 
 		/* Allocate flow if we don't have one */
@@ -4299,7 +4310,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
 		if (cmp_psn(psn, flow->flow_state.r_next_psn))
 			goto send_nak;
-		flow->flow_state.r_next_psn++;
+		flow->flow_state.r_next_psn = mask_psn(psn + 1);
 		goto exit;
 	}
 	flow->flow_state.r_next_psn = mask_psn(psn + 1);
...
@@ -76,10 +76,8 @@ struct tid_rdma_qp_params {
 struct tid_flow_state {
 	u32 generation;
 	u32 psn;
-	u32 r_next_psn;      /* next PSN to be received (in TID space) */
 	u8 index;
 	u8 last_index;
-	u8 flags;
 };
 
 enum tid_rdma_req_state {
...
@@ -53,7 +53,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
 			    "tid_r_comp %u pending_tid_r_segs %u " \
 			    "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
 			    "s_state 0x%x hw_flow_index %u generation 0x%x " \
-			    "fpsn 0x%x flow_flags 0x%x"
+			    "fpsn 0x%x"
 
 #define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
 		    "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
@@ -71,7 +71,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
 			     "pending_tid_w_segs %u sync_pt %s " \
 			     "ps_nak_psn 0x%x ps_nak_state 0x%x " \
 			     "prnr_nak_state 0x%x hw_flow_index %u generation "\
-			     "0x%x fpsn 0x%x flow_flags 0x%x resync %s" \
+			     "0x%x fpsn 0x%x resync %s" \
 			     "r_next_psn_kdeth 0x%x"
 
 #define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
@@ -973,7 +973,6 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
 		__field(u32, hw_flow_index)
 		__field(u32, generation)
 		__field(u32, fpsn)
-		__field(u32, flow_flags)
 	),
 	TP_fast_assign(/* assign */
 		struct hfi1_qp_priv *priv = qp->priv;
@@ -991,7 +990,6 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
 		__entry->hw_flow_index = priv->flow_state.index;
 		__entry->generation = priv->flow_state.generation;
 		__entry->fpsn = priv->flow_state.psn;
-		__entry->flow_flags = priv->flow_state.flags;
 	),
 	TP_printk(/* print */
 		TID_READ_SENDER_PRN,
@@ -1007,8 +1005,7 @@ DECLARE_EVENT_CLASS(/* tid_read_sender */
 		__entry->s_state,
 		__entry->hw_flow_index,
 		__entry->generation,
-		__entry->fpsn,
-		__entry->flow_flags
+		__entry->fpsn
 	)
 );
@@ -1338,7 +1335,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
 		__field(u32, hw_flow_index)
 		__field(u32, generation)
 		__field(u32, fpsn)
-		__field(u32, flow_flags)
 		__field(bool, resync)
 		__field(u32, r_next_psn_kdeth)
 	),
@@ -1360,7 +1356,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
 		__entry->hw_flow_index = priv->flow_state.index;
 		__entry->generation = priv->flow_state.generation;
 		__entry->fpsn = priv->flow_state.psn;
-		__entry->flow_flags = priv->flow_state.flags;
 		__entry->resync = priv->resync;
 		__entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
 	),
@@ -1381,7 +1376,6 @@ DECLARE_EVENT_CLASS(/* tid_write_sp */
 		__entry->hw_flow_index,
 		__entry->generation,
 		__entry->fpsn,
-		__entry->flow_flags,
 		__entry->resync ? "yes" : "no",
 		__entry->r_next_psn_kdeth
 	)
...