Commit 829eaee5 authored by Kaike Wan, committed by Doug Ledford

IB/hfi1: Add TID RDMA retry timer

This patch adds the TID RDMA retry timer to make sure that TID RDMA
WRITE DATA packets for a segment are received successfully by the
responder. This timer is generally armed when the last TID RDMA
WRITE DATA packet for a segment is sent out and stopped when all
TID RDMA DATA packets are acknowledged.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 9e93e967
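
The arm/stop pattern described in the commit message can be illustrated outside the driver. The following is a minimal userspace C sketch, not the driver code: names such as tid_retry_ctx, tid_send_data() and tid_recv_ack() are hypothetical, and the real driver drives a struct timer_list with add_timer()/mod_timer()/del_timer() under qp->s_lock, as the hunks below show. The sketch only models when the retry timer is armed (last WRITE DATA packet of a segment sent), re-armed (an ACK arrives while data is still outstanding) and stopped (every packet of the segment acknowledged).

/*
 * Minimal userspace sketch of the retry-timer arm/stop pattern.
 * All names here are hypothetical; only the arm/stop logic mirrors
 * the behavior described in the commit message.
 */
#include <stdbool.h>
#include <stdio.h>

struct tid_retry_ctx {
        bool timer_armed;       /* models the HFI1_S_TID_RETRY_TIMER flag */
        unsigned int sent;      /* WRITE DATA packets sent for the segment */
        unsigned int acked;     /* WRITE DATA packets acknowledged */
        unsigned int seg_len;   /* packets per segment */
};

/* Arm (or re-arm) the retry timer; the driver uses mod_timer() for this. */
static void tid_retry_arm(struct tid_retry_ctx *c)
{
        c->timer_armed = true;
        printf("retry timer armed\n");
}

/* Stop the retry timer once nothing is left unacknowledged. */
static void tid_retry_stop(struct tid_retry_ctx *c)
{
        if (c->timer_armed) {
                c->timer_armed = false;
                printf("retry timer stopped\n");
        }
}

/* Called after each DATA packet is sent: arm on the last one of a segment. */
static void tid_send_data(struct tid_retry_ctx *c)
{
        c->sent++;
        if (c->sent % c->seg_len == 0)
                tid_retry_arm(c);
}

/* Called on each ACK: re-arm while data is outstanding, stop when caught up. */
static void tid_recv_ack(struct tid_retry_ctx *c)
{
        c->acked++;
        if (c->acked < c->sent)
                tid_retry_arm(c);       /* still waiting, push timeout forward */
        else
                tid_retry_stop(c);
}

int main(void)
{
        struct tid_retry_ctx c = { .seg_len = 4 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                tid_send_data(&c);      /* arms on the 4th (last) packet */
        for (i = 0; i < 4; i++)
                tid_recv_ack(&c);       /* stops after the final ACK */
        return 0;
}

Sending four packets arms the timer once on the last one; the first three ACKs push it forward and the fourth stops it, mirroring what hfi1_mod_tid_retry_timer() and hfi1_stop_tid_retry_timer() do in the patch below.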
@@ -252,6 +252,8 @@ void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
        if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
                struct tid_rdma_params *local = &priv->tid_rdma.local;

                if (attr_mask & IB_QP_TIMEOUT)
                        priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
                if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
                    qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
                        tid_rdma_opfn_init(qp, local);
...
@@ -121,6 +121,9 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
static void hfi1_tid_timeout(struct timer_list *t);
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
static void hfi1_tid_retry_timeout(struct timer_list *t);

static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
@@ -330,6 +333,7 @@ int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
        qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
        atomic_set(&qpriv->n_tid_requests, 0);
        timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
        timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
        INIT_LIST_HEAD(&qpriv->tid_wait);
        if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
@@ -4396,11 +4400,19 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
        if (qpriv->s_flags & RVT_S_WAIT_ACK)
                qpriv->s_flags &= ~RVT_S_WAIT_ACK;
        if (!hfi1_tid_rdma_is_resync_psn(psn)) {
                /* Check if there is any pending TID ACK */
                if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
                    req->ack_seg < req->cur_seg)
                        hfi1_mod_tid_retry_timer(qp);
                else
                        hfi1_stop_tid_retry_timer(qp);
                hfi1_schedule_send(qp);
        } else {
                u32 spsn, fpsn, last_acked, generation;
                struct tid_rdma_request *rptr;

                /* ACK(RESYNC) */
                hfi1_stop_tid_retry_timer(qp);
                /* Allow new requests (see hfi1_make_tid_rdma_pkt) */
                qp->s_flags &= ~HFI1_S_WAIT_HALT;
                /*
@@ -4506,6 +4518,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
                break;
        case 3: /* NAK */
                hfi1_stop_tid_retry_timer(qp);
                switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
                        IB_AETH_CREDIT_MASK) {
                case 0: /* PSN sequence error */
@@ -4530,3 +4543,83 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
ack_op_err:
        spin_unlock_irqrestore(&qp->s_lock, flags);
}

void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
                priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
                priv->s_tid_retry_timer.expires = jiffies +
                        priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
                add_timer(&priv->s_tid_retry_timer);
        }
}

static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
        mod_timer(&priv->s_tid_retry_timer, jiffies +
                  priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
}

static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        int rval = 0;

        lockdep_assert_held(&qp->s_lock);
        if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
                rval = del_timer(&priv->s_tid_retry_timer);
                priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
        }
        return rval;
}

void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        del_timer_sync(&priv->s_tid_retry_timer);
        priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
}

static void hfi1_tid_retry_timeout(struct timer_list *t)
{
        struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
        struct rvt_qp *qp = priv->owner;
        struct rvt_swqe *wqe;
        unsigned long flags;

        spin_lock_irqsave(&qp->r_lock, flags);
        spin_lock(&qp->s_lock);
        if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
                hfi1_stop_tid_retry_timer(qp);
                if (!priv->s_retry) {
                        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
                        hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
                        rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                } else {
                        priv->s_flags &= ~RVT_S_WAIT_ACK;
                        /* Only send one packet (the RESYNC) */
                        priv->s_flags |= RVT_S_SEND_ONE;
                        /*
                         * No additional request shall be made by this QP until
                         * the RESYNC has been complete.
                         */
                        qp->s_flags |= HFI1_S_WAIT_HALT;
                        priv->s_state = TID_OP(RESYNC);
                        priv->s_retry--;
                }
        }
        spin_unlock(&qp->s_lock);
        spin_unlock_irqrestore(&qp->r_lock, flags);
}
@@ -29,6 +29,7 @@
#define HFI1_R_TID_RSC_TIMER BIT(2)
/* BIT(4) reserved for RVT_S_ACK_PENDING. */
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_S_TID_RETRY_TIMER BIT(17)
#define HFI1_R_TID_SW_PSN BIT(19)
/*
@@ -288,4 +289,7 @@ u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);
void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);
#endif /* HFI1_TID_RDMA_H */
@@ -164,6 +164,7 @@ struct hfi1_qp_priv {
        u8 s_sc; /* SC[0..4] for next packet */
        struct iowait s_iowait;
        struct timer_list s_tid_timer; /* for timing tid wait */
        struct timer_list s_tid_retry_timer; /* for timing tid ack */
        struct list_head tid_wait; /* for queueing tid space */
        struct hfi1_opfn_data opfn;
        struct tid_flow_state flow_state;
@@ -172,6 +173,7 @@ struct hfi1_qp_priv {
        u8 hdr_type; /* 9B or 16B */
        atomic_t n_tid_requests; /* # of sent TID RDMA requests */
        unsigned long tid_timer_timeout_jiffies;
        unsigned long tid_retry_timeout_jiffies;
        /* variables for the TID RDMA SE state machine */
        u8 s_state;
...