Commit 2fd36865 authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Greg Kroah-Hartman

staging/rdma/hfi1: add common routine for queuing acks

This patch is a preliminary patch required to
coalesce acks.

The routine to "schedule" a QP for sending a NAK is
now centralized in rc_defered_ack().  The flag is changed
for clarity since all acks will potentially use
the deferral mechanism.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 46b010d3
...@@ -714,8 +714,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet) ...@@ -714,8 +714,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
*/ */
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait); list_del_init(&qp->rspwait);
if (qp->r_flags & HFI1_R_RSP_NAK) { if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
qp->r_flags &= ~HFI1_R_RSP_NAK; qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
hfi1_send_rc_ack(rcd, qp, 0); hfi1_send_rc_ack(rcd, qp, 0);
} }
if (qp->r_flags & HFI1_R_RSP_SEND) { if (qp->r_flags & HFI1_R_RSP_SEND) {
......
...@@ -1608,6 +1608,16 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, ...@@ -1608,6 +1608,16 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
return; return;
} }
/*
 * rc_defered_ack() - queue @qp on its receive context's wait list so a
 * deferred ack can be sent later.
 *
 * If the QP is not already queued (rspwait list entry is empty), mark it
 * with HFI1_R_RSP_DEFERED_ACK, take a reference on the QP, and append it
 * to the context's qp_wait_list.  The list is drained later (see
 * process_rcv_qp_work()), which clears the flag and calls
 * hfi1_send_rc_ack(); the queued entry holds the extra refcount until
 * then.
 *
 * NOTE(review): assumes the caller holds whatever lock serializes access
 * to qp->rspwait/r_flags in this receive path — not visible here, confirm
 * against the call sites.
 */
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
struct hfi1_qp *qp)
{
/* Only queue once; an already-queued QP keeps its pending state. */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
/* Hold a QP reference while it sits on the wait list. */
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
/** /**
* rc_rcv_error - process an incoming duplicate or error RC packet * rc_rcv_error - process an incoming duplicate or error RC packet
* @ohdr: the other headers for this packet * @ohdr: the other headers for this packet
...@@ -1650,11 +1660,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, ...@@ -1650,11 +1660,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* in the receive queue have been processed. * in the receive queue have been processed.
* Otherwise, we end up propagating congestion. * Otherwise, we end up propagating congestion.
*/ */
if (list_empty(&qp->rspwait)) { rc_defered_ack(rcd, qp);
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
} }
goto done; goto done;
} }
...@@ -2337,11 +2343,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2337,11 +2343,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */ /* Queue RNR NAK for later */
if (list_empty(&qp->rspwait)) { rc_defered_ack(rcd, qp);
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return; return;
nack_op_err: nack_op_err:
...@@ -2349,11 +2351,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2349,11 +2351,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
if (list_empty(&qp->rspwait)) { rc_defered_ack(rcd, qp);
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return; return;
nack_inv_unlck: nack_inv_unlck:
...@@ -2363,11 +2361,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2363,11 +2361,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
if (list_empty(&qp->rspwait)) { rc_defered_ack(rcd, qp);
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return; return;
nack_acc_unlck: nack_acc_unlck:
...@@ -2421,13 +2415,7 @@ void hfi1_rc_hdrerr( ...@@ -2421,13 +2415,7 @@ void hfi1_rc_hdrerr(
* Otherwise, we end up * Otherwise, we end up
* propagating congestion. * propagating congestion.
*/ */
if (list_empty(&qp->rspwait)) { rc_defered_ack(rcd, qp);
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(
&qp->rspwait,
&rcd->qp_wait_list);
}
} /* Out of sequence NAK */ } /* Out of sequence NAK */
} /* QP Request NAKs */ } /* QP Request NAKs */
} }
...@@ -555,7 +555,9 @@ struct hfi1_qp { ...@@ -555,7 +555,9 @@ struct hfi1_qp {
*/ */
#define HFI1_R_REUSE_SGE 0x01 #define HFI1_R_REUSE_SGE 0x01
#define HFI1_R_RDMAR_SEQ 0x02 #define HFI1_R_RDMAR_SEQ 0x02
#define HFI1_R_RSP_NAK 0x04 /* defer ack until end of interrupt session */
#define HFI1_R_RSP_DEFERED_ACK 0x04
/* relay ack to send engine */
#define HFI1_R_RSP_SEND 0x08 #define HFI1_R_RSP_SEND 0x08
#define HFI1_R_COMM_EST 0x10 #define HFI1_R_COMM_EST 0x10
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment