Commit 3d807a3e authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Don't call rxe_requester from rxe_completer

Instead of rescheduling rxe_requester() from rxe_completer(), just extend
the duration of rxe_sender() by one pass. Setting the run-requester-again
flag (qp->req.again) forces rxe_completer() to return 0, which causes
rxe_sender() to be called at least one more time.
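
For context, a minimal sketch of the call pattern this relies on, assuming
the rxe_sender() shape introduced earlier in this series (illustrative, not
the verbatim kernel source):

/* Illustrative sketch: rxe_sender() runs both state machines from
 * a single task.  A return value of 0 keeps the task looping, so
 * when rxe_completer() returns 0 because qp->req.again was set,
 * rxe_requester() gets at least one more pass without an explicit
 * reschedule.
 */
int rxe_sender(struct rxe_qp *qp)
{
	int req_ret;
	int comp_ret;

	/* process the send queue */
	req_ret = rxe_requester(qp);

	/* process the response queue */
	comp_ret = rxe_completer(qp);

	/* exit the task loop only when both sides report no more work */
	return (req_ret && comp_ret) ? -EAGAIN : 0;
}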

Link: https://lore.kernel.org/r/20240329145513.35381-10-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 4891f4fe
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -325,7 +325,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 			qp->comp.psn = pkt->psn;
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_sched_task(&qp->send_task);
+				qp->req.again = 1;
 			}
 		}
 		return COMPST_ERROR_RETRY;
@@ -476,7 +476,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_sched_task(&qp->send_task);
+		qp->req.again = 1;
 	}
 }
@@ -515,7 +515,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_sched_task(&qp->send_task);
+			qp->req.again = 1;
 		}
 	}
@@ -541,7 +541,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
-			rxe_sched_task(&qp->send_task);
+			qp->req.again = 1;
 		}
 	}
@@ -654,6 +654,8 @@ int rxe_completer(struct rxe_qp *qp)
 	int ret;
 	unsigned long flags;
 
+	qp->req.again = 0;
+
 	spin_lock_irqsave(&qp->state_lock, flags);
 	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
 			  qp_state(qp) == IB_QPS_RESET) {
@@ -737,7 +739,7 @@ int rxe_completer(struct rxe_qp *qp)
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_sched_task(&qp->send_task);
+				qp->req.again = 1;
 			}
 
 			state = COMPST_DONE;
@@ -792,7 +794,7 @@ int rxe_completer(struct rxe_qp *qp)
 					RXE_CNT_COMP_RETRY);
 			qp->req.need_retry = 1;
 			qp->comp.started_retry = 1;
-			rxe_sched_task(&qp->send_task);
+			qp->req.again = 1;
 		}
 		goto done;
@@ -843,8 +845,9 @@ int rxe_completer(struct rxe_qp *qp)
 	ret = 0;
 	goto out;
 exit:
-	ret = -EAGAIN;
+	ret = (qp->req.again) ? 0 : -EAGAIN;
 out:
+	qp->req.again = 0;
 	if (pkt)
 		free_pkt(pkt);
 	return ret;
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -113,6 +113,7 @@ struct rxe_req_info {
 	int			need_retry;
 	int			wait_for_rnr_timer;
 	int			noack_pkts;
+	int			again;
 };
 
 struct rxe_comp_info {