Commit cd8aaddf authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Remove save/rollback_state in rxe_requester

Now that req.task and comp.task are merged it is no longer necessary to
call save_state() before calling rxe_xmit_pkt() and rollback_state() if
rxe_xmit_pkt() fails. This was done originally to prevent races between
rxe_completer() and rxe_requester() which now cannot happen.

Link: https://lore.kernel.org/r/20240329145513.35381-8-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 67f57892
...@@ -573,30 +573,6 @@ static void update_wqe_psn(struct rxe_qp *qp, ...@@ -573,30 +573,6 @@ static void update_wqe_psn(struct rxe_qp *qp,
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK; qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
} }
/* Snapshot the mutable fields of the send wqe plus the qp's current
 * request PSN, so a failed transmit can later be undone by
 * rollback_state().
 */
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	/* the assignments are independent; order is immaterial */
	*rollback_psn = qp->req.psn;
	rollback_wqe->dma = wqe->dma;
	rollback_wqe->last_psn = wqe->last_psn;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->state = wqe->state;
}
/* Restore the send wqe and the qp's request PSN from the snapshot
 * taken by save_state(), undoing the effects of a transmit attempt
 * that failed.
 */
static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	/* the assignments are independent; order is immaterial */
	qp->req.psn = rollback_psn;
	wqe->dma = rollback_wqe->dma;
	wqe->last_psn = rollback_wqe->last_psn;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->state = rollback_wqe->state;
}
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt) static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{ {
qp->req.opcode = pkt->opcode; qp->req.opcode = pkt->opcode;
...@@ -676,8 +652,6 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -676,8 +652,6 @@ int rxe_requester(struct rxe_qp *qp)
int opcode; int opcode;
int err; int err;
int ret; int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue; struct rxe_queue *q = qp->sq.queue;
struct rxe_ah *ah; struct rxe_ah *ah;
struct rxe_av *av; struct rxe_av *av;
...@@ -799,9 +773,6 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -799,9 +773,6 @@ int rxe_requester(struct rxe_qp *qp)
pkt.mask = rxe_opcode[opcode].mask; pkt.mask = rxe_opcode[opcode].mask;
pkt.wqe = wqe; pkt.wqe = wqe;
/* save wqe state before we build and send packet */
save_state(wqe, qp, &rollback_wqe, &rollback_psn);
av = rxe_get_av(&pkt, &ah); av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) { if (unlikely(!av)) {
rxe_dbg_qp(qp, "Failed no address vector\n"); rxe_dbg_qp(qp, "Failed no address vector\n");
...@@ -834,10 +805,6 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -834,10 +805,6 @@ int rxe_requester(struct rxe_qp *qp)
if (ah) if (ah)
rxe_put(ah); rxe_put(ah);
/* update wqe state as though we had sent it */
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
err = rxe_xmit_packet(qp, &pkt, skb); err = rxe_xmit_packet(qp, &pkt, skb);
if (err) { if (err) {
if (err != -EAGAIN) { if (err != -EAGAIN) {
...@@ -845,11 +812,6 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -845,11 +812,6 @@ int rxe_requester(struct rxe_qp *qp)
goto err; goto err;
} }
/* the packet was dropped so reset wqe to the state
* before we sent it so we can try to resend
*/
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
/* force a delay until the dropped packet is freed and /* force a delay until the dropped packet is freed and
* the send queue is drained below the low water mark * the send queue is drained below the low water mark
*/ */
...@@ -859,6 +821,8 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -859,6 +821,8 @@ int rxe_requester(struct rxe_qp *qp)
goto exit; goto exit;
} }
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
update_state(qp, &pkt); update_state(qp, &pkt);
/* A non-zero return value will cause rxe_do_task to /* A non-zero return value will cause rxe_do_task to
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment