Commit 354dff1b authored by Mike Marciniszyn, committed by Roland Dreier

IB/qib: Fix UC MR refs for immediate operations

An MR reference leak exists when handling UC RDMA writes with
immediate data because we manipulate the reference counts as if the
operation had been a send.

This patch moves the last_imm label so that RDMA write operations with
immediate data converge at the completion queue (CQ) building code.  The
copy/MR dereference code is now done correctly prior to the branch to
last_imm.
Reviewed-by: Edward Mascarenhas <edward.mascarenhas@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 6887a413
@@ -403,7 +403,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto rewind;
 		wc.opcode = IB_WC_RECV;
-last_imm:
 		qib_copy_sge(&qp->r_sge, data, tlen, 0);
 		while (qp->s_rdma_read_sge.num_sge) {
 			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
@@ -411,6 +410,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 				qp->s_rdma_read_sge.sge =
 					*qp->s_rdma_read_sge.sg_list++;
 		}
+last_imm:
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
 		wc.qp = &qp->ibqp;
@@ -509,6 +509,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		}
 		wc.byte_len = qp->r_len;
 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
 		goto last_imm;

 	case OP(RDMA_WRITE_LAST):
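For readers unfamiliar with the reference-counting pattern involved, below is a
minimal userspace sketch, not the kernel code, of the rule the patch enforces:
each receive path must drop the MR references held by the SGE list it actually
consumed before branching to the shared last_imm completion code.  The struct
names, the mr type, and the put_sge_refs() helper are illustrative assumptions,
not qib identifiers.

	/* Sketch: drop per-path MR references before converging on the
	 * common completion step, so no reference is leaked. */
	#include <stdatomic.h>
	#include <stdio.h>

	struct mr {
		atomic_int refcount;	/* stands in for the kernel MR refcount */
	};

	struct sge {
		struct mr *mr;
	};

	struct sge_state {
		struct sge sge;		/* SGE currently being consumed */
		struct sge *sg_list;	/* remaining SGEs, if any       */
		int num_sge;
	};

	/* Mirrors the atomic_dec() loops in the diff: release one MR
	 * reference per SGE that this receive path consumed. */
	static void put_sge_refs(struct sge_state *ss)
	{
		while (ss->num_sge) {
			atomic_fetch_sub(&ss->sge.mr->refcount, 1);
			if (--ss->num_sge)
				ss->sge = *ss->sg_list++;
		}
	}

	int main(void)
	{
		struct mr mr = { .refcount = 1 };

		/* An RDMA write with immediate data consumed r_sge, so its
		 * references must be dropped here, rather than touching the
		 * send path's s_rdma_read_sge (the original leak). */
		struct sge_state r_sge = {
			.sge = { .mr = &mr }, .sg_list = NULL, .num_sge = 1,
		};

		put_sge_refs(&r_sge);

		/* ...control then converges at the common CQ-building code. */
		printf("MR refcount after completion: %d\n",
		       atomic_load(&mr.refcount));	/* prints 0: no leak */
		return 0;
	}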