Commit e3eded5e authored by Chuck Lever

svcrdma: Clean up dto_q critical section in svc_rdma_recvfrom()

This, to me, seems less cluttered and less redundant. I was hoping
it could help reduce lock contention on the dto_q lock by reducing
the size of the critical section, but alas, the only improvement is
readability.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 5533c4f4
@@ -794,22 +794,22 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt = NULL;
 
-	ctxt = NULL;
 	spin_lock(&rdma_xprt->sc_rq_dto_lock);
 	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
-	if (!ctxt) {
+	if (ctxt)
+		list_del(&ctxt->rc_list);
+	else
 		/* No new incoming requests, terminate the loop */
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
-		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-		svc_xprt_received(xprt);
-		return 0;
-	}
-	list_del(&ctxt->rc_list);
 	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-	percpu_counter_inc(&svcrdma_stat_recv);
 
 	/* Unblock the transport for the next receive */
 	svc_xprt_received(xprt);
+	if (!ctxt)
+		return 0;
 
+	percpu_counter_inc(&svcrdma_stat_recv);
 	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
 				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
 				   DMA_FROM_DEVICE);
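For illustration only, here is a minimal userspace sketch of the pattern the patch adopts; it is not the kernel code, and the pthread mutex plus hand-rolled singly linked list are stand-ins for sc_rq_dto_lock and the dto_q. The point is the same: hold the queue lock just long enough to dequeue one entry, and make the empty-queue decision and all further processing outside the critical section.

/*
 * Userspace analogue of the reshaped dto_q dequeue (illustrative only).
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct recv_ctxt {
	struct recv_ctxt *next;
	int id;
};

static pthread_mutex_t dto_lock = PTHREAD_MUTEX_INITIALIZER;
static struct recv_ctxt *dto_q;

/* The lock protects only the list manipulation, nothing else. */
static struct recv_ctxt *dequeue_ctxt(void)
{
	struct recv_ctxt *ctxt;

	pthread_mutex_lock(&dto_lock);
	ctxt = dto_q;
	if (ctxt)
		dto_q = ctxt->next;
	/* else: queue empty; the caller decides what that means */
	pthread_mutex_unlock(&dto_lock);
	return ctxt;
}

int main(void)
{
	struct recv_ctxt a = { .next = NULL, .id = 1 };
	struct recv_ctxt *ctxt;

	dto_q = &a;
	ctxt = dequeue_ctxt();
	if (!ctxt) {
		/* nothing queued, nothing to process */
		return 0;
	}
	/* the expensive work (stats, DMA sync, parsing) runs unlocked */
	printf("processing ctxt %d\n", ctxt->id);
	return 0;
}

As in the patch, only the list_del-equivalent happens under the lock; the "return early if empty" branch and the per-request work are pushed outside, which is what shrinks the critical section.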