Commit ee2f412e authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Recycle MRs after disconnect

The optimization done in "xprtrdma: Simplify rpcrdma_mr_pop" was a
bit too optimistic. MRs left over after a reconnect still need to
be recycled, not added back to the free list, since they could be
in flight or actually fully registered.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent f836b27e
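
Before the diff itself, a minimal user-space sketch of the distinction the patch relies on: a "reset" puts an MR back on a free list for reuse, while a "recycle" tears the MR down so a fresh one is allocated later on demand. After a disconnect the MR's registration state is unknowable, so only the teardown path is safe. All names below (fake_mr, mr_reset, mr_recycle) are illustrative and are not the kernel API.

/* Simplified, self-contained model of the free-list vs. recycle choice.
 * Illustrative only; this is not the xprtrdma code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_mr {
	int		may_be_registered;	/* unknowable after a disconnect */
	struct fake_mr	*next;
};

static struct fake_mr *free_list;

/* "Reset": return the MR to the free list so it can be reused as-is.
 * Unsafe after a disconnect, because the MR may still be in flight or
 * fully registered against the old connection.
 */
static void mr_reset(struct fake_mr *mr)
{
	mr->next = free_list;
	free_list = mr;
}

/* "Recycle": tear the MR down completely. A replacement is allocated
 * later, so no stale registration state can be reused.
 */
static void mr_recycle(struct fake_mr *mr)
{
	/* in the kernel this is where DMA unmap and deregistration happen */
	free(mr);
}

int main(void)
{
	struct fake_mr *mr = calloc(1, sizeof(*mr));

	if (!mr)
		return 1;
	mr->may_be_registered = 1;	/* state unknown after reconnect */
	mr_recycle(mr);			/* safe: discard instead of reuse */
	(void)mr_reset;			/* reset path shown only for contrast */
	printf("recycled one MR\n");
	return 0;
}
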
net/sunrpc/xprtrdma/frwr_ops.c
@@ -88,15 +88,8 @@ void frwr_release_mr(struct rpcrdma_mr *mr)
 	kfree(mr);
 }
 
-/* MRs are dynamically allocated, so simply clean up and release the MR.
- * A replacement MR will subsequently be allocated on demand.
- */
-static void
-frwr_mr_recycle_worker(struct work_struct *work)
+static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 {
-	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
-	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
 	trace_xprtrdma_mr_recycle(mr);
 
 	if (mr->mr_dir != DMA_NONE) {
@@ -114,6 +107,32 @@ frwr_mr_recycle_worker(struct work_struct *work)
 	frwr_release_mr(mr);
 }
 
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
+ */
+static void
+frwr_mr_recycle_worker(struct work_struct *work)
+{
+	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
+					     mr_recycle);
+
+	frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
+/* frwr_recycle - Discard MRs
+ * @req: request to reset
+ *
+ * Used after a reconnect. These MRs could be in flight, we can't
+ * tell. Safe thing to do is release them.
+ */
+void frwr_recycle(struct rpcrdma_req *req)
+{
+	struct rpcrdma_mr *mr;
+
+	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
+		frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
 /* frwr_reset - Place MRs back on the free list
  * @req: request to reset
  *
...
net/sunrpc/xprtrdma/rpc_rdma.c
@@ -867,7 +867,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 	 * chunks. Very likely the connection has been replaced,
 	 * so these registrations are invalid and unusable.
 	 */
-	frwr_reset(req);
+	frwr_recycle(req);
 
 	/* This implementation supports the following combinations
 	 * of chunk lists in one RPC-over-RDMA Call message:
...
net/sunrpc/xprtrdma/xprt_rdma.h
@@ -542,6 +542,7 @@ rpcrdma_data_dir(bool writing)
 /* Memory registration calls xprtrdma/frwr_ops.c
  */
 bool frwr_is_supported(struct ib_device *device);
+void frwr_recycle(struct rpcrdma_req *req);
 void frwr_reset(struct rpcrdma_req *req);
 int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
 int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
...
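
As a follow-up to the frwr_ops.c hunk above: the new frwr_recycle() drains req->rl_registered with rpcrdma_mr_pop() and hands each MR to frwr_mr_recycle(). A rough, self-contained analogue of that pop-and-drain idiom, with purely illustrative names (node, list_pop), might look like this:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int		id;
	struct node	*next;
};

/* Pop the first element off a singly linked list, or return NULL. */
static struct node *list_pop(struct node **head)
{
	struct node *n = *head;

	if (n)
		*head = n->next;
	return n;
}

int main(void)
{
	struct node *head = NULL, *n;
	int i;

	/* build a short list standing in for req->rl_registered */
	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	/* same shape as:
	 *	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
	 *		frwr_mr_recycle(mr->mr_xprt, mr);
	 */
	while ((n = list_pop(&head))) {
		printf("recycling %d\n", n->id);
		free(n);
	}
	return 0;
}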