Commit 124fa17d authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Pre-allocate Work Requests for backchannel

Pre-allocate extra send and receive Work Requests needed to handle
backchannel receives and sends.

The transport doesn't know how many extra WRs to pre-allocate until
the xprt_setup_backchannel() call, but that's long after the WRs are
allocated during forechannel setup.

So, use a fixed value for now.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent f531a5db
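
The core constraint here is one of ordering: Work Queue capacity is fixed when the QP is created during forward-channel setup, but the backchannel's demand is only learned later, in xprt_setup_backchannel(). The stub program below is a minimal sketch of that call order; the function names echo the kernel's, but the bodies and values are invented for illustration.

/* Minimal sketch of the ordering problem (stub bodies, assumed values;
 * not kernel code). */
#include <stdio.h>

/* Forward-channel setup: Work Queues are sized here, so any
 * backchannel WRs must be reserved now, as a fixed value. */
static void rpcrdma_ep_create_stub(unsigned int max_requests)
{
	unsigned int reserve = 8;	/* RPCRDMA_BACKWARD_WRS */

	printf("QP sized with %u send/recv WRs each\n",
	       max_requests + reserve);
}

/* Backchannel setup: only at this point is the real demand known. */
static void xprt_setup_backchannel_stub(unsigned int reqs)
{
	printf("backchannel later asks for %u slots\n", reqs);
}

int main(void)
{
	rpcrdma_ep_create_stub(32);	/* at mount time */
	xprt_setup_backchannel_stub(4);	/* long afterward */
	return 0;
}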
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -125,6 +125,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 	 * Twice as many rpc_rqsts are prepared to ensure there is
 	 * always an rpc_rqst available as soon as a reply is sent.
 	 */
+	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
+		goto out_err;
+
 	for (i = 0; i < (reqs << 1); i++) {
 		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
 		if (!rqst) {
@@ -161,6 +164,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 out_free:
 	xprt_rdma_bc_destroy(xprt, reqs);
 
+out_err:
 	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
 	return -ENOMEM;
 }
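Note the arithmetic behind the new guard: the loop prepares two rpc_rqsts per requested backchannel credit (see the comment above it), each consuming one Work Request from the fixed reserve, so at most RPCRDMA_BACKWARD_WRS >> 1 = 4 credits fit. A small user-space check of that bound (assumed values, not kernel code):

#include <assert.h>

#define RPCRDMA_BACKWARD_WRS 8	/* fixed reserve from xprt_rdma.h */

/* Mirrors the new guard in xprt_rdma_bc_setup(): two rqsts are
 * prepared per credit, so only half the reserve is usable as
 * backchannel credits. */
static int bc_setup_rejects(unsigned int reqs)
{
	return reqs > RPCRDMA_BACKWARD_WRS >> 1;
}

int main(void)
{
	assert(!bc_setup_rejects(4));	/* 4 credits -> 8 rqsts: fits */
	assert(bc_setup_rejects(5));	/* 5 credits -> 10 rqsts: too many */
	return 0;
}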
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -568,6 +568,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	struct ib_device_attr *devattr = &ia->ri_devattr;
 	struct ib_cq *sendcq, *recvcq;
 	struct ib_cq_init_attr cq_attr = {};
+	unsigned int max_qp_wr;
 	int rc, err;
 
 	if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
@@ -576,18 +577,27 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 		return -ENOMEM;
 	}
 
+	if (devattr->max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
+		dprintk("RPC:       %s: insufficient wqe's available\n",
+			__func__);
+		return -ENOMEM;
+	}
+	max_qp_wr = devattr->max_qp_wr - RPCRDMA_BACKWARD_WRS;
+
 	/* check provider's send/recv wr limits */
-	if (cdata->max_requests > devattr->max_qp_wr)
-		cdata->max_requests = devattr->max_qp_wr;
+	if (cdata->max_requests > max_qp_wr)
+		cdata->max_requests = max_qp_wr;
 
 	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
 	ep->rep_attr.qp_context = ep;
 	ep->rep_attr.srq = NULL;
 	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
+	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
 	rc = ia->ri_ops->ro_open(ia, ep, cdata);
 	if (rc)
 		return rc;
 	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
+	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
 	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
 	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;
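The sizing in rpcrdma_ep_create() keeps an invariant worth spelling out: the reserve is subtracted from the provider's limit before the forward channel is clamped, then added back to each queue, so the final depth can never exceed devattr->max_qp_wr. A compact model with assumed device numbers (not kernel code):

#include <assert.h>

#define RPCRDMA_BACKWARD_WRS 8

/* Model of the patched sizing; device_max stands in for
 * devattr->max_qp_wr and requested for cdata->max_requests
 * (both values assumed for illustration). */
static unsigned int queue_depth(unsigned int device_max,
				unsigned int requested)
{
	unsigned int max_qp_wr = device_max - RPCRDMA_BACKWARD_WRS;

	if (requested > max_qp_wr)
		requested = max_qp_wr;
	return requested + RPCRDMA_BACKWARD_WRS;
}

int main(void)
{
	/* An oversized request is still capped at the device limit. */
	assert(queue_depth(64, 1024) == 64);
	/* A modest request simply gains the fixed reserve. */
	assert(queue_depth(64, 32) == 40);
	return 0;
}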
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -101,6 +101,16 @@ struct rpcrdma_ep {
  */
 #define RPCRDMA_IGNORE_COMPLETION	(0ULL)
 
+/* Pre-allocate extra Work Requests for handling backward receives
+ * and sends. This is a fixed value because the Work Queues are
+ * allocated when the forward channel is set up.
+ */
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+#define RPCRDMA_BACKWARD_WRS		(8)
+#else
+#define RPCRDMA_BACKWARD_WRS		(0)
+#endif
+
 /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
  *
  * The below structure appears at the front of a large region of kmalloc'd
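Because the reserve is compile-time conditional, kernels built without CONFIG_SUNRPC_BACKCHANNEL pay nothing: the macro collapses to 0 and the queue sizing above reduces to its previous form. A user-space rendering of the same pattern (the SUNRPC_BACKCHANNEL toggle is illustrative, not a real config symbol):

#include <stdio.h>

/* Flip this to 0 to model a kernel without backchannel support. */
#define SUNRPC_BACKCHANNEL 1

#if SUNRPC_BACKCHANNEL
#define BACKWARD_WRS	(8)
#else
#define BACKWARD_WRS	(0)
#endif

int main(void)
{
	unsigned int max_requests = 32;

	/* With the reserve at 0, this is exactly the pre-patch sizing. */
	printf("max_send_wr = %u\n", max_requests + BACKWARD_WRS);
	return 0;
}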