Commit eb342e9a authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Eliminate INLINE_THRESHOLD macros

Clean up: r_xprt is already available everywhere these macros are
invoked, so just dereference that directly.

RPCRDMA_INLINE_PAD_VALUE is no longer used, so it can simply be
removed.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 04fa2c6b
...@@ -46,13 +46,13 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt, ...@@ -46,13 +46,13 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
return PTR_ERR(req); return PTR_ERR(req);
req->rl_backchannel = true; req->rl_backchannel = true;
size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst); size = r_xprt->rx_data.inline_wsize;
rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL); rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
if (IS_ERR(rb)) if (IS_ERR(rb))
goto out_fail; goto out_fail;
req->rl_rdmabuf = rb; req->rl_rdmabuf = rb;
size += RPCRDMA_INLINE_READ_THRESHOLD(rqst); size += r_xprt->rx_data.inline_rsize;
rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL); rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
if (IS_ERR(rb)) if (IS_ERR(rb))
goto out_fail; goto out_fail;
......
...@@ -673,7 +673,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) ...@@ -673,7 +673,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
goto out_unmap; goto out_unmap;
hdrlen = (unsigned char *)iptr - (unsigned char *)headerp; hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) if (hdrlen + rpclen > r_xprt->rx_data.inline_wsize)
goto out_overflow; goto out_overflow;
dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n", dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
......
...@@ -518,7 +518,7 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size) ...@@ -518,7 +518,7 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
return req->rl_sendbuf->rg_base; return req->rl_sendbuf->rg_base;
out_rdmabuf: out_rdmabuf:
min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); min_size = r_xprt->rx_data.inline_wsize;
rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags); rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
if (IS_ERR(rb)) if (IS_ERR(rb))
goto out_fail; goto out_fail;
...@@ -541,8 +541,8 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size) ...@@ -541,8 +541,8 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
* reply will be large, but slush is provided here to allow * reply will be large, but slush is provided here to allow
* flexibility when marshaling. * flexibility when marshaling.
*/ */
min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp); min_size = r_xprt->rx_data.inline_rsize;
min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); min_size += r_xprt->rx_data.inline_wsize;
if (size < min_size) if (size < min_size)
size = min_size; size = min_size;
......
...@@ -356,15 +356,6 @@ struct rpcrdma_create_data_internal { ...@@ -356,15 +356,6 @@ struct rpcrdma_create_data_internal {
unsigned int padding; /* non-rdma write header padding */ unsigned int padding; /* non-rdma write header padding */
}; };
#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
#define RPCRDMA_INLINE_PAD_VALUE(rq)\
rpcx_to_rdmad(rq->rq_xprt).padding
/* /*
* Statistics for RPCRDMA * Statistics for RPCRDMA
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment