Commit 50148312 authored by Chuck Lever, committed by Anna Schumaker

svcrdma: Clean up RPCRDMA_DEF_GFP

xprt_rdma_bc_allocate() is now the only user of RPCRDMA_DEF_GFP.
Replace that macro with the raw flags.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 6b1eb3b2
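
For context (an editorial note, not part of the commit message): GFP_NOIO permits blocking and direct reclaim but forbids the allocator from starting new I/O during reclaim, which matters because these buffers back an RPC transport that reclaim may itself depend on; __GFP_NOWARN suppresses the page-allocation-failure warning, since both call sites handle NULL by returning -ENOMEM. Below is a minimal sketch of the resulting pattern with the macro expanded inline, assuming the names shown in the hunks that follow; it is an illustration, not the literal file contents.

    /* Editorial sketch. Both allocations may fail quietly under
     * memory pressure (__GFP_NOWARN); each failure path unwinds
     * and returns -ENOMEM to the caller.
     */
    page = alloc_page(GFP_NOIO | __GFP_NOWARN);
    if (!page)
        return -ENOMEM;
    rqst->rq_buffer = page_address(page);

    rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_NOIO | __GFP_NOWARN);
    if (!rqst->rq_rbuffer) {
        put_page(page);    /* release the page allocated above */
        return -ENOMEM;
    }
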
@@ -119,12 +119,12 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}
 
-	page = alloc_page(RPCRDMA_DEF_GFP);
+	page = alloc_page(GFP_NOIO | __GFP_NOWARN);
 	if (!page)
 		return -ENOMEM;
 	rqst->rq_buffer = page_address(page);
 
-	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_NOIO | __GFP_NOWARN);
 	if (!rqst->rq_rbuffer) {
 		put_page(page);
 		return -ENOMEM;
@@ -149,8 +149,6 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
 	return rb->rg_data;
 }
 
-#define RPCRDMA_DEF_GFP	(GFP_NOIO | __GFP_NOWARN)
-
 /* To ensure a transport can always make forward progress,
  * the number of RDMA segments allowed in header chunk lists
  * is capped at 16. This prevents less-capable devices from