Commit 8db55a03 authored by NeilBrown, committed by Trond Myklebust

SUNRPC: improve 'swap' handling: scheduling and PF_MEMALLOC

rpc tasks can be marked as RPC_TASK_SWAPPER.  This causes GFP_MEMALLOC
to be used for some allocations.  That is needed in some cases, but not
in all of the cases where it is currently provided, and it is missing
from some cases where it is needed.

Currently *all* tasks associated with an rpc_client on which swap is
enabled get the flag, and hence some GFP_MEMALLOC support.

GFP_MEMALLOC is provided for ->buf_alloc(), but only swap writes need it.
However, xdr_alloc_bvec() does not get GFP_MEMALLOC, even though it often
does need it.

xdr_alloc_bvec() is called while the XPRT_LOCKED lock is held.  If this
allocation blocks, then it blocks all other queued tasks.  So this
allocation needs GFP_MEMALLOC for *all* requests, not just writes, when
the xprt is used for any swap writes.

Similarly, if the transport is not connected, that will block all
requests, including swap writes, so memory allocations should get
GFP_MEMALLOC if swap writes are possible.

So with this patch:
 1/ we ONLY set RPC_TASK_SWAPPER for swap writes.
 2/ __rpc_execute() sets PF_MEMALLOC while handling any task
    with RPC_TASK_SWAPPER set, or when handling any task that
    holds the XPRT_LOCKED lock on an xprt used for swap.
    This removes the need for the RPC_IS_SWAPPER() test
    in ->buf_alloc handlers.
 3/ xprt_prepare_transmit() sets PF_MEMALLOC after locking
    any task to a swapper xprt.  __rpc_execute() will clear it.
 4/ PF_MEMALLOC is set for all the connect workers.
    (A condensed sketch of this save/set/restore pattern follows below.)
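
Condensed sketch of the save/set/restore pattern applied by points 2/-4/,
simplified from the __rpc_execute() and connect-worker hunks in the diff
below.  The function name here is made up; xprt_needs_memalloc() is the
helper this patch adds:

	static void rpc_run_one_action_sketch(struct rpc_task *task)
	{
		unsigned long pflags = current->flags;

		/* Dip into reserves only while this task can hold up swap
		 * traffic: it is a swap write, or it holds XPRT_LOCKED on
		 * an xprt used for swap.
		 */
		if (RPC_IS_SWAPPER(task) ||
		    xprt_needs_memalloc(task->tk_xprt, task))
			current->flags |= PF_MEMALLOC;

		task->tk_action(task);		/* run one state-machine step */

		current_restore_flags(pflags, PF_MEMALLOC);	/* restore caller's flag */
	}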

Reviewed-by: Chuck Lever <chuck.lever@oracle.com> (for xprtrdma parts)
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 89c2be8a
@@ -1412,6 +1412,8 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 {
 	int priority = flush_task_priority(how);
+	if (IS_SWAPFILE(hdr->inode))
+		task_setup_data->flags |= RPC_TASK_SWAPPER;
 	task_setup_data->priority = priority;
 	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
 	trace_nfs_initiate_write(hdr);
...
@@ -1085,8 +1085,6 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
 		task->tk_flags |= RPC_TASK_TIMEOUT;
 	if (clnt->cl_noretranstimeo)
 		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
-	if (atomic_read(&clnt->cl_swapper))
-		task->tk_flags |= RPC_TASK_SWAPPER;
 	/* Add to the client's list of all tasks */
 	spin_lock(&clnt->cl_lock);
 	list_add_tail(&task->tk_task, &clnt->cl_tasks);
...
@@ -869,6 +869,15 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 		ops->rpc_release(calldata);
 }
+static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
+{
+	if (!xprt)
+		return false;
+	if (!atomic_read(&xprt->swapper))
+		return false;
+	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
+}
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
@@ -877,6 +886,7 @@ static void __rpc_execute(struct rpc_task *task)
 	struct rpc_wait_queue *queue;
 	int task_is_async = RPC_IS_ASYNC(task);
 	int status = 0;
+	unsigned long pflags = current->flags;
 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 	if (RPC_IS_QUEUED(task))
@@ -899,6 +909,10 @@ static void __rpc_execute(struct rpc_task *task)
 		}
 		if (!do_action)
 			break;
+		if (RPC_IS_SWAPPER(task) ||
+		    xprt_needs_memalloc(task->tk_xprt, task))
+			current->flags |= PF_MEMALLOC;
 		trace_rpc_task_run_action(task, do_action);
 		do_action(task);
@@ -936,7 +950,7 @@ static void __rpc_execute(struct rpc_task *task)
 		rpc_clear_running(task);
 		spin_unlock(&queue->lock);
 		if (task_is_async)
-			return;
+			goto out;
 		/* sync task: sleep here */
 		trace_rpc_task_sync_sleep(task, task->tk_action);
@@ -960,6 +974,8 @@ static void __rpc_execute(struct rpc_task *task)
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
+out:
+	current_restore_flags(pflags, PF_MEMALLOC);
 }
 /*
@@ -1018,8 +1034,6 @@ int rpc_malloc(struct rpc_task *task)
 	if (RPC_IS_ASYNC(task))
 		gfp = GFP_NOWAIT | __GFP_NOWARN;
-	if (RPC_IS_SWAPPER(task))
-		gfp |= __GFP_MEMALLOC;
 	size += sizeof(struct rpc_buffer);
 	if (size <= RPC_BUFFER_MAXSIZE)
...
@@ -1492,6 +1492,9 @@ bool xprt_prepare_transmit(struct rpc_task *task)
 		return false;
 	}
+	if (atomic_read(&xprt->swapper))
+		/* This will be clear in __rpc_execute */
+		current->flags |= PF_MEMALLOC;
 	return true;
 }
...
@@ -235,8 +235,11 @@ xprt_rdma_connect_worker(struct work_struct *work)
 	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
 						   rx_connect_worker.work);
 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+	unsigned int pflags = current->flags;
 	int rc;
+	if (atomic_read(&xprt->swapper))
+		current->flags |= PF_MEMALLOC;
 	rc = rpcrdma_xprt_connect(r_xprt);
 	xprt_clear_connecting(xprt);
 	if (!rc) {
@@ -250,6 +253,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
 	rpcrdma_xprt_disconnect(r_xprt);
 	xprt_unlock_connect(xprt, r_xprt);
 	xprt_wake_pending_tasks(xprt, rc);
+	current_restore_flags(pflags, PF_MEMALLOC);
 }
 /**
@@ -572,8 +576,6 @@ xprt_rdma_allocate(struct rpc_task *task)
 	flags = RPCRDMA_DEF_GFP;
 	if (RPC_IS_ASYNC(task))
 		flags = GFP_NOWAIT | __GFP_NOWARN;
-	if (RPC_IS_SWAPPER(task))
-		flags |= __GFP_MEMALLOC;
 	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
 				  flags))
...
@@ -2052,7 +2052,10 @@ static void xs_udp_setup_socket(struct work_struct *work)
 	struct rpc_xprt *xprt = &transport->xprt;
 	struct socket *sock;
 	int status = -EIO;
+	unsigned int pflags = current->flags;
+	if (atomic_read(&xprt->swapper))
+		current->flags |= PF_MEMALLOC;
 	sock = xs_create_sock(xprt, transport,
 			xs_addr(xprt)->sa_family, SOCK_DGRAM,
 			IPPROTO_UDP, false);
@@ -2072,6 +2075,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
 	xprt_clear_connecting(xprt);
 	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
+	current_restore_flags(pflags, PF_MEMALLOC);
 }
 /**
@@ -2231,7 +2235,10 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	struct socket *sock = transport->sock;
 	struct rpc_xprt *xprt = &transport->xprt;
 	int status;
+	unsigned int pflags = current->flags;
+	if (atomic_read(&xprt->swapper))
+		current->flags |= PF_MEMALLOC;
 	if (!sock) {
 		sock = xs_create_sock(xprt, transport,
 				xs_addr(xprt)->sa_family, SOCK_STREAM,
@@ -2296,6 +2303,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	xprt_clear_connecting(xprt);
 out_unlock:
 	xprt_unlock_connect(xprt, transport);
+	current_restore_flags(pflags, PF_MEMALLOC);
 }
 /**
...