Commit 75c84151 authored by Trond Myklebust

SUNRPC: Rename xprt->recv_lock to xprt->queue_lock

We will use the same lock to protect both the transmit and receive queues.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent ec37a58f
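
The receive paths touched below all share one pattern: take the lock, look up and pin the rpc_rqst by XID, drop the lock while the reply data is copied, then retake it to complete and unpin the request. A minimal sketch of that pattern, condensed from the hunks below (error paths trimmed; example_data_read() is an illustrative name, not a function in this patch):

/* Sketch of the receive-path locking pattern in this patch; the real
 * callers are functions such as xs_udp_data_read_skb() below. */
static void example_data_read(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);		/* formerly xprt->recv_lock */
	req = xprt_lookup_rqst(xprt, xid);	/* match reply to request */
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;
	}
	xprt_pin_rqst(req);			/* keep req valid while unlocked */
	spin_unlock(&xprt->queue_lock);

	/* ... copy the reply into req->rq_private_buf without the lock ... */

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}
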
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -235,7 +235,7 @@ struct rpc_xprt {
 	 */
 	spinlock_t		transport_lock;	/* lock transport info */
 	spinlock_t		reserve_lock;	/* lock slot table */
-	spinlock_t		recv_lock;	/* lock receive list */
+	spinlock_t		queue_lock;	/* send/receive queue lock */
 	u32			xid;		/* Next XID value to use */
 	struct rpc_task *	snd_task;	/* Task blocked in send */
 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1004,7 +1004,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	if (!bc_xprt)
 		return -EAGAIN;
-	spin_lock(&bc_xprt->recv_lock);
+	spin_lock(&bc_xprt->queue_lock);
 	req = xprt_lookup_rqst(bc_xprt, xid);
 	if (!req)
 		goto unlock_notfound;
@@ -1022,7 +1022,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	memcpy(dst->iov_base, src->iov_base, src->iov_len);
 	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
 	rqstp->rq_arg.len = 0;
-	spin_unlock(&bc_xprt->recv_lock);
+	spin_unlock(&bc_xprt->queue_lock);
 	return 0;
 unlock_notfound:
 	printk(KERN_NOTICE
@@ -1031,7 +1031,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 		__func__, ntohl(calldir),
 		bc_xprt, ntohl(xid));
 unlock_eagain:
-	spin_unlock(&bc_xprt->recv_lock);
+	spin_unlock(&bc_xprt->queue_lock);
 	return -EAGAIN;
 }
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -826,7 +826,7 @@ static void xprt_connect_status(struct rpc_task *task)
  * @xprt: transport on which the original request was transmitted
  * @xid: RPC XID of incoming reply
  *
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
  */
 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
 {
@@ -892,7 +892,7 @@ static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
  * xprt_update_rtt - Update RPC RTT statistics
  * @task: RPC request that recently completed
  *
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
  */
 void xprt_update_rtt(struct rpc_task *task)
 {
@@ -914,7 +914,7 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
  * @task: RPC request that recently completed
  * @copied: actual number of bytes received from the transport
  *
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
  */
 void xprt_complete_rqst(struct rpc_task *task, int copied)
 {
@@ -1034,10 +1034,10 @@ void xprt_transmit(struct rpc_task *task)
 		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 				sizeof(req->rq_private_buf));
 		/* Add request to the receive list */
-		spin_lock(&xprt->recv_lock);
+		spin_lock(&xprt->queue_lock);
 		list_add_tail(&req->rq_list, &xprt->recv);
 		set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
-		spin_unlock(&xprt->recv_lock);
+		spin_unlock(&xprt->queue_lock);
 		xprt_reset_majortimeo(req);
 		/* Turn off autodisconnect */
 		del_singleshot_timer_sync(&xprt->timer);
@@ -1076,7 +1076,7 @@ void xprt_transmit(struct rpc_task *task)
 		 * The spinlock ensures atomicity between the test of
 		 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
 		 */
-		spin_lock(&xprt->recv_lock);
+		spin_lock(&xprt->queue_lock);
 		if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
 			rpc_sleep_on(&xprt->pending, task, xprt_timer);
 			/* Wake up immediately if the connection was dropped */
@@ -1084,7 +1084,7 @@ void xprt_transmit(struct rpc_task *task)
 				rpc_wake_up_queued_task_set_status(&xprt->pending,
 						task, -ENOTCONN);
 		}
-		spin_unlock(&xprt->recv_lock);
+		spin_unlock(&xprt->queue_lock);
 	}
 }
@@ -1379,18 +1379,18 @@ void xprt_release(struct rpc_task *task)
 		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
 	else if (task->tk_client)
 		rpc_count_iostats(task, task->tk_client->cl_metrics);
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	if (!list_empty(&req->rq_list)) {
 		list_del_init(&req->rq_list);
 		if (xprt_is_pinned_rqst(req)) {
 			set_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
-			spin_unlock(&xprt->recv_lock);
+			spin_unlock(&xprt->queue_lock);
 			xprt_wait_on_pinned_rqst(req);
-			spin_lock(&xprt->recv_lock);
+			spin_lock(&xprt->queue_lock);
 			clear_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
 		}
 	}
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	spin_lock_bh(&xprt->transport_lock);
 	xprt->ops->release_xprt(xprt, task);
 	if (xprt->ops->release_request)
@@ -1420,7 +1420,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
-	spin_lock_init(&xprt->recv_lock);
+	spin_lock_init(&xprt->queue_lock);
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1238,7 +1238,7 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
 		goto out_badheader;
 out:
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	cwnd = xprt->cwnd;
 	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
 	if (xprt->cwnd > cwnd)
@@ -1246,7 +1246,7 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
 	xprt_complete_rqst(rqst->rq_task, status);
 	xprt_unpin_rqst(rqst);
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	return;
 /* If the incoming reply terminated a pending RPC, the next
@@ -1345,7 +1345,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	/* Match incoming rpcrdma_rep to an rpcrdma_req to
 	 * get context for handling any incoming chunks.
 	 */
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
 	if (!rqst)
 		goto out_norqst;
@@ -1357,7 +1357,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 		credits = buf->rb_max_requests;
 	buf->rb_credits = credits;
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	req = rpcr_to_rdmar(rqst);
 	req->rl_reply = rep;
@@ -1378,7 +1378,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
  * is corrupt.
  */
 out_norqst:
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	trace_xprtrdma_reply_rqst(rep);
 	goto repost;
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -56,7 +56,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
 	if (src->iov_len < 24)
 		goto out_shortreply;
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	req = xprt_lookup_rqst(xprt, xid);
 	if (!req)
 		goto out_notfound;
@@ -86,7 +86,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
 	rcvbuf->len = 0;
 out_unlock:
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 out:
 	return ret;
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -966,12 +966,12 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 		return;
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
 	xprt_pin_rqst(rovr);
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	task = rovr->rq_task;
 	copied = rovr->rq_private_buf.buflen;
@@ -980,16 +980,16 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 		dprintk("RPC:       sk_buff copy failed\n");
-		spin_lock(&xprt->recv_lock);
+		spin_lock(&xprt->queue_lock);
 		goto out_unpin;
 	}
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	xprt_complete_rqst(task, copied);
 out_unpin:
 	xprt_unpin_rqst(rovr);
 out_unlock:
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 }
 
 static void xs_local_data_receive(struct sock_xprt *transport)
@@ -1058,13 +1058,13 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 		return;
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
 	xprt_pin_rqst(rovr);
 	xprt_update_rtt(rovr->rq_task);
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	task = rovr->rq_task;
 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
@@ -1072,7 +1072,7 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
-		spin_lock(&xprt->recv_lock);
+		spin_lock(&xprt->queue_lock);
 		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 		goto out_unpin;
 	}
@@ -1081,13 +1081,13 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	spin_lock_bh(&xprt->transport_lock);
 	xprt_adjust_cwnd(xprt, task, copied);
 	spin_unlock_bh(&xprt->transport_lock);
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	xprt_complete_rqst(task, copied);
 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 out_unpin:
 	xprt_unpin_rqst(rovr);
 out_unlock:
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 }
 
 static void xs_udp_data_receive(struct sock_xprt *transport)
@@ -1356,24 +1356,24 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->recv.xid));
 	/* Find and lock the request corresponding to this xid */
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	req = xprt_lookup_rqst(xprt, transport->recv.xid);
 	if (!req) {
 		dprintk("RPC:       XID %08x request not found!\n",
 				ntohl(transport->recv.xid));
-		spin_unlock(&xprt->recv_lock);
+		spin_unlock(&xprt->queue_lock);
 		return -1;
 	}
 	xprt_pin_rqst(req);
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	xs_tcp_read_common(xprt, desc, req);
-	spin_lock(&xprt->recv_lock);
+	spin_lock(&xprt->queue_lock);
 	if (!(transport->recv.flags & TCP_RCV_COPY_DATA))
 		xprt_complete_rqst(req->rq_task, transport->recv.copied);
 	xprt_unpin_rqst(req);
-	spin_unlock(&xprt->recv_lock);
+	spin_unlock(&xprt->queue_lock);
 	return 0;
 }