Commit d737e5d4 authored by Trond Myklebust

SUNRPC: Set TCP_CORK until the transmit queue is empty

When we have multiple RPC requests queued up, it makes sense to set the
TCP_CORK option while the transmit queue is non-empty.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 86438186
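
For context: while more requests are waiting to be transmitted, TCP_CORK holds back partial segments so that back-to-back small RPC messages coalesce into full-sized frames; the cork is released once the queue drains. Below is a minimal userspace analogue of that pattern, purely for illustration: the commit itself uses the in-kernel helper tcp_sock_set_cork(), not setsockopt(), and the function and parameter names here are hypothetical.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Cork while a batch of messages is queued, uncork when it drains. */
static void send_batch(int fd, const struct iovec *iov, int n)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	for (int i = 0; i < n; i++)
		write(fd, iov[i].iov_base, iov[i].iov_len);
	/* Last message written: flush any partial segment immediately. */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}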
include/linux/sunrpc/xprt.h
@@ -247,6 +247,7 @@ struct rpc_xprt {
 	struct rpc_task *	snd_task;	/* Task blocked in send */
 	struct list_head	xmit_queue;	/* Send queue */
+	atomic_long_t		xmit_queuelen;
 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
...
net/sunrpc/xprt.c
@@ -1352,6 +1352,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
 		INIT_LIST_HEAD(&req->rq_xmit2);
 out:
+		atomic_long_inc(&xprt->xmit_queuelen);
 		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
 		spin_unlock(&xprt->queue_lock);
 	}
@@ -1381,6 +1382,7 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task)
 		}
 	} else
 		list_del(&req->rq_xmit2);
+	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
 }
 /**
...
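
Note that the increment sits after the out: label, so any earlier goto out path in the enqueue function is counted too, and the matching decrement in the dequeue path keeps the counter balanced. A rough userspace sketch of that discipline, with hypothetical names: updates happen under the queue lock, while the hot send path may read the length locklessly.

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-ins for the rpc_xprt transmit queue. */
struct xq {
	pthread_mutex_t lock;	/* plays the role of xprt->queue_lock */
	atomic_long len;	/* plays the role of xprt->xmit_queuelen */
};

static void xq_enqueue(struct xq *q)
{
	pthread_mutex_lock(&q->lock);
	/* ... link the request onto the list ... */
	atomic_fetch_add(&q->len, 1);
	pthread_mutex_unlock(&q->lock);
}

static void xq_dequeue(struct xq *q)
{
	pthread_mutex_lock(&q->lock);
	/* ... unlink the request ... */
	atomic_fetch_sub(&q->len, 1);
	pthread_mutex_unlock(&q->lock);
}

/* Lockless read, like atomic_long_read(&xprt->xmit_queuelen). */
static long xq_len(struct xq *q)
{
	return atomic_load(&q->len);
}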
net/sunrpc/xprtsock.c
@@ -1018,6 +1018,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
 	 * to cope with writespace callbacks arriving _after_ we have
 	 * called sendmsg(). */
 	req->rq_xtime = ktime_get();
+	tcp_sock_set_cork(transport->inet, true);
 	while (1) {
 		status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
 					   transport->xmit.offset, rm, &sent);
@@ -1032,6 +1033,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
 		if (likely(req->rq_bytes_sent >= msglen)) {
 			req->rq_xmit_bytes_sent += transport->xmit.offset;
 			transport->xmit.offset = 0;
+			if (atomic_long_read(&xprt->xmit_queuelen) == 1)
+				tcp_sock_set_cork(transport->inet, false);
 			return 0;
 		}
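
The uncork condition reads as "this was the last queued request": the request being transmitted is still on the queue at this point (it is dequeued only after the send completes), so a length of exactly 1 means nothing else is waiting and the final partial segment should be pushed out now, while a length greater than 1 leaves the socket corked for the next request. A hypothetical userspace rendering of the same test:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdatomic.h>
#include <sys/socket.h>
#include <unistd.h>

/* Uncork only when the message just written was the last one queued. */
static void send_one(int fd, atomic_long *queuelen,
		     const void *buf, size_t len)
{
	int off = 0;

	write(fd, buf, len);
	if (atomic_load(queuelen) == 1)	/* only this request was queued */
		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}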
@@ -2163,6 +2166,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 	}
 	xs_tcp_set_socket_timeouts(xprt, sock);
+	tcp_sock_set_nodelay(sk);
 	write_lock_bh(&sk->sk_callback_lock);
@@ -2177,7 +2181,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 		/* socket options */
 		sock_reset_flag(sk, SOCK_LINGER);
-		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 		xprt_clear_connected(xprt);
...
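
The last two hunks swap the open-coded Nagle flag write (tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF) for the exported helper tcp_sock_set_nodelay(), which sets the same TCP_NAGLE_OFF flag under the socket lock. The two options combine sensibly: while TCP_CORK is set it wins and partial frames are held back, and once it is cleared, TCP_NODELAY ensures small writes go out without waiting on the Nagle algorithm. A userspace analogue of the connection setup, with a hypothetical function name:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Disable Nagle for the connection's lifetime; TCP_CORK is then
 * toggled per batch and takes precedence while it is set. */
static void setup_rpc_socket(int fd)
{
	int on = 1;

	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}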