Commit 9ba7d221 authored by Trond Myklebust's avatar Trond Myklebust

[PATCH] RPC over UDP congestion control updates [3/8]

Improve the response to timeouts. As requests time out, we delay
timing out the remaining requests (in fact we follow exponential
backoff). This is done because we assume either that the round trip
time has been underestimated, or that the network/server is congested,
and we need to back off the resending of new requests.
parent fa7b279e
...@@ -99,6 +99,7 @@ struct rpc_rqst { ...@@ -99,6 +99,7 @@ struct rpc_rqst {
u32 rq_bytes_sent; /* Bytes we have sent */ u32 rq_bytes_sent; /* Bytes we have sent */
long rq_xtime; /* when transmitted */ long rq_xtime; /* when transmitted */
int rq_ntimeo;
int rq_nresend; int rq_nresend;
}; };
#define rq_svec rq_snd_buf.head #define rq_svec rq_snd_buf.head
......
...@@ -77,6 +77,8 @@ ...@@ -77,6 +77,8 @@
# define RPCDBG_FACILITY RPCDBG_XPRT # define RPCDBG_FACILITY RPCDBG_XPRT
#endif #endif
#define XPRT_MAX_BACKOFF (8)
/* /*
* Local functions * Local functions
*/ */
...@@ -931,6 +933,21 @@ xprt_write_space(struct sock *sk) ...@@ -931,6 +933,21 @@ xprt_write_space(struct sock *sk)
} }
} }
/*
* Exponential backoff for UDP retries
*/
static inline int
xprt_expbackoff(struct rpc_task *task, struct rpc_rqst *req)
{
int backoff;
req->rq_ntimeo++;
backoff = min(rpc_ntimeo(&task->tk_client->cl_rtt), XPRT_MAX_BACKOFF);
if (req->rq_ntimeo < (1 << backoff))
return 1;
return 0;
}
/* /*
* RPC receive timeout handler. * RPC receive timeout handler.
*/ */
...@@ -943,9 +960,16 @@ xprt_timer(struct rpc_task *task) ...@@ -943,9 +960,16 @@ xprt_timer(struct rpc_task *task)
spin_lock(&xprt->sock_lock); spin_lock(&xprt->sock_lock);
if (req->rq_received) if (req->rq_received)
goto out; goto out;
if (!xprt->nocong) {
if (xprt_expbackoff(task, req)) {
rpc_add_timer(task, xprt_timer);
goto out_unlock;
}
rpc_inc_timeo(&task->tk_client->cl_rtt);
xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
}
req->rq_nresend++; req->rq_nresend++;
rpc_inc_timeo(&task->tk_client->cl_rtt);
xprt_adjust_cwnd(xprt, -ETIMEDOUT);
dprintk("RPC: %4d xprt_timer (%s request)\n", dprintk("RPC: %4d xprt_timer (%s request)\n",
task->tk_pid, req ? "pending" : "backlogged"); task->tk_pid, req ? "pending" : "backlogged");
...@@ -954,6 +978,7 @@ xprt_timer(struct rpc_task *task) ...@@ -954,6 +978,7 @@ xprt_timer(struct rpc_task *task)
out: out:
task->tk_timeout = 0; task->tk_timeout = 0;
rpc_wake_up_task(task); rpc_wake_up_task(task);
out_unlock:
spin_unlock(&xprt->sock_lock); spin_unlock(&xprt->sock_lock);
} }
...@@ -1076,16 +1101,9 @@ do_xprt_transmit(struct rpc_task *task) ...@@ -1076,16 +1101,9 @@ do_xprt_transmit(struct rpc_task *task)
dprintk("RPC: %4d xmit complete\n", task->tk_pid); dprintk("RPC: %4d xmit complete\n", task->tk_pid);
/* Set the task's receive timeout value */ /* Set the task's receive timeout value */
if (!xprt->nocong) { if (!xprt->nocong) {
int backoff;
task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt, task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt,
rpcproc_timer(clnt, task->tk_msg.rpc_proc)); rpcproc_timer(clnt, task->tk_msg.rpc_proc));
/* If we are retransmitting, increment the timeout counter */ req->rq_ntimeo = 0;
backoff = req->rq_nresend;
if (backoff) {
if (backoff > 7)
backoff = 7;
task->tk_timeout <<= backoff;
}
if (task->tk_timeout > req->rq_timeout.to_maxval) if (task->tk_timeout > req->rq_timeout.to_maxval)
task->tk_timeout = req->rq_timeout.to_maxval; task->tk_timeout = req->rq_timeout.to_maxval;
} else } else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment