Commit 5d371cbc authored by Trond Myklebust's avatar Trond Myklebust

UDP round trip timer fix.

Modify Karn's algorithm so that we inherit timeouts from previous requests: if the previous request timed out, the next request starts out with a correspondingly longer receive timeout. This lengthens the window of time during which we are able to accept updates to the RTO estimate.

Scheme proposed by Brian Mancuso, but it is standard practice in TCP congestion control implementations.
parent 9b80ac47
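
To make the scheme concrete, here is a minimal userspace sketch of the idea (not the kernel code): struct rtt_model, request_done(), next_timeout(), the 250 ms starting RTO and the 3/4 smoothing are all invented for illustration; only the inheritance of the previous request's timeout count is meant to mirror the patch.

/*
 * Minimal userspace model of the scheme (hypothetical names, not kernel code):
 * the number of timeouts suffered by the previous request is inherited by the
 * next one, which starts with a correspondingly larger receive timeout, so a
 * slow server can still be measured without tripping retransmissions.
 */
#include <stdio.h>

struct rtt_model {
	unsigned long rto;	/* current RTO estimate, in ms */
	int ntimeouts;		/* timeouts seen by the last request */
};

/* Receive timeout for the next request: RTO doubled once per inherited timeout. */
static unsigned long next_timeout(const struct rtt_model *rt)
{
	return rt->rto << rt->ntimeouts;
}

/* Called when a reply arrives; ntrans is how many times the request was sent. */
static void request_done(struct rtt_model *rt, int ntrans, unsigned long rtt_sample)
{
	if (ntrans == 1)		/* Karn's rule: only unambiguous samples */
		rt->rto = (3 * rt->rto + rtt_sample) / 4;	/* toy smoothing, not rpc_update_rtt() */
	rt->ntimeouts = ntrans - 1;	/* inherited by the next request */
}

int main(void)
{
	struct rtt_model rt = { .rto = 250, .ntimeouts = 0 };

	/* Request 1 times out twice before being answered: no RTT update. */
	request_done(&rt, 3, 900);
	printf("next timeout: %lu ms\n", next_timeout(&rt));	/* 250 << 2 = 1000 */

	/* Request 2 now waits long enough to be answered on the first send. */
	request_done(&rt, 1, 800);
	printf("next timeout: %lu ms\n", next_timeout(&rt));	/* updated RTO, no shift */
	return 0;
}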
include/linux/sunrpc/timer.h
@@ -15,6 +15,7 @@ struct rpc_rtt {
 	unsigned long timeo;	/* default timeout value */
 	unsigned long srtt[5];	/* smoothed round trip time << 3 */
 	unsigned long sdrtt[5];	/* smoothed medium deviation of RTT */
+	int ntimeouts[5];	/* Number of timeouts for the last request */
 };
@@ -22,4 +23,18 @@ extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
 extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
 extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
 
+static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo)
+{
+	if (!timer)
+		return;
+	rt->ntimeouts[timer-1] = ntimeo;
+}
+
+static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer)
+{
+	if (!timer)
+		return 0;
+	return rt->ntimeouts[timer-1];
+}
+
 #endif /* _LINUX_SUNRPC_TIMER_H */
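
The two helpers just store and fetch a per-timer-class count; timer class 0 means the procedure is not timed and is ignored. Below is a small standalone illustration of their behaviour, with struct rpc_rtt trimmed to the one field used here and a made-up main():

/* Standalone illustration of the new helpers; struct trimmed to what is used. */
#include <stdio.h>

struct rpc_rtt {
	int ntimeouts[5];	/* number of timeouts for the last request, per timer class */
};

static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo)
{
	if (!timer)		/* timer 0: procedure is not timed, nothing recorded */
		return;
	rt->ntimeouts[timer-1] = ntimeo;
}

static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer)
{
	if (!timer)
		return 0;
	return rt->ntimeouts[timer-1];
}

int main(void)
{
	struct rpc_rtt rtt = { { 0 } };

	rpc_set_timeo(&rtt, 2, 3);	/* last request on timer class 2 timed out 3 times */
	rpc_set_timeo(&rtt, 0, 7);	/* ignored: class 0 is never recorded */
	printf("%d %d\n", rpc_ntimeo(&rtt, 2), rpc_ntimeo(&rtt, 0));	/* prints "3 0" */
	return 0;
}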
net/sunrpc/xprt.c
@@ -579,14 +579,14 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
 	/* Adjust congestion window */
 	if (!xprt->nocong) {
+		unsigned timer = task->tk_msg.rpc_proc->p_timer;
 		xprt_adjust_cwnd(xprt, copied);
 		__xprt_put_cong(xprt, req);
-		if (req->rq_ntrans == 1) {
-			unsigned timer =
-				task->tk_msg.rpc_proc->p_timer;
-			if (timer)
+		if (timer) {
+			if (req->rq_ntrans == 1)
 				rpc_update_rtt(&clnt->cl_rtt, timer,
 						(long)jiffies - req->rq_xtime);
+			rpc_set_timeo(&clnt->cl_rtt, timer, req->rq_ntrans - 1);
 		}
 	}
@@ -1223,8 +1223,9 @@ xprt_transmit(struct rpc_task *task)
 	/* Set the task's receive timeout value */
 	spin_lock_bh(&xprt->sock_lock);
 	if (!xprt->nocong) {
-		task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt,
-				task->tk_msg.rpc_proc->p_timer);
+		int timer = task->tk_msg.rpc_proc->p_timer;
+		task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt, timer);
+		task->tk_timeout <<= rpc_ntimeo(&clnt->cl_rtt, timer);
 		task->tk_timeout <<= clnt->cl_timeout.to_retries
 			- req->rq_timeout.to_retries;
 		if (task->tk_timeout > req->rq_timeout.to_maxval)
...
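
To see what the new shift does in xprt_transmit, here is a worked standalone example with made-up numbers; rpc_calc_rto() is replaced by a constant, and the clamp against to_maxval (visible just after the hunk) is included:

/*
 * Worked illustration of how the receive timeout is now composed.
 * All values are hypothetical and chosen only to make the arithmetic visible.
 */
#include <stdio.h>

int main(void)
{
	unsigned long rto = 120;	/* say rpc_calc_rto() returned 120 jiffies */
	int ntimeo = 2;			/* previous request timed out twice */
	int retries_diff = 1;		/* cl_timeout.to_retries - rq_timeout.to_retries */
	unsigned long to_maxval = 6000;	/* per-request cap */

	unsigned long timeout = rto;
	timeout <<= ntimeo;		/* new: inherit backoff from the last request -> 480 */
	timeout <<= retries_diff;	/* pre-existing shift by the retries difference -> 960 */
	if (timeout > to_maxval)
		timeout = to_maxval;

	printf("receive timeout = %lu jiffies\n", timeout);	/* 960 */
	return 0;
}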