Commit c5dc7491 authored by David S. Miller, committed by Dmitry Torokhov

[TCP]: Grow socket receive buffer based upon estimated sender window.

parent 2a1ca0d0
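The change implements receiver-side buffer auto-tuning (in the spirit of Dynamic Right-Sizing): each time payload is copied to user space, the receiver measures how many bytes the application consumed over one estimated round-trip time. Twice that amount approximates the sender's window, and sk_rcvbuf is grown to hold it, after charging every advmss-sized segment for header room and sk_buff overhead. Below is a minimal userspace sketch of that sizing arithmetic, not the kernel code itself; the overhead constants and example values are illustrative assumptions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's values (assumptions). */
#define ADVMSS		1460	/* advertised MSS */
#define MAX_TCP_HDR	 128	/* worst-case header room */
#define SKB_OVERHEAD	 256	/* stand-in for 16 + sizeof(struct sk_buff) */

/* Bytes consumed by the application in one RTT estimate the sender's
 * window; doubling leaves the sender room to keep growing. */
static int estimate_rcvbuf(int copied_per_rtt, int rmem_max)
{
	int segs = (2 * copied_per_rtt) / ADVMSS;
	int space;

	if (segs == 0)
		segs = 1;
	/* Charge each segment its full memory cost, not just payload. */
	space = segs * (ADVMSS + MAX_TCP_HDR + SKB_OVERHEAD);
	return space < rmem_max ? space : rmem_max;
}

int main(void)
{
	/* Example: 64 KB consumed in one RTT, tcp_rmem[2] of 4 MB. */
	printf("suggested rcvbuf: %d bytes\n",
	       estimate_rcvbuf(64 * 1024, 4 << 20));
	return 0;
}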
@@ -383,6 +383,13 @@ struct tcp_opt {
 		__u32	time;
 	} rcv_rtt_est;
 
+	/* Receiver queue space */
+	struct {
+		int	space;
+		__u32	seq;
+		__u32	time;
+	} rcvq_space;
+
 	/* TCP Westwood structure */
 	struct {
 		__u32	bw_ns_est;	/* first bandwidth estimation..not too smoothed 8) */
...
@@ -801,6 +801,8 @@ extern int tcp_rcv_established(struct sock *sk,
				       struct tcphdr *th,
				       unsigned len);
 
+extern void tcp_rcv_space_adjust(struct sock *sk);
+
 enum tcp_ack_state_t
 {
 	TCP_ACK_SCHED	= 1,
...
@@ -1482,6 +1482,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			break;
 	}
 	tp->copied_seq = seq;
+
+	tcp_rcv_space_adjust(sk);
+
 	/* Clean up data we have read: This will do ACK frames. */
 	if (copied)
 		cleanup_rbuf(sk, copied);
@@ -1742,6 +1745,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		copied += used;
 		len -= used;
 
+		tcp_rcv_space_adjust(sk);
+
 skip_copy:
 	if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
 		tp->urg_data = 0;
...
@@ -305,6 +305,8 @@ static void tcp_init_buffer_space(struct sock *sk)
 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
 		tcp_fixup_sndbuf(sk);
 
+	tp->rcvq_space.space = tp->rcv_wnd;
+
 	maxwin = tcp_full_space(sk);
 
 	if (tp->window_clamp >= maxwin) {
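(Note that tcp_init_buffer_space() now seeds tp->rcvq_space.space from the initially advertised receive window, so the very first measurement compares against a sane baseline instead of zero.)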
@@ -431,6 +433,53 @@ static inline void tcp_rcv_rtt_measure_ts(struct tcp_opt *tp, struct sk_buff *sk
 		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_tsecr, 0);
 }
 
+/*
+ * This function should be called every time data is copied to user space.
+ * It calculates the appropriate TCP receive buffer space.
+ */
+void tcp_rcv_space_adjust(struct sock *sk)
+{
+	struct tcp_opt *tp = tcp_sk(sk);
+	int time;
+	int space;
+
+	if (tp->rcvq_space.time == 0)
+		goto new_measure;
+
+	time = tcp_time_stamp - tp->rcvq_space.time;
+	if (time < (tp->rcv_rtt_est.rtt >> 3) ||
+	    tp->rcv_rtt_est.rtt == 0)
+		return;
+
+	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
+
+	space = max(tp->rcvq_space.space, space);
+
+	if (tp->rcvq_space.space != space) {
+		int rcvmem;
+
+		tp->rcvq_space.space = space;
+
+		/* Receive space grows, normalize in order to
+		 * take into account packet headers and sk_buff
+		 * structure overhead.
+		 */
+		space /= tp->advmss;
+		if (!space)
+			space = 1;
+		rcvmem = (tp->advmss + MAX_TCP_HEADER +
+			  16 + sizeof(struct sk_buff));
+		space *= rcvmem;
+		space = min(space, sysctl_tcp_rmem[2]);
+		if (space > sk->sk_rcvbuf)
+			sk->sk_rcvbuf = space;
+	}
+
+new_measure:
+	tp->rcvq_space.seq = tp->copied_seq;
+	tp->rcvq_space.time = tcp_time_stamp;
+}
+
 /* There is something which you must keep in mind when you analyze the
  * behavior of the tp->ato delayed ack timeout interval.  When a
  * connection starts up, we want to ack as quickly as possible.  The
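A few properties of the function are worth noting: it refuses to conclude a measurement until at least one estimated round trip has elapsed (the receiver-side RTT estimate is kept scaled by 8, hence the >> 3); the max() against the previous value means the estimate, and therefore the buffer, only ever grows; and the result is clamped to sysctl_tcp_rmem[2], so auto-tuning can never exceed the administrator's ceiling. Dividing by advmss and then multiplying by rcvmem converts a payload-byte budget into a real memory budget that includes MAX_TCP_HEADER and per-skb overhead.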
@@ -3387,6 +3436,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 			tp->ucopy.len -= chunk;
 			tp->copied_seq += chunk;
 			eaten = (chunk == skb->len && !th->fin);
+			tcp_rcv_space_adjust(sk);
 		}
 		local_bh_disable();
 	}
@@ -3987,6 +4037,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 		if (!err) {
 			tp->ucopy.len -= chunk;
 			tp->copied_seq += chunk;
+			tcp_rcv_space_adjust(sk);
 		}
 
 		local_bh_disable();
...
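Taken together, the call sites cover every path that copies payload to user space (tcp_read_sock(), tcp_recvmsg(), and the two ucopy paths in tcp_data_queue() and tcp_copy_to_iovec()), matching the new function's comment that it should be called every time data is copied to user space.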