Commit 0f4389e9 authored by Arnaldo Carvalho de Melo, committed by Linus Torvalds

[TCP]: Fix excessive stack usage resulting in OOPS with 4KSTACKS.

Various routines were putting a full struct tcp_sock on
the local stack.  What they really wanted was a subset
of this information when doing TCP options processing
when we only have a mini-socket (for example in SYN-RECVD
and TIME_WAIT states).

Therefore pull out the needed information into a sub-struct
and use that in the TCP options processing routines.
Signed-off-by: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f7b4ca43
...@@ -210,6 +210,27 @@ enum tcp_congestion_algo { ...@@ -210,6 +210,27 @@ enum tcp_congestion_algo {
TCP_BIC, TCP_BIC,
}; };
/*
 * TCP options state negotiated/received from the peer.
 *
 * Pulled out of struct tcp_sock (see commit message above) so that TCP
 * options processing can work on just this sub-struct when only a
 * mini-socket exists (e.g. SYN-RECV and TIME-WAIT states), instead of
 * placing a full struct tcp_sock on the kernel stack.
 */
struct tcp_options_received {
/* PAWS/RTTM data */
long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
__u32 ts_recent; /* Time stamp to echo next */
__u32 rcv_tsval; /* Time stamp value */
__u32 rcv_tsecr; /* Time stamp echo reply */
char saw_tstamp; /* Saw TIMESTAMP on last packet */
char tstamp_ok; /* TIMESTAMP seen on SYN packet */
char sack_ok; /* SACK seen on SYN packet */
char wscale_ok; /* Wscale seen on SYN packet */
__u8 snd_wscale; /* Window scaling received from sender */
__u8 rcv_wscale; /* Window scaling to send to receiver */
/* SACKs data */
__u8 dsack; /* D-SACK is scheduled */
__u8 eff_sacks; /* Size of SACK array to send with next packet */
__u8 num_sacks; /* Number of SACK blocks */
__u8 __pad; /* explicit padding to keep user_mss aligned */
__u16 user_mss; /* mss requested by user in ioctl */
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
struct tcp_sock { struct tcp_sock {
/* inet_sock has to be the first member of tcp_sock */ /* inet_sock has to be the first member of tcp_sock */
struct inet_sock inet; struct inet_sock inet;
...@@ -262,22 +283,19 @@ struct tcp_sock { ...@@ -262,22 +283,19 @@ struct tcp_sock {
__u32 pmtu_cookie; /* Last pmtu seen by socket */ __u32 pmtu_cookie; /* Last pmtu seen by socket */
__u32 mss_cache; /* Cached effective mss, not including SACKS */ __u32 mss_cache; /* Cached effective mss, not including SACKS */
__u16 mss_cache_std; /* Like mss_cache, but without TSO */ __u16 mss_cache_std; /* Like mss_cache, but without TSO */
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
__u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */ __u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */
__u16 ext2_header_len;/* Options depending on route */ __u16 ext2_header_len;/* Options depending on route */
__u8 ca_state; /* State of fast-retransmit machine */ __u8 ca_state; /* State of fast-retransmit machine */
__u8 retransmits; /* Number of unrecovered RTO timeouts. */ __u8 retransmits; /* Number of unrecovered RTO timeouts. */
__u32 frto_highmark; /* snd_nxt when RTO occurred */
__u8 reordering; /* Packet reordering metric. */ __u8 reordering; /* Packet reordering metric. */
__u8 frto_counter; /* Number of new acks after RTO */ __u8 frto_counter; /* Number of new acks after RTO */
__u32 frto_highmark; /* snd_nxt when RTO occurred */
__u8 adv_cong; /* Using Vegas, Westwood, or BIC */ __u8 adv_cong; /* Using Vegas, Westwood, or BIC */
__u8 defer_accept; /* User waits for some data after accept() */ __u8 defer_accept; /* User waits for some data after accept() */
/* one byte hole, try to pack */
/* RTT measurement */ /* RTT measurement */
__u8 backoff; /* backoff */
__u32 srtt; /* smoothed round trip time << 3 */ __u32 srtt; /* smoothed round trip time << 3 */
__u32 mdev; /* medium deviation */ __u32 mdev; /* medium deviation */
__u32 mdev_max; /* maximal mdev for the last rtt period */ __u32 mdev_max; /* maximal mdev for the last rtt period */
...@@ -288,7 +306,15 @@ struct tcp_sock { ...@@ -288,7 +306,15 @@ struct tcp_sock {
__u32 packets_out; /* Packets which are "in flight" */ __u32 packets_out; /* Packets which are "in flight" */
__u32 left_out; /* Packets which leaved network */ __u32 left_out; /* Packets which leaved network */
__u32 retrans_out; /* Retransmitted packets out */ __u32 retrans_out; /* Retransmitted packets out */
__u8 backoff; /* backoff */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
__u8 nonagle; /* Disable Nagle algorithm? */
__u8 keepalive_probes; /* num of allowed keep alive probes */
__u8 probes_out; /* unanswered 0 window probes */
struct tcp_options_received rx_opt;
/* /*
* Slow start and congestion control (see also Nagle, and Karn & Partridge) * Slow start and congestion control (see also Nagle, and Karn & Partridge)
...@@ -314,40 +340,19 @@ struct tcp_sock { ...@@ -314,40 +340,19 @@ struct tcp_sock {
__u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ __u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
__u32 pushed_seq; /* Last pushed seq, required to talk to windows */ __u32 pushed_seq; /* Last pushed seq, required to talk to windows */
__u32 copied_seq; /* Head of yet unread data */ __u32 copied_seq; /* Head of yet unread data */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
char tstamp_ok, /* TIMESTAMP seen on SYN packet */
wscale_ok, /* Wscale seen on SYN packet */
sack_ok; /* SACK seen on SYN packet */
char saw_tstamp; /* Saw TIMESTAMP on last packet */
__u8 snd_wscale; /* Window scaling received from sender */
__u8 rcv_wscale; /* Window scaling to send to receiver */
__u8 nonagle; /* Disable Nagle algorithm? */
__u8 keepalive_probes; /* num of allowed keep alive probes */
/* PAWS/RTTM data */
__u32 rcv_tsval; /* Time stamp value */
__u32 rcv_tsecr; /* Time stamp echo reply */
__u32 ts_recent; /* Time stamp to echo next */
long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
/* SACKs data */ /* SACKs data */
__u16 user_mss; /* mss requested by user in ioctl */
__u8 dsack; /* D-SACK is scheduled */
__u8 eff_sacks; /* Size of SACK array to send with next packet */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
__u32 window_clamp; /* Maximal window to advertise */ __u32 window_clamp; /* Maximal window to advertise */
__u32 rcv_ssthresh; /* Current window clamp */ __u32 rcv_ssthresh; /* Current window clamp */
__u8 probes_out; /* unanswered 0 window probes */
__u8 num_sacks; /* Number of SACK blocks */
__u16 advmss; /* Advertised MSS */ __u16 advmss; /* Advertised MSS */
__u8 syn_retries; /* num of allowed syn retries */ __u8 syn_retries; /* num of allowed syn retries */
__u8 ecn_flags; /* ECN status bits. */ __u8 ecn_flags; /* ECN status bits. */
__u16 prior_ssthresh; /* ssthresh saved at recovery start */ __u16 prior_ssthresh; /* ssthresh saved at recovery start */
__u16 __pad1;
__u32 lost_out; /* Lost packets */ __u32 lost_out; /* Lost packets */
__u32 sacked_out; /* SACK'd packets */ __u32 sacked_out; /* SACK'd packets */
__u32 fackets_out; /* FACK'd packets */ __u32 fackets_out; /* FACK'd packets */
......
...@@ -832,9 +832,9 @@ static __inline__ void tcp_delack_init(struct tcp_sock *tp) ...@@ -832,9 +832,9 @@ static __inline__ void tcp_delack_init(struct tcp_sock *tp)
memset(&tp->ack, 0, sizeof(tp->ack)); memset(&tp->ack, 0, sizeof(tp->ack));
} }
static inline void tcp_clear_options(struct tcp_sock *tp) static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{ {
tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0; rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
} }
enum tcp_tw_status enum tcp_tw_status
...@@ -883,7 +883,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, ...@@ -883,7 +883,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
extern int tcp_listen_start(struct sock *sk); extern int tcp_listen_start(struct sock *sk);
extern void tcp_parse_options(struct sk_buff *skb, extern void tcp_parse_options(struct sk_buff *skb,
struct tcp_sock *tp, struct tcp_options_received *opt_rx,
int estab); int estab);
/* /*
...@@ -1071,7 +1071,7 @@ static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) ...@@ -1071,7 +1071,7 @@ static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
static __inline__ void tcp_fast_path_on(struct tcp_sock *tp) static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{ {
__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale); __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
} }
static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
...@@ -1323,7 +1323,7 @@ static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp) ...@@ -1323,7 +1323,7 @@ static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
static inline void tcp_sync_left_out(struct tcp_sock *tp) static inline void tcp_sync_left_out(struct tcp_sock *tp)
{ {
if (tp->sack_ok && if (tp->rx_opt.sack_ok &&
(tp->sacked_out >= tp->packets_out - tp->lost_out)) (tp->sacked_out >= tp->packets_out - tp->lost_out))
tp->sacked_out = tp->packets_out - tp->lost_out; tp->sacked_out = tp->packets_out - tp->lost_out;
tp->left_out = tp->sacked_out + tp->lost_out; tp->left_out = tp->sacked_out + tp->lost_out;
...@@ -1649,39 +1649,39 @@ static __inline__ void tcp_done(struct sock *sk) ...@@ -1649,39 +1649,39 @@ static __inline__ void tcp_done(struct sock *sk)
tcp_destroy_sock(sk); tcp_destroy_sock(sk);
} }
static __inline__ void tcp_sack_reset(struct tcp_sock *tp) static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{ {
tp->dsack = 0; rx_opt->dsack = 0;
tp->eff_sacks = 0; rx_opt->eff_sacks = 0;
tp->num_sacks = 0; rx_opt->num_sacks = 0;
} }
/*
 * Write the TCP option words (timestamp and/or SACK blocks) for an
 * outgoing segment into *ptr, and consume one scheduled D-SACK if any.
 *
 * @ptr:    destination for 32-bit option words (assumed large enough
 *          for the options implied by eff_sacks/tstamp_ok — caller
 *          sized the header accordingly)
 * @tp:     socket whose rx_opt state drives which options are emitted
 * @tstamp: timestamp value to place in the TSval field
 */
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		/* A scheduled D-SACK is always reported in the first block. */
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		/* D-SACK is sent at most once; drop it once emitted. */
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}
...@@ -1827,17 +1827,17 @@ static inline void tcp_synq_drop(struct sock *sk, struct open_request *req, ...@@ -1827,17 +1827,17 @@ static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
} }
static __inline__ void tcp_openreq_init(struct open_request *req, static __inline__ void tcp_openreq_init(struct open_request *req,
struct tcp_sock *tp, struct tcp_options_received *rx_opt,
struct sk_buff *skb) struct sk_buff *skb)
{ {
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
req->rcv_isn = TCP_SKB_CB(skb)->seq; req->rcv_isn = TCP_SKB_CB(skb)->seq;
req->mss = tp->mss_clamp; req->mss = rx_opt->mss_clamp;
req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
req->tstamp_ok = tp->tstamp_ok; req->tstamp_ok = rx_opt->tstamp_ok;
req->sack_ok = tp->sack_ok; req->sack_ok = rx_opt->sack_ok;
req->snd_wscale = tp->snd_wscale; req->snd_wscale = rx_opt->snd_wscale;
req->wscale_ok = tp->wscale_ok; req->wscale_ok = rx_opt->wscale_ok;
req->acked = 0; req->acked = 0;
req->ecn_ok = 0; req->ecn_ok = 0;
req->rmt_port = skb->h.th->source; req->rmt_port = skb->h.th->source;
...@@ -1886,11 +1886,11 @@ static inline int tcp_fin_time(const struct tcp_sock *tp) ...@@ -1886,11 +1886,11 @@ static inline int tcp_fin_time(const struct tcp_sock *tp)
return fin_timeout; return fin_timeout;
} }
static inline int tcp_paws_check(const struct tcp_sock *tp, int rst) static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{ {
if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0) if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
return 0; return 0;
if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS) if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
return 0; return 0;
/* RST segments are not recommended to carry timestamp, /* RST segments are not recommended to carry timestamp,
...@@ -1905,7 +1905,7 @@ static inline int tcp_paws_check(const struct tcp_sock *tp, int rst) ...@@ -1905,7 +1905,7 @@ static inline int tcp_paws_check(const struct tcp_sock *tp, int rst)
However, we can relax time bounds for RST segments to MSL. However, we can relax time bounds for RST segments to MSL.
*/ */
if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL) if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
return 0; return 0;
return 1; return 1;
} }
......
...@@ -1829,8 +1829,8 @@ int tcp_disconnect(struct sock *sk, int flags) ...@@ -1829,8 +1829,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_retrans(tp); tcp_clear_retrans(tp);
tcp_delack_init(tp); tcp_delack_init(tp);
sk->sk_send_head = NULL; sk->sk_send_head = NULL;
tp->saw_tstamp = 0; tp->rx_opt.saw_tstamp = 0;
tcp_sack_reset(tp); tcp_sack_reset(&tp->rx_opt);
__sk_dst_reset(sk); __sk_dst_reset(sk);
BUG_TRAP(!inet->num || tp->bind_hash); BUG_TRAP(!inet->num || tp->bind_hash);
...@@ -1969,7 +1969,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, ...@@ -1969,7 +1969,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
err = -EINVAL; err = -EINVAL;
break; break;
} }
tp->user_mss = val; tp->rx_opt.user_mss = val;
break; break;
case TCP_NODELAY: case TCP_NODELAY:
...@@ -2119,14 +2119,14 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) ...@@ -2119,14 +2119,14 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_probes = tp->probes_out; info->tcpi_probes = tp->probes_out;
info->tcpi_backoff = tp->backoff; info->tcpi_backoff = tp->backoff;
if (tp->tstamp_ok) if (tp->rx_opt.tstamp_ok)
info->tcpi_options |= TCPI_OPT_TIMESTAMPS; info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
if (tp->sack_ok) if (tp->rx_opt.sack_ok)
info->tcpi_options |= TCPI_OPT_SACK; info->tcpi_options |= TCPI_OPT_SACK;
if (tp->wscale_ok) { if (tp->rx_opt.wscale_ok) {
info->tcpi_options |= TCPI_OPT_WSCALE; info->tcpi_options |= TCPI_OPT_WSCALE;
info->tcpi_snd_wscale = tp->snd_wscale; info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
info->tcpi_rcv_wscale = tp->rcv_wscale; info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
} }
if (tp->ecn_flags&TCP_ECN_OK) if (tp->ecn_flags&TCP_ECN_OK)
...@@ -2186,7 +2186,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, ...@@ -2186,7 +2186,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
case TCP_MAXSEG: case TCP_MAXSEG:
val = tp->mss_cache_std; val = tp->mss_cache_std;
if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->user_mss; val = tp->rx_opt.user_mss;
break; break;
case TCP_NODELAY: case TCP_NODELAY:
val = !!(tp->nonagle&TCP_NAGLE_OFF); val = !!(tp->nonagle&TCP_NAGLE_OFF);
......
...@@ -119,9 +119,9 @@ int sysctl_tcp_bic_beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ ...@@ -119,9 +119,9 @@ int sysctl_tcp_bic_beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
#define IsReno(tp) ((tp)->sack_ok == 0) #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
#define IsFack(tp) ((tp)->sack_ok & 2) #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
#define IsDSack(tp) ((tp)->sack_ok & 4) #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
...@@ -205,7 +205,7 @@ static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) ...@@ -205,7 +205,7 @@ static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp)
static void tcp_fixup_sndbuf(struct sock *sk) static void tcp_fixup_sndbuf(struct sock *sk)
{ {
int sndmem = tcp_sk(sk)->mss_clamp + MAX_TCP_HEADER + 16 + int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
sizeof(struct sk_buff); sizeof(struct sk_buff);
if (sk->sk_sndbuf < 3 * sndmem) if (sk->sk_sndbuf < 3 * sndmem)
...@@ -440,10 +440,10 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) ...@@ -440,10 +440,10 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb)
{ {
if (tp->rcv_tsecr && if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq - (TCP_SKB_CB(skb)->end_seq -
TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss))
tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_tsecr, 0); tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
} }
/* /*
...@@ -833,7 +833,7 @@ static void tcp_init_metrics(struct sock *sk) ...@@ -833,7 +833,7 @@ static void tcp_init_metrics(struct sock *sk)
} }
if (dst_metric(dst, RTAX_REORDERING) && if (dst_metric(dst, RTAX_REORDERING) &&
tp->reordering != dst_metric(dst, RTAX_REORDERING)) { tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
tp->sack_ok &= ~2; tp->rx_opt.sack_ok &= ~2;
tp->reordering = dst_metric(dst, RTAX_REORDERING); tp->reordering = dst_metric(dst, RTAX_REORDERING);
} }
...@@ -867,7 +867,7 @@ static void tcp_init_metrics(struct sock *sk) ...@@ -867,7 +867,7 @@ static void tcp_init_metrics(struct sock *sk)
} }
tcp_set_rto(tp); tcp_set_rto(tp);
tcp_bound_rto(tp); tcp_bound_rto(tp);
if (tp->rto < TCP_TIMEOUT_INIT && !tp->saw_tstamp) if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
goto reset; goto reset;
tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd = tcp_init_cwnd(tp, dst);
tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_stamp = tcp_time_stamp;
...@@ -878,7 +878,7 @@ static void tcp_init_metrics(struct sock *sk) ...@@ -878,7 +878,7 @@ static void tcp_init_metrics(struct sock *sk)
* supported, TCP will fail to recalculate correct * supported, TCP will fail to recalculate correct
* rtt, if initial rto is too small. FORGET ALL AND RESET! * rtt, if initial rto is too small. FORGET ALL AND RESET!
*/ */
if (!tp->saw_tstamp && tp->srtt) { if (!tp->rx_opt.saw_tstamp && tp->srtt) {
tp->srtt = 0; tp->srtt = 0;
tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
tp->rto = TCP_TIMEOUT_INIT; tp->rto = TCP_TIMEOUT_INIT;
...@@ -901,14 +901,14 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts) ...@@ -901,14 +901,14 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1 #if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->sack_ok, tp->ca_state, tp->rx_opt.sack_ok, tp->ca_state,
tp->reordering, tp->reordering,
tp->fackets_out, tp->fackets_out,
tp->sacked_out, tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0); tp->undo_marker ? tp->undo_retrans : 0);
#endif #endif
/* Disable FACK yet. */ /* Disable FACK yet. */
tp->sack_ok &= ~2; tp->rx_opt.sack_ok &= ~2;
} }
} }
...@@ -998,13 +998,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ ...@@ -998,13 +998,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (before(start_seq, ack)) { if (before(start_seq, ack)) {
dup_sack = 1; dup_sack = 1;
tp->sack_ok |= 4; tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1 && } else if (num_sacks > 1 &&
!after(end_seq, ntohl(sp[1].end_seq)) && !after(end_seq, ntohl(sp[1].end_seq)) &&
!before(start_seq, ntohl(sp[1].start_seq))) { !before(start_seq, ntohl(sp[1].start_seq))) {
dup_sack = 1; dup_sack = 1;
tp->sack_ok |= 4; tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
} }
...@@ -1629,8 +1629,8 @@ static void tcp_cwnd_down(struct tcp_sock *tp) ...@@ -1629,8 +1629,8 @@ static void tcp_cwnd_down(struct tcp_sock *tp)
static inline int tcp_packet_delayed(struct tcp_sock *tp) static inline int tcp_packet_delayed(struct tcp_sock *tp)
{ {
return !tp->retrans_stamp || return !tp->retrans_stamp ||
(tp->saw_tstamp && tp->rcv_tsecr && (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
(__s32)(tp->rcv_tsecr - tp->retrans_stamp) < 0); (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
} }
/* Undo procedures. */ /* Undo procedures. */
...@@ -1978,7 +1978,7 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) ...@@ -1978,7 +1978,7 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag)
* answer arrives rto becomes 120 seconds! If at least one of segments * answer arrives rto becomes 120 seconds! If at least one of segments
* in window is lost... Voila. --ANK (010210) * in window is lost... Voila. --ANK (010210)
*/ */
seq_rtt = tcp_time_stamp - tp->rcv_tsecr; seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
tcp_rtt_estimator(tp, seq_rtt); tcp_rtt_estimator(tp, seq_rtt);
tcp_set_rto(tp); tcp_set_rto(tp);
tp->backoff = 0; tp->backoff = 0;
...@@ -2009,7 +2009,7 @@ static inline void tcp_ack_update_rtt(struct tcp_sock *tp, ...@@ -2009,7 +2009,7 @@ static inline void tcp_ack_update_rtt(struct tcp_sock *tp,
int flag, s32 seq_rtt) int flag, s32 seq_rtt)
{ {
/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
if (tp->saw_tstamp && tp->rcv_tsecr) if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tcp_ack_saw_tstamp(tp, flag); tcp_ack_saw_tstamp(tp, flag);
else if (seq_rtt >= 0) else if (seq_rtt >= 0)
tcp_ack_no_tstamp(tp, seq_rtt, flag); tcp_ack_no_tstamp(tp, seq_rtt, flag);
...@@ -2483,7 +2483,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) ...@@ -2483,7 +2483,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
BUG_TRAP((int)tp->sacked_out >= 0); BUG_TRAP((int)tp->sacked_out >= 0);
BUG_TRAP((int)tp->lost_out >= 0); BUG_TRAP((int)tp->lost_out >= 0);
BUG_TRAP((int)tp->retrans_out >= 0); BUG_TRAP((int)tp->retrans_out >= 0);
if (!tp->packets_out && tp->sack_ok) { if (!tp->packets_out && tp->rx_opt.sack_ok) {
if (tp->lost_out) { if (tp->lost_out) {
printk(KERN_DEBUG "Leak l=%u %d\n", printk(KERN_DEBUG "Leak l=%u %d\n",
tp->lost_out, tp->ca_state); tp->lost_out, tp->ca_state);
...@@ -2559,7 +2559,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, ...@@ -2559,7 +2559,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
u32 nwin = ntohs(skb->h.th->window); u32 nwin = ntohs(skb->h.th->window);
if (likely(!skb->h.th->syn)) if (likely(!skb->h.th->syn))
nwin <<= tp->snd_wscale; nwin <<= tp->rx_opt.snd_wscale;
if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
flag |= FLAG_WIN_UPDATE; flag |= FLAG_WIN_UPDATE;
...@@ -2979,14 +2979,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) ...@@ -2979,14 +2979,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
* But, this can also be called on packets in the established flow when * But, this can also be called on packets in the established flow when
* the fast version below fails. * the fast version below fails.
*/ */
void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab) void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
{ {
unsigned char *ptr; unsigned char *ptr;
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
int length=(th->doff*4)-sizeof(struct tcphdr); int length=(th->doff*4)-sizeof(struct tcphdr);
ptr = (unsigned char *)(th + 1); ptr = (unsigned char *)(th + 1);
tp->saw_tstamp = 0; opt_rx->saw_tstamp = 0;
while(length>0) { while(length>0) {
int opcode=*ptr++; int opcode=*ptr++;
...@@ -3009,41 +3009,41 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab) ...@@ -3009,41 +3009,41 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab)
if(opsize==TCPOLEN_MSS && th->syn && !estab) { if(opsize==TCPOLEN_MSS && th->syn && !estab) {
u16 in_mss = ntohs(*(__u16 *)ptr); u16 in_mss = ntohs(*(__u16 *)ptr);
if (in_mss) { if (in_mss) {
if (tp->user_mss && tp->user_mss < in_mss) if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
in_mss = tp->user_mss; in_mss = opt_rx->user_mss;
tp->mss_clamp = in_mss; opt_rx->mss_clamp = in_mss;
} }
} }
break; break;
case TCPOPT_WINDOW: case TCPOPT_WINDOW:
if(opsize==TCPOLEN_WINDOW && th->syn && !estab) if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
if (sysctl_tcp_window_scaling) { if (sysctl_tcp_window_scaling) {
tp->wscale_ok = 1; opt_rx->wscale_ok = 1;
tp->snd_wscale = *(__u8 *)ptr; opt_rx->snd_wscale = *(__u8 *)ptr;
if(tp->snd_wscale > 14) { if(opt_rx->snd_wscale > 14) {
if(net_ratelimit()) if(net_ratelimit())
printk(KERN_INFO "tcp_parse_options: Illegal window " printk(KERN_INFO "tcp_parse_options: Illegal window "
"scaling value %d >14 received.\n", "scaling value %d >14 received.\n",
tp->snd_wscale); opt_rx->snd_wscale);
tp->snd_wscale = 14; opt_rx->snd_wscale = 14;
} }
} }
break; break;
case TCPOPT_TIMESTAMP: case TCPOPT_TIMESTAMP:
if(opsize==TCPOLEN_TIMESTAMP) { if(opsize==TCPOLEN_TIMESTAMP) {
if ((estab && tp->tstamp_ok) || if ((estab && opt_rx->tstamp_ok) ||
(!estab && sysctl_tcp_timestamps)) { (!estab && sysctl_tcp_timestamps)) {
tp->saw_tstamp = 1; opt_rx->saw_tstamp = 1;
tp->rcv_tsval = ntohl(*(__u32 *)ptr); opt_rx->rcv_tsval = ntohl(*(__u32 *)ptr);
tp->rcv_tsecr = ntohl(*(__u32 *)(ptr+4)); opt_rx->rcv_tsecr = ntohl(*(__u32 *)(ptr+4));
} }
} }
break; break;
case TCPOPT_SACK_PERM: case TCPOPT_SACK_PERM:
if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) { if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
if (sysctl_tcp_sack) { if (sysctl_tcp_sack) {
tp->sack_ok = 1; opt_rx->sack_ok = 1;
tcp_sack_reset(tp); tcp_sack_reset(opt_rx);
} }
} }
break; break;
...@@ -3051,7 +3051,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab) ...@@ -3051,7 +3051,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab)
case TCPOPT_SACK: case TCPOPT_SACK:
if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
!((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
tp->sack_ok) { opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
} }
}; };
...@@ -3068,34 +3068,34 @@ static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, ...@@ -3068,34 +3068,34 @@ static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
struct tcp_sock *tp) struct tcp_sock *tp)
{ {
if (th->doff == sizeof(struct tcphdr)>>2) { if (th->doff == sizeof(struct tcphdr)>>2) {
tp->saw_tstamp = 0; tp->rx_opt.saw_tstamp = 0;
return 0; return 0;
} else if (tp->tstamp_ok && } else if (tp->rx_opt.tstamp_ok &&
th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
__u32 *ptr = (__u32 *)(th + 1); __u32 *ptr = (__u32 *)(th + 1);
if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
tp->saw_tstamp = 1; tp->rx_opt.saw_tstamp = 1;
++ptr; ++ptr;
tp->rcv_tsval = ntohl(*ptr); tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr; ++ptr;
tp->rcv_tsecr = ntohl(*ptr); tp->rx_opt.rcv_tsecr = ntohl(*ptr);
return 1; return 1;
} }
} }
tcp_parse_options(skb, tp, 1); tcp_parse_options(skb, &tp->rx_opt, 1);
return 1; return 1;
} }
static inline void tcp_store_ts_recent(struct tcp_sock *tp) static inline void tcp_store_ts_recent(struct tcp_sock *tp)
{ {
tp->ts_recent = tp->rcv_tsval; tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
tp->ts_recent_stamp = xtime.tv_sec; tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
} }
/*
 * Conditionally update ts_recent from the last received timestamp:
 * only when the segment carried a timestamp and its sequence does not
 * start beyond rcv_wup (guards against pure-ACK PAWS workaround cases),
 * and only if the new value is not older than ts_recent — unless
 * ts_recent itself has aged out (24 days).
 */
static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
		 * extra check below makes sure this can only happen
		 * for pure ACK frames.  -DaveM
		 *
		 * Not only, also it occurs for expired timestamps.
		 */
		if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
		    xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
			tcp_store_ts_recent(tp);
	}
}
...@@ -3145,16 +3145,16 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) ...@@ -3145,16 +3145,16 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
ack == tp->snd_una && ack == tp->snd_una &&
/* 3. ... and does not update window. */ /* 3. ... and does not update window. */
!tcp_may_update_window(tp, ack, seq, ntohs(th->window)<<tp->snd_wscale) && !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
/* 4. ... and sits in replay window. */ /* 4. ... and sits in replay window. */
(s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ); (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ);
} }
static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb)
{ {
return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW && return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS && xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
!tcp_disordered_ack(tp, skb)); !tcp_disordered_ack(tp, skb));
} }
...@@ -3267,8 +3267,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) ...@@ -3267,8 +3267,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
* Probably, we should reset in this case. For now drop them. * Probably, we should reset in this case. For now drop them.
*/ */
__skb_queue_purge(&tp->out_of_order_queue); __skb_queue_purge(&tp->out_of_order_queue);
if (tp->sack_ok) if (tp->rx_opt.sack_ok)
tcp_sack_reset(tp); tcp_sack_reset(&tp->rx_opt);
sk_stream_mem_reclaim(sk); sk_stream_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) { if (!sock_flag(sk, SOCK_DEAD)) {
...@@ -3298,22 +3298,22 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) ...@@ -3298,22 +3298,22 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
{ {
if (tp->sack_ok && sysctl_tcp_dsack) { if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
if (before(seq, tp->rcv_nxt)) if (before(seq, tp->rcv_nxt))
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
else else
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
tp->dsack = 1; tp->rx_opt.dsack = 1;
tp->duplicate_sack[0].start_seq = seq; tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq; tp->duplicate_sack[0].end_seq = end_seq;
tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok); tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
} }
} }
static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
{ {
if (!tp->dsack) if (!tp->rx_opt.dsack)
tcp_dsack_set(tp, seq, end_seq); tcp_dsack_set(tp, seq, end_seq);
else else
tcp_sack_extend(tp->duplicate_sack, seq, end_seq); tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
...@@ -3328,7 +3328,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) ...@@ -3328,7 +3328,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(tp); tcp_enter_quickack_mode(tp);
if (tp->sack_ok && sysctl_tcp_dsack) { if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq; u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
...@@ -3352,16 +3352,16 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) ...@@ -3352,16 +3352,16 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
/* See if the recent change to the first SACK eats into /* See if the recent change to the first SACK eats into
* or hits the sequence space of other SACK blocks, if so coalesce. * or hits the sequence space of other SACK blocks, if so coalesce.
*/ */
for (this_sack = 1; this_sack < tp->num_sacks; ) { for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
int i; int i;
/* Zap SWALK, by moving every further SACK up by one slot. /* Zap SWALK, by moving every further SACK up by one slot.
* Decrease num_sacks. * Decrease num_sacks.
*/ */
tp->num_sacks--; tp->rx_opt.num_sacks--;
tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok); tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
for(i=this_sack; i < tp->num_sacks; i++) for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
sp[i] = sp[i+1]; sp[i] = sp[i+1];
continue; continue;
} }
...@@ -3386,7 +3386,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) ...@@ -3386,7 +3386,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *sp = &tp->selective_acks[0];
int cur_sacks = tp->num_sacks; int cur_sacks = tp->rx_opt.num_sacks;
int this_sack; int this_sack;
if (!cur_sacks) if (!cur_sacks)
...@@ -3411,7 +3411,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) ...@@ -3411,7 +3411,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
*/ */
if (this_sack >= 4) { if (this_sack >= 4) {
this_sack--; this_sack--;
tp->num_sacks--; tp->rx_opt.num_sacks--;
sp--; sp--;
} }
for(; this_sack > 0; this_sack--, sp--) for(; this_sack > 0; this_sack--, sp--)
...@@ -3421,8 +3421,8 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) ...@@ -3421,8 +3421,8 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
/* Build the new head SACK, and we're done. */ /* Build the new head SACK, and we're done. */
sp->start_seq = seq; sp->start_seq = seq;
sp->end_seq = end_seq; sp->end_seq = end_seq;
tp->num_sacks++; tp->rx_opt.num_sacks++;
tp->eff_sacks = min(tp->num_sacks + tp->dsack, 4 - tp->tstamp_ok); tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
} }
/* RCV.NXT advances, some SACKs should be eaten. */ /* RCV.NXT advances, some SACKs should be eaten. */
...@@ -3430,13 +3430,13 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) ...@@ -3430,13 +3430,13 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
static void tcp_sack_remove(struct tcp_sock *tp) static void tcp_sack_remove(struct tcp_sock *tp)
{ {
struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *sp = &tp->selective_acks[0];
int num_sacks = tp->num_sacks; int num_sacks = tp->rx_opt.num_sacks;
int this_sack; int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
if (skb_queue_len(&tp->out_of_order_queue) == 0) { if (skb_queue_len(&tp->out_of_order_queue) == 0) {
tp->num_sacks = 0; tp->rx_opt.num_sacks = 0;
tp->eff_sacks = tp->dsack; tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
return; return;
} }
...@@ -3457,9 +3457,9 @@ static void tcp_sack_remove(struct tcp_sock *tp) ...@@ -3457,9 +3457,9 @@ static void tcp_sack_remove(struct tcp_sock *tp)
this_sack++; this_sack++;
sp++; sp++;
} }
if (num_sacks != tp->num_sacks) { if (num_sacks != tp->rx_opt.num_sacks) {
tp->num_sacks = num_sacks; tp->rx_opt.num_sacks = num_sacks;
tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok); tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
} }
} }
...@@ -3517,10 +3517,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3517,10 +3517,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
TCP_ECN_accept_cwr(tp, skb); TCP_ECN_accept_cwr(tp, skb);
if (tp->dsack) { if (tp->rx_opt.dsack) {
tp->dsack = 0; tp->rx_opt.dsack = 0;
tp->eff_sacks = min_t(unsigned int, tp->num_sacks, tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
4 - tp->tstamp_ok); 4 - tp->rx_opt.tstamp_ok);
} }
/* Queue data for delivery to the user. /* Queue data for delivery to the user.
...@@ -3578,7 +3578,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3578,7 +3578,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
tp->ack.pingpong = 0; tp->ack.pingpong = 0;
} }
if (tp->num_sacks) if (tp->rx_opt.num_sacks)
tcp_sack_remove(tp); tcp_sack_remove(tp);
tcp_fast_path_check(sk, tp); tcp_fast_path_check(sk, tp);
...@@ -3645,10 +3645,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3645,10 +3645,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!skb_peek(&tp->out_of_order_queue)) { if (!skb_peek(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */ /* Initial out of order segment, build 1 SACK. */
if (tp->sack_ok) { if (tp->rx_opt.sack_ok) {
tp->num_sacks = 1; tp->rx_opt.num_sacks = 1;
tp->dsack = 0; tp->rx_opt.dsack = 0;
tp->eff_sacks = 1; tp->rx_opt.eff_sacks = 1;
tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
tp->selective_acks[0].end_seq = tp->selective_acks[0].end_seq =
TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq;
...@@ -3662,7 +3662,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3662,7 +3662,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (seq == TCP_SKB_CB(skb1)->end_seq) { if (seq == TCP_SKB_CB(skb1)->end_seq) {
__skb_append(skb1, skb); __skb_append(skb1, skb);
if (!tp->num_sacks || if (!tp->rx_opt.num_sacks ||
tp->selective_acks[0].end_seq != seq) tp->selective_acks[0].end_seq != seq)
goto add_sack; goto add_sack;
...@@ -3710,7 +3710,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3710,7 +3710,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
} }
add_sack: add_sack:
if (tp->sack_ok) if (tp->rx_opt.sack_ok)
tcp_sack_new_ofo_skb(sk, seq, end_seq); tcp_sack_new_ofo_skb(sk, seq, end_seq);
} }
} }
...@@ -3892,8 +3892,8 @@ static int tcp_prune_queue(struct sock *sk) ...@@ -3892,8 +3892,8 @@ static int tcp_prune_queue(struct sock *sk)
* is in a sad state like this, we care only about integrity * is in a sad state like this, we care only about integrity
* of the connection not performance. * of the connection not performance.
*/ */
if (tp->sack_ok) if (tp->rx_opt.sack_ok)
tcp_sack_reset(tp); tcp_sack_reset(&tp->rx_opt);
sk_stream_mem_reclaim(sk); sk_stream_mem_reclaim(sk);
} }
...@@ -3948,7 +3948,7 @@ static void tcp_new_space(struct sock *sk) ...@@ -3948,7 +3948,7 @@ static void tcp_new_space(struct sock *sk)
!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
!tcp_memory_pressure && !tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
int sndmem = max_t(u32, tp->mss_clamp, tp->mss_cache_std) + int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache_std) +
MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
demanded = max_t(unsigned int, tp->snd_cwnd, demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1); tp->reordering + 1);
...@@ -4215,7 +4215,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -4215,7 +4215,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
* We do checksum and copy also but from device to kernel. * We do checksum and copy also but from device to kernel.
*/ */
tp->saw_tstamp = 0; tp->rx_opt.saw_tstamp = 0;
/* pred_flags is 0xS?10 << 16 + snd_wnd /* pred_flags is 0xS?10 << 16 + snd_wnd
* if header_predition is to be made * if header_predition is to be made
...@@ -4244,14 +4244,14 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -4244,14 +4244,14 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
goto slow_path; goto slow_path;
tp->saw_tstamp = 1; tp->rx_opt.saw_tstamp = 1;
++ptr; ++ptr;
tp->rcv_tsval = ntohl(*ptr); tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr; ++ptr;
tp->rcv_tsecr = ntohl(*ptr); tp->rx_opt.rcv_tsecr = ntohl(*ptr);
/* If PAWS failed, check it more carefully in slow path */ /* If PAWS failed, check it more carefully in slow path */
if ((s32)(tp->rcv_tsval - tp->ts_recent) < 0) if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
goto slow_path; goto slow_path;
/* DO NOT update ts_recent here, if checksum fails /* DO NOT update ts_recent here, if checksum fails
...@@ -4377,7 +4377,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -4377,7 +4377,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
/* /*
* RFC1323: H1. Apply PAWS check first. * RFC1323: H1. Apply PAWS check first.
*/ */
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp && if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
tcp_paws_discard(tp, skb)) { tcp_paws_discard(tp, skb)) {
if (!th->rst) { if (!th->rst) {
NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
...@@ -4449,9 +4449,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4449,9 +4449,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcphdr *th, unsigned len) struct tcphdr *th, unsigned len)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
int saved_clamp = tp->mss_clamp; int saved_clamp = tp->rx_opt.mss_clamp;
tcp_parse_options(skb, tp, 0); tcp_parse_options(skb, &tp->rx_opt, 0);
if (th->ack) { if (th->ack) {
/* rfc793: /* rfc793:
...@@ -4468,8 +4468,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4468,8 +4468,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
goto reset_and_undo; goto reset_and_undo;
if (tp->saw_tstamp && tp->rcv_tsecr && if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rcv_tsecr, tp->retrans_stamp, !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) { tcp_time_stamp)) {
NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED); NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo; goto reset_and_undo;
...@@ -4524,13 +4524,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4524,13 +4524,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_wnd = ntohs(th->window); tp->snd_wnd = ntohs(th->window);
tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq); tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
if (!tp->wscale_ok) { if (!tp->rx_opt.wscale_ok) {
tp->snd_wscale = tp->rcv_wscale = 0; tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
tp->window_clamp = min(tp->window_clamp, 65535U); tp->window_clamp = min(tp->window_clamp, 65535U);
} }
if (tp->saw_tstamp) { if (tp->rx_opt.saw_tstamp) {
tp->tstamp_ok = 1; tp->rx_opt.tstamp_ok = 1;
tp->tcp_header_len = tp->tcp_header_len =
sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
...@@ -4539,8 +4539,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4539,8 +4539,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tp->tcp_header_len = sizeof(struct tcphdr); tp->tcp_header_len = sizeof(struct tcphdr);
} }
if (tp->sack_ok && sysctl_tcp_fack) if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
tp->sack_ok |= 2; tp->rx_opt.sack_ok |= 2;
tcp_sync_mss(sk, tp->pmtu_cookie); tcp_sync_mss(sk, tp->pmtu_cookie);
tcp_initialize_rcv_mss(sk); tcp_initialize_rcv_mss(sk);
...@@ -4567,7 +4567,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4567,7 +4567,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (sock_flag(sk, SOCK_KEEPOPEN)) if (sock_flag(sk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
if (!tp->snd_wscale) if (!tp->rx_opt.snd_wscale)
__tcp_fast_path_on(tp, tp->snd_wnd); __tcp_fast_path_on(tp, tp->snd_wnd);
else else
tp->pred_flags = 0; tp->pred_flags = 0;
...@@ -4614,7 +4614,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4614,7 +4614,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
} }
/* PAWS check. */ /* PAWS check. */
if (tp->ts_recent_stamp && tp->saw_tstamp && tcp_paws_check(tp, 0)) if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
goto discard_and_undo; goto discard_and_undo;
if (th->syn) { if (th->syn) {
...@@ -4624,8 +4624,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4624,8 +4624,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
*/ */
tcp_set_state(sk, TCP_SYN_RECV); tcp_set_state(sk, TCP_SYN_RECV);
if (tp->saw_tstamp) { if (tp->rx_opt.saw_tstamp) {
tp->tstamp_ok = 1; tp->rx_opt.tstamp_ok = 1;
tcp_store_ts_recent(tp); tcp_store_ts_recent(tp);
tp->tcp_header_len = tp->tcp_header_len =
sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
...@@ -4672,13 +4672,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4672,13 +4672,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
*/ */
discard_and_undo: discard_and_undo:
tcp_clear_options(tp); tcp_clear_options(&tp->rx_opt);
tp->mss_clamp = saved_clamp; tp->rx_opt.mss_clamp = saved_clamp;
goto discard; goto discard;
reset_and_undo: reset_and_undo:
tcp_clear_options(tp); tcp_clear_options(&tp->rx_opt);
tp->mss_clamp = saved_clamp; tp->rx_opt.mss_clamp = saved_clamp;
return 1; return 1;
} }
...@@ -4696,7 +4696,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4696,7 +4696,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
int queued = 0; int queued = 0;
tp->saw_tstamp = 0; tp->rx_opt.saw_tstamp = 0;
switch (sk->sk_state) { switch (sk->sk_state) {
case TCP_CLOSE: case TCP_CLOSE:
...@@ -4751,7 +4751,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4751,7 +4751,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0; return 0;
} }
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp && if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
tcp_paws_discard(tp, skb)) { tcp_paws_discard(tp, skb)) {
if (!th->rst) { if (!th->rst) {
NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
...@@ -4811,7 +4811,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4811,7 +4811,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) << tp->snd_wnd = ntohs(th->window) <<
tp->snd_wscale; tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
TCP_SKB_CB(skb)->seq); TCP_SKB_CB(skb)->seq);
...@@ -4819,11 +4819,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4819,11 +4819,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* and does not calculate rtt. * and does not calculate rtt.
* Fix it at least with timestamps. * Fix it at least with timestamps.
*/ */
if (tp->saw_tstamp && tp->rcv_tsecr && if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!tp->srtt) !tp->srtt)
tcp_ack_saw_tstamp(tp, 0); tcp_ack_saw_tstamp(tp, 0);
if (tp->tstamp_ok) if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
/* Make sure socket is routed, for /* Make sure socket is routed, for
......
...@@ -591,8 +591,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -591,8 +591,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
if ((tp->write_seq = if ((tp->write_seq =
tw->tw_snd_nxt + 65535 + 2) == 0) tw->tw_snd_nxt + 65535 + 2) == 0)
tp->write_seq = 1; tp->write_seq = 1;
tp->ts_recent = tw->tw_ts_recent; tp->rx_opt.ts_recent = tw->tw_ts_recent;
tp->ts_recent_stamp = tw->tw_ts_recent_stamp; tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2); sock_hold(sk2);
goto unique; goto unique;
} else } else
...@@ -783,25 +783,25 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -783,25 +783,25 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->saddr = rt->rt_src; inet->saddr = rt->rt_src;
inet->rcv_saddr = inet->saddr; inet->rcv_saddr = inet->saddr;
if (tp->ts_recent_stamp && inet->daddr != daddr) { if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
/* Reset inherited state */ /* Reset inherited state */
tp->ts_recent = 0; tp->rx_opt.ts_recent = 0;
tp->ts_recent_stamp = 0; tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0; tp->write_seq = 0;
} }
if (sysctl_tcp_tw_recycle && if (sysctl_tcp_tw_recycle &&
!tp->ts_recent_stamp && rt->rt_dst == daddr) { !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
struct inet_peer *peer = rt_get_peer(rt); struct inet_peer *peer = rt_get_peer(rt);
/* VJ's idea. We save last timestamp seen from /* VJ's idea. We save last timestamp seen from
* the destination in peer table, when entering state TIME-WAIT * the destination in peer table, when entering state TIME-WAIT
* and initialize ts_recent from it, when trying new connection. * and initialize rx_opt.ts_recent from it, when trying new connection.
*/ */
if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
tp->ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
tp->ts_recent = peer->tcp_ts; tp->rx_opt.ts_recent = peer->tcp_ts;
} }
} }
...@@ -812,7 +812,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -812,7 +812,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (inet->opt) if (inet->opt)
tp->ext_header_len = inet->opt->optlen; tp->ext_header_len = inet->opt->optlen;
tp->mss_clamp = 536; tp->rx_opt.mss_clamp = 536;
/* Socket identity is still unknown (sport may be zero). /* Socket identity is still unknown (sport may be zero).
* However we set state to SYN-SENT and not releasing socket * However we set state to SYN-SENT and not releasing socket
...@@ -1393,7 +1393,7 @@ struct or_calltable or_ipv4 = { ...@@ -1393,7 +1393,7 @@ struct or_calltable or_ipv4 = {
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{ {
struct tcp_sock tp; struct tcp_options_received tmp_opt;
struct open_request *req; struct open_request *req;
__u32 saddr = skb->nh.iph->saddr; __u32 saddr = skb->nh.iph->saddr;
__u32 daddr = skb->nh.iph->daddr; __u32 daddr = skb->nh.iph->daddr;
...@@ -1435,29 +1435,29 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1435,29 +1435,29 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (!req) if (!req)
goto drop; goto drop;
tcp_clear_options(&tp); tcp_clear_options(&tmp_opt);
tp.mss_clamp = 536; tmp_opt.mss_clamp = 536;
tp.user_mss = tcp_sk(sk)->user_mss; tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
tcp_parse_options(skb, &tp, 0); tcp_parse_options(skb, &tmp_opt, 0);
if (want_cookie) { if (want_cookie) {
tcp_clear_options(&tp); tcp_clear_options(&tmp_opt);
tp.saw_tstamp = 0; tmp_opt.saw_tstamp = 0;
} }
if (tp.saw_tstamp && !tp.rcv_tsval) { if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
/* Some OSes (unknown ones, but I see them on web server, which /* Some OSes (unknown ones, but I see them on web server, which
* contains information interesting only for windows' * contains information interesting only for windows'
* users) do not send their stamp in SYN. It is easy case. * users) do not send their stamp in SYN. It is easy case.
* We simply do not advertise TS support. * We simply do not advertise TS support.
*/ */
tp.saw_tstamp = 0; tmp_opt.saw_tstamp = 0;
tp.tstamp_ok = 0; tmp_opt.tstamp_ok = 0;
} }
tp.tstamp_ok = tp.saw_tstamp; tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tp, skb); tcp_openreq_init(req, &tmp_opt, skb);
req->af.v4_req.loc_addr = daddr; req->af.v4_req.loc_addr = daddr;
req->af.v4_req.rmt_addr = saddr; req->af.v4_req.rmt_addr = saddr;
...@@ -1483,7 +1483,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1483,7 +1483,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* timewait bucket, so that all the necessary checks * timewait bucket, so that all the necessary checks
* are made in the function processing timewait state. * are made in the function processing timewait state.
*/ */
if (tp.saw_tstamp && if (tmp_opt.saw_tstamp &&
sysctl_tcp_tw_recycle && sysctl_tcp_tw_recycle &&
(dst = tcp_v4_route_req(sk, req)) != NULL && (dst = tcp_v4_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
...@@ -1987,11 +1987,11 @@ int tcp_v4_remember_stamp(struct sock *sk) ...@@ -1987,11 +1987,11 @@ int tcp_v4_remember_stamp(struct sock *sk)
} }
if (peer) { if (peer) {
if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 || if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
(peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
peer->tcp_ts_stamp <= tp->ts_recent_stamp)) { peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
peer->tcp_ts_stamp = tp->ts_recent_stamp; peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
peer->tcp_ts = tp->ts_recent; peer->tcp_ts = tp->rx_opt.ts_recent;
} }
if (release_it) if (release_it)
inet_putpeer(peer); inet_putpeer(peer);
......
...@@ -125,17 +125,17 @@ enum tcp_tw_status ...@@ -125,17 +125,17 @@ enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
struct tcphdr *th, unsigned len) struct tcphdr *th, unsigned len)
{ {
struct tcp_sock tp; struct tcp_options_received tmp_opt;
int paws_reject = 0; int paws_reject = 0;
tp.saw_tstamp = 0; tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
tcp_parse_options(skb, &tp, 0); tcp_parse_options(skb, &tmp_opt, 0);
if (tp.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tp.ts_recent = tw->tw_ts_recent; tmp_opt.ts_recent = tw->tw_ts_recent;
tp.ts_recent_stamp = tw->tw_ts_recent_stamp; tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
paws_reject = tcp_paws_check(&tp, th->rst); paws_reject = tcp_paws_check(&tmp_opt, th->rst);
} }
} }
...@@ -176,9 +176,9 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, ...@@ -176,9 +176,9 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
/* FIN arrived, enter true time-wait state. */ /* FIN arrived, enter true time-wait state. */
tw->tw_substate = TCP_TIME_WAIT; tw->tw_substate = TCP_TIME_WAIT;
tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tw->tw_ts_recent_stamp = xtime.tv_sec; tw->tw_ts_recent_stamp = xtime.tv_sec;
tw->tw_ts_recent = tp.rcv_tsval; tw->tw_ts_recent = tmp_opt.rcv_tsval;
} }
/* I am shamed, but failed to make it more elegant. /* I am shamed, but failed to make it more elegant.
...@@ -231,8 +231,8 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, ...@@ -231,8 +231,8 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
} }
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
if (tp.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tw->tw_ts_recent = tp.rcv_tsval; tw->tw_ts_recent = tmp_opt.rcv_tsval;
tw->tw_ts_recent_stamp = xtime.tv_sec; tw->tw_ts_recent_stamp = xtime.tv_sec;
} }
...@@ -259,7 +259,7 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, ...@@ -259,7 +259,7 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
if (th->syn && !th->rst && !th->ack && !paws_reject && if (th->syn && !th->rst && !th->ack && !paws_reject &&
(after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
(tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) { (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tw->tw_snd_nxt + 65535 + 2; u32 isn = tw->tw_snd_nxt + 65535 + 2;
if (isn == 0) if (isn == 0)
isn++; isn++;
...@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
int recycle_ok = 0; int recycle_ok = 0;
if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp) if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tp->af_specific->remember_stamp(sk); recycle_ok = tp->af_specific->remember_stamp(sk);
if (tcp_tw_count < sysctl_tcp_max_tw_buckets) if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
...@@ -353,15 +353,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -353,15 +353,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_dport = inet->dport; tw->tw_dport = inet->dport;
tw->tw_family = sk->sk_family; tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse; tw->tw_reuse = sk->sk_reuse;
tw->tw_rcv_wscale = tp->rcv_wscale; tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
atomic_set(&tw->tw_refcnt, 1); atomic_set(&tw->tw_refcnt, 1);
tw->tw_hashent = sk->sk_hashent; tw->tw_hashent = sk->sk_hashent;
tw->tw_rcv_nxt = tp->rcv_nxt; tw->tw_rcv_nxt = tp->rcv_nxt;
tw->tw_snd_nxt = tp->snd_nxt; tw->tw_snd_nxt = tp->snd_nxt;
tw->tw_rcv_wnd = tcp_receive_window(tp); tw->tw_rcv_wnd = tcp_receive_window(tp);
tw->tw_ts_recent = tp->ts_recent; tw->tw_ts_recent = tp->rx_opt.ts_recent;
tw->tw_ts_recent_stamp = tp->ts_recent_stamp; tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tw_dead_node_init(tw); tw_dead_node_init(tw);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
...@@ -780,13 +780,13 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, ...@@ -780,13 +780,13 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newtp->pushed_seq = newtp->write_seq; newtp->pushed_seq = newtp->write_seq;
newtp->copied_seq = req->rcv_isn + 1; newtp->copied_seq = req->rcv_isn + 1;
newtp->saw_tstamp = 0; newtp->rx_opt.saw_tstamp = 0;
newtp->dsack = 0; newtp->rx_opt.dsack = 0;
newtp->eff_sacks = 0; newtp->rx_opt.eff_sacks = 0;
newtp->probes_out = 0; newtp->probes_out = 0;
newtp->num_sacks = 0; newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0; newtp->urg_data = 0;
newtp->listen_opt = NULL; newtp->listen_opt = NULL;
newtp->accept_queue = newtp->accept_queue_tail = NULL; newtp->accept_queue = newtp->accept_queue_tail = NULL;
...@@ -809,36 +809,36 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, ...@@ -809,36 +809,36 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newsk->sk_sleep = NULL; newsk->sk_sleep = NULL;
newsk->sk_owner = NULL; newsk->sk_owner = NULL;
newtp->tstamp_ok = req->tstamp_ok; newtp->rx_opt.tstamp_ok = req->tstamp_ok;
if((newtp->sack_ok = req->sack_ok) != 0) { if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
if (sysctl_tcp_fack) if (sysctl_tcp_fack)
newtp->sack_ok |= 2; newtp->rx_opt.sack_ok |= 2;
} }
newtp->window_clamp = req->window_clamp; newtp->window_clamp = req->window_clamp;
newtp->rcv_ssthresh = req->rcv_wnd; newtp->rcv_ssthresh = req->rcv_wnd;
newtp->rcv_wnd = req->rcv_wnd; newtp->rcv_wnd = req->rcv_wnd;
newtp->wscale_ok = req->wscale_ok; newtp->rx_opt.wscale_ok = req->wscale_ok;
if (newtp->wscale_ok) { if (newtp->rx_opt.wscale_ok) {
newtp->snd_wscale = req->snd_wscale; newtp->rx_opt.snd_wscale = req->snd_wscale;
newtp->rcv_wscale = req->rcv_wscale; newtp->rx_opt.rcv_wscale = req->rcv_wscale;
} else { } else {
newtp->snd_wscale = newtp->rcv_wscale = 0; newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
newtp->window_clamp = min(newtp->window_clamp, 65535U); newtp->window_clamp = min(newtp->window_clamp, 65535U);
} }
newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale; newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
newtp->max_window = newtp->snd_wnd; newtp->max_window = newtp->snd_wnd;
if (newtp->tstamp_ok) { if (newtp->rx_opt.tstamp_ok) {
newtp->ts_recent = req->ts_recent; newtp->rx_opt.ts_recent = req->ts_recent;
newtp->ts_recent_stamp = xtime.tv_sec; newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else { } else {
newtp->ts_recent_stamp = 0; newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr); newtp->tcp_header_len = sizeof(struct tcphdr);
} }
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
newtp->mss_clamp = req->mss; newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req); TCP_ECN_openreq_child(newtp, req);
if (newtp->ecn_flags&TCP_ECN_OK) if (newtp->ecn_flags&TCP_ECN_OK)
newsk->sk_no_largesend = 1; newsk->sk_no_largesend = 1;
...@@ -863,21 +863,21 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -863,21 +863,21 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0; int paws_reject = 0;
struct tcp_sock ttp; struct tcp_options_received tmp_opt;
struct sock *child; struct sock *child;
ttp.saw_tstamp = 0; tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) { if (th->doff > (sizeof(struct tcphdr)>>2)) {
tcp_parse_options(skb, &ttp, 0); tcp_parse_options(skb, &tmp_opt, 0);
if (ttp.saw_tstamp) { if (tmp_opt.saw_tstamp) {
ttp.ts_recent = req->ts_recent; tmp_opt.ts_recent = req->ts_recent;
/* We do not store true stamp, but it is not required, /* We do not store true stamp, but it is not required,
* it can be estimated (approximately) * it can be estimated (approximately)
* from another data. * from another data.
*/ */
ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans); tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
paws_reject = tcp_paws_check(&ttp, th->rst); paws_reject = tcp_paws_check(&tmp_opt, th->rst);
} }
} }
...@@ -982,8 +982,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -982,8 +982,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
/* In sequence, PAWS is OK. */ /* In sequence, PAWS is OK. */
if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1)) if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
req->ts_recent = ttp.rcv_tsval; req->ts_recent = tmp_opt.rcv_tsval;
if (TCP_SKB_CB(skb)->seq == req->rcv_isn) { if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
/* Truncate SYN, it is out of window starting /* Truncate SYN, it is out of window starting
...@@ -1026,13 +1026,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -1026,13 +1026,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
tcp_acceptq_queue(sk, req, child); tcp_acceptq_queue(sk, req, child);
return child; return child;
listen_overflow: listen_overflow:
if (!sysctl_tcp_abort_on_overflow) { if (!sysctl_tcp_abort_on_overflow) {
req->acked = 1; req->acked = 1;
return NULL; return NULL;
} }
embryonic_reset: embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST)) if (!(flg & TCP_FLAG_RST))
req->class->send_reset(skb); req->class->send_reset(skb);
......
...@@ -236,13 +236,13 @@ static __inline__ u16 tcp_select_window(struct sock *sk) ...@@ -236,13 +236,13 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
/* Make sure we do not exceed the maximum possible /* Make sure we do not exceed the maximum possible
* scaled window. * scaled window.
*/ */
if (!tp->rcv_wscale) if (!tp->rx_opt.rcv_wscale)
new_win = min(new_win, MAX_TCP_WINDOW); new_win = min(new_win, MAX_TCP_WINDOW);
else else
new_win = min(new_win, (65535U << tp->rcv_wscale)); new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
/* RFC1323 scaling applied */ /* RFC1323 scaling applied */
new_win >>= tp->rcv_wscale; new_win >>= tp->rx_opt.rcv_wscale;
/* If we advertise zero window, disable fast path. */ /* If we advertise zero window, disable fast path. */
if (new_win == 0) if (new_win == 0)
...@@ -296,12 +296,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) ...@@ -296,12 +296,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
if(!(sysctl_flags & SYSCTL_FLAG_TSTAMPS)) if(!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
tcp_header_size += TCPOLEN_SACKPERM_ALIGNED; tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
} }
} else if (tp->eff_sacks) { } else if (tp->rx_opt.eff_sacks) {
/* A SACK is 2 pad bytes, a 2 byte header, plus /* A SACK is 2 pad bytes, a 2 byte header, plus
* 2 32-bit sequence numbers for each SACK block. * 2 32-bit sequence numbers for each SACK block.
*/ */
tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED + tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
(tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)); (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
} }
/* /*
...@@ -349,9 +349,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) ...@@ -349,9 +349,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
(sysctl_flags & SYSCTL_FLAG_TSTAMPS), (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
(sysctl_flags & SYSCTL_FLAG_SACK), (sysctl_flags & SYSCTL_FLAG_SACK),
(sysctl_flags & SYSCTL_FLAG_WSCALE), (sysctl_flags & SYSCTL_FLAG_WSCALE),
tp->rcv_wscale, tp->rx_opt.rcv_wscale,
tcb->when, tcb->when,
tp->ts_recent); tp->rx_opt.ts_recent);
} else { } else {
tcp_build_and_update_options((__u32 *)(th + 1), tcp_build_and_update_options((__u32 *)(th + 1),
tp, tcb->when); tp, tcb->when);
...@@ -607,10 +607,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) ...@@ -607,10 +607,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
/* This function synchronize snd mss to current pmtu/exthdr set. /* This function synchronize snd mss to current pmtu/exthdr set.
tp->user_mss is mss set by user by TCP_MAXSEG. It does NOT counts tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
for TCP options, but includes only bare TCP header. for TCP options, but includes only bare TCP header.
tp->mss_clamp is mss negotiated at connection setup. tp->rx_opt.mss_clamp is mss negotiated at connection setup.
It is minumum of user_mss and mss received with SYN. It is minumum of user_mss and mss received with SYN.
It also does not include TCP options. It also does not include TCP options.
...@@ -619,7 +619,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) ...@@ -619,7 +619,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
tp->mss_cache is current effective sending mss, including tp->mss_cache is current effective sending mss, including
all tcp options except for SACKs. It is evaluated, all tcp options except for SACKs. It is evaluated,
taking into account current pmtu, but never exceeds taking into account current pmtu, but never exceeds
tp->mss_clamp. tp->rx_opt.mss_clamp.
NOTE1. rfc1122 clearly states that advertised MSS NOTE1. rfc1122 clearly states that advertised MSS
DOES NOT include either tcp or ip options. DOES NOT include either tcp or ip options.
...@@ -643,8 +643,8 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) ...@@ -643,8 +643,8 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr); mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);
/* Clamp it (mss_clamp does not include tcp options) */ /* Clamp it (mss_clamp does not include tcp options) */
if (mss_now > tp->mss_clamp) if (mss_now > tp->rx_opt.mss_clamp)
mss_now = tp->mss_clamp; mss_now = tp->rx_opt.mss_clamp;
/* Now subtract optional transport overhead */ /* Now subtract optional transport overhead */
mss_now -= tp->ext_header_len + tp->ext2_header_len; mss_now -= tp->ext_header_len + tp->ext2_header_len;
...@@ -723,9 +723,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large) ...@@ -723,9 +723,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large)
mss_now = tp->mss_cache; mss_now = tp->mss_cache;
} }
if (tp->eff_sacks) if (tp->rx_opt.eff_sacks)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)); (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
return mss_now; return mss_now;
} }
...@@ -875,16 +875,16 @@ u32 __tcp_select_window(struct sock *sk) ...@@ -875,16 +875,16 @@ u32 __tcp_select_window(struct sock *sk)
* scaled window will not line up with the MSS boundary anyway. * scaled window will not line up with the MSS boundary anyway.
*/ */
window = tp->rcv_wnd; window = tp->rcv_wnd;
if (tp->rcv_wscale) { if (tp->rx_opt.rcv_wscale) {
window = free_space; window = free_space;
/* Advertise enough space so that it won't get scaled away. /* Advertise enough space so that it won't get scaled away.
* Import case: prevent zero window announcement if * Import case: prevent zero window announcement if
* 1<<rcv_wscale > mss. * 1<<rcv_wscale > mss.
*/ */
if (((window >> tp->rcv_wscale) << tp->rcv_wscale) != window) if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
window = (((window >> tp->rcv_wscale) + 1) window = (((window >> tp->rx_opt.rcv_wscale) + 1)
<< tp->rcv_wscale); << tp->rx_opt.rcv_wscale);
} else { } else {
/* Get the largest window that is a nice multiple of mss. /* Get the largest window that is a nice multiple of mss.
* Window clamp already applied above. * Window clamp already applied above.
...@@ -962,7 +962,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m ...@@ -962,7 +962,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
tp->left_out -= tcp_skb_pcount(next_skb); tp->left_out -= tcp_skb_pcount(next_skb);
} }
/* Reno case is special. Sigh... */ /* Reno case is special. Sigh... */
if (!tp->sack_ok && tp->sacked_out) { if (!tp->rx_opt.sack_ok && tp->sacked_out) {
tcp_dec_pcount_approx(&tp->sacked_out, next_skb); tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
tp->left_out -= tcp_skb_pcount(next_skb); tp->left_out -= tcp_skb_pcount(next_skb);
} }
...@@ -1200,7 +1200,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) ...@@ -1200,7 +1200,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
return; return;
/* No forward retransmissions in Reno are possible. */ /* No forward retransmissions in Reno are possible. */
if (!tp->sack_ok) if (!tp->rx_opt.sack_ok)
return; return;
/* Yeah, we have to make difficult choice between forward transmission /* Yeah, we have to make difficult choice between forward transmission
...@@ -1439,8 +1439,8 @@ static inline void tcp_connect_init(struct sock *sk) ...@@ -1439,8 +1439,8 @@ static inline void tcp_connect_init(struct sock *sk)
(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
/* If user gave his TCP_MAXSEG, record it to clamp */ /* If user gave his TCP_MAXSEG, record it to clamp */
if (tp->user_mss) if (tp->rx_opt.user_mss)
tp->mss_clamp = tp->user_mss; tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
tp->max_window = 0; tp->max_window = 0;
tcp_sync_mss(sk, dst_pmtu(dst)); tcp_sync_mss(sk, dst_pmtu(dst));
...@@ -1451,11 +1451,11 @@ static inline void tcp_connect_init(struct sock *sk) ...@@ -1451,11 +1451,11 @@ static inline void tcp_connect_init(struct sock *sk)
tcp_ca_init(tp); tcp_ca_init(tp);
tcp_select_initial_window(tcp_full_space(sk), tcp_select_initial_window(tcp_full_space(sk),
tp->advmss - (tp->ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd, &tp->rcv_wnd,
&tp->window_clamp, &tp->window_clamp,
sysctl_tcp_window_scaling, sysctl_tcp_window_scaling,
&tp->rcv_wscale); &tp->rx_opt.rcv_wscale);
tp->rcv_ssthresh = tp->rcv_wnd; tp->rcv_ssthresh = tp->rcv_wnd;
......
...@@ -353,7 +353,7 @@ static void tcp_retransmit_timer(struct sock *sk) ...@@ -353,7 +353,7 @@ static void tcp_retransmit_timer(struct sock *sk)
if (tp->retransmits == 0) { if (tp->retransmits == 0) {
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->sack_ok) { if (tp->rx_opt.sack_ok) {
if (tp->ca_state == TCP_CA_Recovery) if (tp->ca_state == TCP_CA_Recovery)
NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
else else
......
...@@ -473,8 +473,8 @@ static int tcp_v6_check_established(struct sock *sk) ...@@ -473,8 +473,8 @@ static int tcp_v6_check_established(struct sock *sk)
tp->write_seq = tw->tw_snd_nxt + 65535 + 2; tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
if (!tp->write_seq) if (!tp->write_seq)
tp->write_seq = 1; tp->write_seq = 1;
tp->ts_recent = tw->tw_ts_recent; tp->rx_opt.ts_recent = tw->tw_ts_recent;
tp->ts_recent_stamp = tw->tw_ts_recent_stamp; tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2); sock_hold(sk2);
goto unique; goto unique;
} else } else
...@@ -609,10 +609,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -609,10 +609,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL; return -EINVAL;
} }
if (tp->ts_recent_stamp && if (tp->rx_opt.ts_recent_stamp &&
!ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) { !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
tp->ts_recent = 0; tp->rx_opt.ts_recent = 0;
tp->ts_recent_stamp = 0; tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0; tp->write_seq = 0;
} }
...@@ -703,7 +703,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -703,7 +703,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen; tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
tp->ext2_header_len = dst->header_len; tp->ext2_header_len = dst->header_len;
tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
inet->dport = usin->sin6_port; inet->dport = usin->sin6_port;
...@@ -1202,7 +1202,8 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req) ...@@ -1202,7 +1202,8 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{ {
struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock tmptp, *tp = tcp_sk(sk); struct tcp_options_received tmp_opt;
struct tcp_sock *tp = tcp_sk(sk);
struct open_request *req = NULL; struct open_request *req = NULL;
__u32 isn = TCP_SKB_CB(skb)->when; __u32 isn = TCP_SKB_CB(skb)->when;
...@@ -1228,14 +1229,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1228,14 +1229,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (req == NULL) if (req == NULL)
goto drop; goto drop;
tcp_clear_options(&tmptp); tcp_clear_options(&tmp_opt);
tmptp.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmptp.user_mss = tp->user_mss; tmp_opt.user_mss = tp->rx_opt.user_mss;
tcp_parse_options(skb, &tmptp, 0); tcp_parse_options(skb, &tmp_opt, 0);
tmptp.tstamp_ok = tmptp.saw_tstamp; tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmptp, skb); tcp_openreq_init(req, &tmp_opt, skb);
req->class = &or_ipv6; req->class = &or_ipv6;
ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr); ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment