Commit 9a568de4 authored by Eric Dumazet, committed by David S. Miller

tcp: switch TCP TS option (RFC 7323) to 1ms clock

The TCP Timestamps option is defined in RFC 7323.

Traditionally on Linux, it has been tied to the internal
'jiffies' variable, because jiffies has been a cheap and
good-enough generator.

For TCP flows on the Internet, 1 ms resolution would be much better
than the 4 ms or 10 ms obtained with HZ=250 or HZ=100 respectively.

For TCP flows in the datacenter, Google has used usec resolution for
more than two years with great success [1].

Receive size autotuning (DRS) is indeed more precise and converges
faster to the optimal window size.

This patch converts tp->tcp_mstamp to a plain u64 value storing
a 1 usec TCP clock.

This choice will allow us to upstream the 1 usec TS option as
discussed at IETF 97.

[1] https://www.ietf.org/proceedings/97/slides/slides-97-tcpm-tcp-options-for-low-latency-00.pdf

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ac9517fc
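The scheme is easy to model outside the kernel: keep a 64-bit microsecond clock as ground truth, and derive the 32-bit TS option value by dividing down to 1 ms units (TCP_TS_HZ = 1000). Below is a minimal userspace sketch, not kernel code: clock_gettime(CLOCK_MONOTONIC) stands in for the kernel's local_clock(), and the helper names merely mirror the patch.

/* Sketch (userspace, illustrative only): a u64 usec clock feeding
 * 1 ms TS option values. CLOCK_MONOTONIC stands in for local_clock().
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TCP_TS_HZ	1000ULL			/* TS option ticks per second */
#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
#define USEC_PER_SEC	1000000ULL

static uint64_t tcp_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static uint64_t tcp_clock_us(void)
{
	return tcp_clock_ns() / NSEC_PER_USEC;
}

/* Refresh a cached usec stamp, only ever moving it forward */
static void tcp_mstamp_refresh(uint64_t *tcp_mstamp)
{
	uint64_t val = tcp_clock_us();

	if (val > *tcp_mstamp)
		*tcp_mstamp = val;
}

/* 32-bit TS option value: the usec stamp divided down to 1 ms units */
static uint32_t tcp_time_stamp(uint64_t tcp_mstamp)
{
	return (uint32_t)(tcp_mstamp / (USEC_PER_SEC / TCP_TS_HZ));
}

int main(void)
{
	uint64_t mstamp = 0;			/* like tp->tcp_mstamp */

	tcp_mstamp_refresh(&mstamp);
	printf("usec clock: %llu  TS option: %u\n",
	       (unsigned long long)mstamp, tcp_time_stamp(mstamp));
	return 0;
}

Note how the refresh helper only moves the cached stamp forward; that is what keeps deltas such as tcp_stamp_us_delta() non-negative even if the backing clock drifts slightly between CPUs.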
@@ -506,66 +506,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-/**
- * struct skb_mstamp - multi resolution time stamps
- * @stamp_us: timestamp in us resolution
- * @stamp_jiffies: timestamp in jiffies
- */
-struct skb_mstamp {
-	union {
-		u64		v64;
-		struct {
-			u32	stamp_us;
-			u32	stamp_jiffies;
-		};
-	};
-};
-
-/**
- * skb_mstamp_get - get current timestamp
- * @cl: place to store timestamps
- */
-static inline void skb_mstamp_get(struct skb_mstamp *cl)
-{
-	u64 val = local_clock();
-
-	do_div(val, NSEC_PER_USEC);
-	cl->stamp_us = (u32)val;
-	cl->stamp_jiffies = (u32)jiffies;
-}
-
-/**
- * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
- * @t1: pointer to newest sample
- * @t0: pointer to oldest sample
- */
-static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
-				      const struct skb_mstamp *t0)
-{
-	s32 delta_us = t1->stamp_us - t0->stamp_us;
-	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	/* If delta_us is negative, this might be because interval is too big,
-	 * or local_clock() drift is too big : fallback using jiffies.
-	 */
-	if (delta_us <= 0 ||
-	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
-		delta_us = jiffies_to_usecs(delta_jiffies);
-
-	return delta_us;
-}
-
-static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
-				    const struct skb_mstamp *t0)
-{
-	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	if (!diff)
-		diff = t1->stamp_us - t0->stamp_us;
-
-	return diff > 0;
-}
-
 /**
  * struct sk_buff - socket buffer
  * @next:	Next buffer in list
@@ -646,7 +586,7 @@ struct sk_buff {
 			union {
 				ktime_t		tstamp;
-				struct skb_mstamp skb_mstamp;
+				u64		skb_mstamp;
 			};
 		};
 		struct rb_node	rbnode; /* used in netem & tcp stack */
...
@@ -123,7 +123,7 @@ struct tcp_request_sock_ops;
 struct tcp_request_sock {
 	struct inet_request_sock	req;
 	const struct tcp_request_sock_ops *af_specific;
-	struct skb_mstamp		snt_synack; /* first SYNACK sent time */
+	u64				snt_synack; /* first SYNACK sent time */
 	bool				tfo_listener;
 	u32				txhash;
 	u32				rcv_isn;
@@ -211,7 +211,7 @@ struct tcp_sock {
 	/* Information of the most recently (s)acked skb */
 	struct tcp_rack {
-		struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+		u64 mstamp; /* (Re)sent time of the skb */
 		u32 rtt_us;  /* Associated RTT */
 		u32 end_seq; /* Ending TCP sequence of the skb */
 		u8 advanced; /* mstamp advanced since last lost marking */
@@ -240,7 +240,7 @@ struct tcp_sock {
 	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
-	struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
+	u64	tcp_mstamp;	/* most recent packet received/sent */
 	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
 	u32	mdev_us;	/* medium deviation */
 	u32	mdev_max_us;	/* maximal mdev for the last rtt period */
@@ -280,8 +280,8 @@ struct tcp_sock {
 	u32	delivered;	/* Total data packets delivered incl. rexmits */
 	u32	lost;		/* Total data packets lost incl. rexmits */
 	u32	app_limited;	/* limited until "delivered" reaches this val */
-	struct skb_mstamp first_tx_mstamp;  /* start of window send phase */
-	struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+	u64	first_tx_mstamp;  /* start of window send phase */
+	u64	delivered_mstamp; /* time we reached "delivered" */
 	u32	rate_delivered;    /* saved rate sample: packets delivered */
 	u32	rate_interval_us;  /* saved rate sample: time elapsed */
@@ -335,16 +335,16 @@ struct tcp_sock {
 
 /* Receiver side RTT estimation */
 	struct {
 		u32	rtt_us;
 		u32	seq;
-		struct skb_mstamp time;
+		u64	time;
 	} rcv_rtt_est;
 
 /* Receiver queue space */
 	struct {
 		int	space;
 		u32	seq;
-		struct skb_mstamp time;
+		u64	time;
 	} rcvq_space;
 
 /* TCP-specific MTU probe information. */
...
@@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void)
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-__u32 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
 		   const struct net *net, const struct dst_entry *dst);
@@ -706,14 +706,55 @@ void tcp_send_window_probe(struct sock *sk);
  */
 #define tcp_jiffies32 ((u32)jiffies)
 
-/* Generator for TCP TS option (RFC 7323)
- * Currently tied to 'jiffies' but will soon be driven by 1 ms clock.
- */
-#define tcp_time_stamp		((__u32)(jiffies))
+/*
+ * Deliver a 32bit value for TCP timestamp option (RFC 7323)
+ * It is no longer tied to jiffies, but to 1 ms clock.
+ * Note: double check if you want to use tcp_jiffies32 instead of this.
+ */
+#define TCP_TS_HZ	1000
+
+static inline u64 tcp_clock_ns(void)
+{
+	return local_clock();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Refresh 1us clock of a TCP socket,
+ * ensuring monotonically increasing values.
+ */
+static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+	u64 val = tcp_clock_us();
+
+	if (val > tp->tcp_mstamp)
+		tp->tcp_mstamp = val;
+}
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+	return max_t(s64, t1 - t0, 0);
+}
 
 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 {
-	return skb->skb_mstamp.stamp_jiffies;
+	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
@@ -778,9 +819,9 @@ struct tcp_skb_cb {
 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 			__u32 delivered;
 			/* start of send pipeline phase */
-			struct skb_mstamp first_tx_mstamp;
+			u64 first_tx_mstamp;
 			/* when we reached the "delivered" count */
-			struct skb_mstamp delivered_mstamp;
+			u64 delivered_mstamp;
 		} tx;   /* only used for outgoing skbs */
 		union {
 			struct inet_skb_parm	h4;
@@ -896,7 +937,7 @@ struct ack_sample {
  * A sample is invalid if "delivered" or "interval_us" is negative.
  */
 struct rate_sample {
-	struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+	u64  prior_mstamp; /* starting timestamp for interval */
 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 	s32  delivered;		/* number of packets delivered over interval */
 	long interval_us;	/* time for tp->delivered to incr "delivered" */
@@ -1862,7 +1903,7 @@ void tcp_init(void);
 /* tcp_recovery.c */
 extern void tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-			     const struct skb_mstamp *xmit_time);
+			     u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
 /*
...
@@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
  * Since subsequent timestamps use the normal tcp_time_stamp value, we
  * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
  */
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
 {
 	struct inet_request_sock *ireq;
-	u32 ts, ts_now = tcp_time_stamp;
+	u32 ts, ts_now = tcp_time_stamp_raw();
 	u32 options = 0;
 
 	ireq = inet_rsk(req);
@@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
 		ts <<= TSBITS;
 		ts |= options;
 	}
-	return ts;
+	return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
 }
@@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64	= 0;
+	treq->snt_synack	= 0;
 	treq->tfo_listener	= false;
 
 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
...
@@ -2706,7 +2706,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EPERM;
 		else
-			tp->tsoffset = val - tcp_time_stamp;
+			tp->tsoffset = val - tcp_time_stamp_raw();
 		break;
 	case TCP_REPAIR_WINDOW:
 		err = tcp_repair_set_window(tp, optval, optlen);
@@ -3072,7 +3072,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 	case TCP_TIMESTAMP:
-		val = tcp_time_stamp + tp->tsoffset;
+		val = tcp_time_stamp_raw() + tp->tsoffset;
 		break;
 	case TCP_NOTSENT_LOWAT:
 		val = tp->notsent_lowat;
...
@@ -91,7 +91,7 @@ struct bbr {
 	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
 	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
 	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
-	struct skb_mstamp cycle_mstamp;  /* time of this cycle phase start */
+	u64	cycle_mstamp;	     /* time of this cycle phase start */
 	u32     mode:3,		     /* current bbr_mode in state machine */
 		prev_ca_state:3,     /* CA state on previous ACK */
 		packet_conservation:1,  /* use packet conservation? */
@@ -411,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 	bool is_full_length =
-		skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
 		bbr->min_rtt_us;
 	u32 inflight, bw;
@@ -497,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 
-	bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
 	bbr->lt_last_delivered = tp->delivered;
 	bbr->lt_last_lost = tp->lost;
 	bbr->lt_rtt_cnt = 0;
@@ -551,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 	struct bbr *bbr = inet_csk_ca(sk);
 	u32 lost, delivered;
 	u64 bw;
-	s32 t;
+	u32 t;
 
 	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
 		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -603,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 		return;
 
 	/* Find average delivery rate in this sampling interval. */
-	t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
-	if (t < 1)
-		return;		/* interval is less than one jiffy, so wait */
-	t = jiffies_to_usecs(t);
-	/* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
-	if (t < 1) {
+	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+	if ((s32)t < 1)
+		return;		/* interval is less than one ms, so wait */
+	/* Check if can multiply without overflow */
+	if (t >= ~0U / USEC_PER_MSEC) {
 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
 		return;
 	}
+	t *= USEC_PER_MSEC;
 	bw = (u64)delivered * BW_UNIT;
 	do_div(bw, t);
 	bbr_lt_bw_interval_done(sk, bw);
@@ -825,7 +825,7 @@ static void bbr_init(struct sock *sk)
 	bbr->idle_restart = 0;
 	bbr->full_bw = 0;
 	bbr->full_bw_cnt = 0;
-	bbr->cycle_mstamp.v64 = 0;
+	bbr->cycle_mstamp = 0;
 	bbr->cycle_idx = 0;
 	bbr_reset_lt_bw_sampling(sk);
 	bbr_reset_startup_mode(sk);
...
@@ -441,7 +441,7 @@ void tcp_init_buffer_space(struct sock *sk)
 		tcp_sndbuf_expand(sk);
 
 	tp->rcvq_space.space = tp->rcv_wnd;
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	tp->rcvq_space.time = tp->tcp_mstamp;
 	tp->rcvq_space.seq = tp->copied_seq;
@@ -555,11 +555,11 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 {
 	u32 delta_us;
 
-	if (tp->rcv_rtt_est.time.v64 == 0)
+	if (tp->rcv_rtt_est.time == 0)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time);
+	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
 	tcp_rcv_rtt_update(tp, delta_us, 1);
 
 new_measure:
@@ -571,13 +571,15 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 					  const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->rx_opt.rcv_tsecr &&
 	    (TCP_SKB_CB(skb)->end_seq -
-	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
-		tcp_rcv_rtt_update(tp,
-				   jiffies_to_usecs(tcp_time_stamp -
-						    tp->rx_opt.rcv_tsecr),
-				   0);
+	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+		tcp_rcv_rtt_update(tp, delta_us, 0);
+	}
 }
 
 /*
@@ -590,7 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	int time;
 	int copied;
 
-	time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time);
+	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
 	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
 		return;
@@ -1134,8 +1136,8 @@ struct tcp_sacktag_state {
 	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
 	 * but congestion control should still get an accurate delay signal.
 	 */
-	struct skb_mstamp first_sackt;
-	struct skb_mstamp last_sackt;
+	u64	first_sackt;
+	u64	last_sackt;
 	struct rate_sample *rate;
 	int	flag;
 };
@@ -1200,7 +1202,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount,
-			  const struct skb_mstamp *xmit_time)
+			  u64 xmit_time)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1242,9 +1244,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
 						state->reord);
 			if (!after(end_seq, tp->high_seq))
 				state->flag |= FLAG_ORIG_SACK_ACKED;
-			if (state->first_sackt.v64 == 0)
-				state->first_sackt = *xmit_time;
-			state->last_sackt = *xmit_time;
+			if (state->first_sackt == 0)
+				state->first_sackt = xmit_time;
+			state->last_sackt = xmit_time;
 		}
 
 		if (sacked & TCPCB_LOST) {
@@ -1304,7 +1306,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
 			start_seq, end_seq, dup_sack, pcount,
-			&skb->skb_mstamp);
+			skb->skb_mstamp);
 	tcp_rate_skb_delivered(sk, skb, state->rate);
 
 	if (skb == tp->lost_skb_hint)
@@ -1356,8 +1358,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_advance_highest_sack(sk, skb);
 	tcp_skb_collapse_tstamp(prev, skb);
-	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
-		TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
+		TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
 
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
@@ -1587,7 +1589,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
 						tcp_skb_pcount(skb),
-						&skb->skb_mstamp);
+						skb->skb_mstamp);
 			tcp_rate_skb_delivered(sk, skb, state->rate);
 
 			if (!before(TCP_SKB_CB(skb)->seq,
@@ -2936,9 +2938,12 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
 	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-	    flag & FLAG_ACKED)
-		seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
-							  tp->rx_opt.rcv_tsecr);
+	    flag & FLAG_ACKED) {
+		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+		seq_rtt_us = ca_rtt_us = delta_us;
+	}
 
 	if (seq_rtt_us < 0)
 		return false;
@@ -2960,12 +2965,8 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
 {
 	long rtt_us = -1L;
 
-	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
-		struct skb_mstamp now;
-
-		skb_mstamp_get(&now);
-		rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
-	}
+	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
+		rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
 
 	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
 }
@@ -3003,7 +3004,7 @@ void tcp_rearm_rto(struct sock *sk)
 		struct sk_buff *skb = tcp_write_queue_head(sk);
 		const u32 rto_time_stamp =
 			tcp_skb_timestamp(skb) + rto;
-		s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+		s32 delta = (s32)(rto_time_stamp - tcp_jiffies32);
 		/* delta may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
 		 */
@@ -3060,9 +3061,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			       struct tcp_sacktag_state *sack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct skb_mstamp first_ackt, last_ackt;
+	u64 first_ackt, last_ackt;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct skb_mstamp *now = &tp->tcp_mstamp;
 	u32 prior_sacked = tp->sacked_out;
 	u32 reord = tp->packets_out;
 	bool fully_acked = true;
@@ -3075,7 +3075,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	bool rtt_update;
 	int flag = 0;
 
-	first_ackt.v64 = 0;
+	first_ackt = 0;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3106,8 +3106,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 				flag |= FLAG_RETRANS_DATA_ACKED;
 		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
 			last_ackt = skb->skb_mstamp;
-			WARN_ON_ONCE(last_ackt.v64 == 0);
-			if (!first_ackt.v64)
+			WARN_ON_ONCE(last_ackt == 0);
+			if (!first_ackt)
 				first_ackt = last_ackt;
 
 			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
@@ -3122,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			tp->delivered += acked_pcount;
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
-						 &skb->skb_mstamp);
+						 skb->skb_mstamp);
 		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
@@ -3165,13 +3165,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
-		seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
-		ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
+	if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
+		seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
+		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
 	}
-	if (sack->first_sackt.v64) {
-		sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
-		ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
+	if (sack->first_sackt) {
+		sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
+		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
 	}
 	sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
 	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
@@ -3201,7 +3201,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
-		   sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
+		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
@@ -3553,7 +3553,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int acked = 0; /* Number of packets newly acked */
 	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
-	sack_state.first_sackt.v64 = 0;
+	sack_state.first_sackt = 0;
 	sack_state.rate = &rs;
 
 	/* We very likely will need to access write queue head. */
@@ -5356,7 +5356,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
@@ -5672,7 +5672,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
-			     tcp_time_stamp)) {
+			     tcp_time_stamp(tp))) {
 			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
@@ -5917,7 +5917,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 	case TCP_SYN_SENT:
 		tp->rx_opt.saw_tstamp = 0;
-		skb_mstamp_get(&tp->tcp_mstamp);
+		tcp_mstamp_refresh(tp);
 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
@@ -5929,7 +5929,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
@@ -6202,7 +6202,7 @@ static void tcp_openreq_init(struct request_sock *req,
 	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-	skb_mstamp_get(&tcp_rsk(req)->snt_synack);
+	tcp_rsk(req)->snt_synack = tcp_clock_us();
 	tcp_rsk(req)->last_oow_ack_time = 0;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
...
@@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	struct sock *sk;
 	struct sk_buff *skb;
 	struct request_sock *fastopen;
-	__u32 seq, snd_una;
-	__u32 remaining;
+	u32 seq, snd_una;
+	s32 remaining;
+	u32 delta_us;
 	int err;
 	struct net *net = dev_net(icmp_skb->dev);
@@ -483,12 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
-		skb_mstamp_get(&tp->tcp_mstamp);
+		tcp_mstamp_refresh(tp);
+		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 		remaining = icsk->icsk_rto -
-			    min(icsk->icsk_rto,
-				tcp_time_stamp - tcp_skb_timestamp(skb));
+			    usecs_to_jiffies(delta_us);
 
-		if (remaining) {
+		if (remaining > 0) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
 		} else {
@@ -812,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v4_send_ack(sk, skb,
 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
@@ -840,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	tcp_v4_send_ack(sk, skb, seq,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent,
 			0,
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
...
@@ -37,7 +37,7 @@
 #include <net/tcp.h>
 
 /* resolution of owd */
-#define LP_RESOL       1000
+#define LP_RESOL       TCP_TS_HZ
 
 /**
  * enum tcp_lp_state
@@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
 	    tp->rx_opt.rcv_tsecr == lp->local_ref_time)
 		goto out;
 
-	m = HZ * (tp->rx_opt.rcv_tsval -
-		  lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
-					  lp->local_ref_time);
+	m = TCP_TS_HZ *
+	    (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+	    (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
 	if (m < 0)
 		m = -m;
@@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
 	if (lp->flag & LP_VALID_RHZ) {
 		owd =
 		    tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
-		    tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+		    tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
 		if (owd < 0)
 			owd = -owd;
 	}
@@ -264,7 +264,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
-	u32 now = tcp_time_stamp;
+	u32 now = tcp_time_stamp(tp);
 	u32 delta;
 
 	if (sample->rtt_us > 0)
...
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fackets_out = 0;
 		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 		newtp->tlp_high_seq = 0;
-		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+		newtp->lsndtime = tcp_jiffies32;
 		newsk->sk_txhash = treq->txhash;
 		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fastopen_req = NULL;
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
-		newtp->rack.mstamp.v64 = 0;
+		newtp->rack.mstamp = 0;
 		newtp->rack.advanced = 0;
 
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
...
@@ -1962,7 +1962,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	head = tcp_write_queue_head(sk);
-	age = skb_mstamp_us_delta(&tp->tcp_mstamp, &head->skb_mstamp);
+	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
 	/* If next ACK is likely to come too late (half srtt), do not defer */
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
@@ -2279,7 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
@@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
-	skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+	tcp_mstamp_refresh(tcp_sk(sk));
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3191,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+		skb->skb_mstamp = cookie_init_timestamp(req);
 	else
 #endif
-		skb_mstamp_get(&skb->skb_mstamp);
+		skb->skb_mstamp = tcp_clock_us();
 
 #ifdef CONFIG_TCP_MD5SIG
 	rcu_read_lock();
@@ -3453,8 +3453,8 @@ int tcp_connect(struct sock *sk)
 		return -ENOBUFS;
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	skb_mstamp_get(&tp->tcp_mstamp);
-	tp->retrans_stamp = tp->tcp_mstamp.stamp_jiffies;
+	tcp_mstamp_refresh(tp);
+	tp->retrans_stamp = tcp_time_stamp(tp);
 	tcp_connect_queue_skb(sk, buff);
 	tcp_ecn_send_syn(sk, buff);
@@ -3615,7 +3615,7 @@ void tcp_send_window_probe(struct sock *sk)
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
-		skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+		tcp_mstamp_refresh(tcp_sk(sk));
 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
 	}
 }
...
@@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 
-	if (!scb->tx.delivered_mstamp.v64)
+	if (!scb->tx.delivered_mstamp)
 		return;
 
 	if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 		rs->is_retrans	   = scb->sacked & TCPCB_RETRANS;
 
 		/* Find the duration of the "send phase" of this window: */
-		rs->interval_us      = skb_mstamp_us_delta(
-						&skb->skb_mstamp,
-						&scb->tx.first_tx_mstamp);
+		rs->interval_us      = tcp_stamp_us_delta(
+						skb->skb_mstamp,
+						scb->tx.first_tx_mstamp);
 
 		/* Record send time of most recently ACKed packet: */
 		tp->first_tx_mstamp  = skb->skb_mstamp;
@@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	 * we don't need to reset since it'll be freed soon.
 	 */
 	if (scb->sacked & TCPCB_SACKED_ACKED)
-		scb->tx.delivered_mstamp.v64 = 0;
+		scb->tx.delivered_mstamp = 0;
 }
 
 /* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
 	rs->losses = lost;		/* freshly marked lost */
 	/* Return an invalid sample if no timing information is available. */
-	if (!rs->prior_mstamp.v64) {
+	if (!rs->prior_mstamp) {
 		rs->delivered = -1;
 		rs->interval_us = -1;
 		return;
@@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	 * longer phase.
 	 */
 	snd_us = rs->interval_us;				/* send phase */
-	ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
-				     &rs->prior_mstamp); /* ack phase */
+	ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+				    rs->prior_mstamp); /* ack phase */
 	rs->interval_us = max(snd_us, ack_us);
 
 	/* Normally we expect interval_us >= min-rtt.
...
@@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
-				const struct skb_mstamp *t2,
-				u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
 {
-	return skb_mstamp_after(t1, t2) ||
-	       (t1->v64 == t2->v64 && after(seq1, seq2));
+	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
 }
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 		    scb->sacked & TCPCB_SACKED_ACKED)
 			continue;
 
-		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
 					tp->rack.end_seq, scb->end_seq)) {
 			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
 			 * A packet is lost if its elapsed time is beyond
 			 * the recent RTT plus the reordering window.
 			 */
-			u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
-							  &skb->skb_mstamp);
+			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+							 skb->skb_mstamp);
 			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
 
 			if (remaining < 0) {
@@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk)
  * draft-cheng-tcpm-rack-00.txt
  */
 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-		      const struct skb_mstamp *xmit_time)
+		      u64 xmit_time)
 {
 	u32 rtt_us;
 
-	if (tp->rack.mstamp.v64 &&
-	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+	if (tp->rack.mstamp &&
+	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
 				 end_seq, tp->rack.end_seq))
 		return;
 
-	rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
 	if (sacked & TCPCB_RETRANS) {
 		/* If the sacked packet was retransmitted, it's ambiguous
 		 * whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 			return;
 	}
 	tp->rack.rtt_us = rtt_us;
-	tp->rack.mstamp = *xmit_time;
+	tp->rack.mstamp = xmit_time;
 	tp->rack.end_seq = end_seq;
 	tp->rack.advanced = 1;
 }
...
@@ -153,8 +153,8 @@ static bool retransmits_timed_out(struct sock *sk,
 				  unsigned int timeout,
 				  bool syn_set)
 {
-	unsigned int linear_backoff_thresh, start_ts;
 	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+	unsigned int linear_backoff_thresh, start_ts;
 
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
@@ -172,7 +172,7 @@ static bool retransmits_timed_out(struct sock *sk,
 		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 	}
-	return (tcp_time_stamp - start_ts) >= timeout;
+	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -341,7 +341,7 @@ static void tcp_probe_timer(struct sock *sk)
 	if (!start_ts)
 		tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
 	else if (icsk->icsk_user_timeout &&
-		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+		 (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
 		goto abort;
 
 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -561,7 +561,7 @@ void tcp_write_timer_handler(struct sock *sk)
 		goto out;
 	}
 
-	skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+	tcp_mstamp_refresh(tcp_sk(sk));
 	event = icsk->icsk_pending;
 
 	switch (event) {
...
@@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64	= 0;
+	treq->snt_synack	= 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
...
@@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
@@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
...
@@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
 				    struct synproxy_options *opts)
 {
 	opts->tsecr = opts->tsval;
-	opts->tsval = tcp_time_stamp & ~0x3f;
+	opts->tsval = tcp_time_stamp_raw() & ~0x3f;
 
 	if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
 		opts->tsval |= opts->wscale;