Commit 005903bc authored by Ilpo Järvinen's avatar Ilpo Järvinen Committed by David S. Miller

[TCP]: Left out sync->verify (the new meaning of it) & definify

Left_out was dropped a while ago, leaving verification of the
"left out" consistency as the only task for the function in
question. Thus make its name more appropriate.

In addition, it is intentionally converted to #define instead
of static inline because the location of the invariant failure
is the most important thing to have if this ever triggers. I
think it would have been helpful e.g. in this case where the
location of the failure point had to be based on some guesswork:
    http://lkml.org/lkml/2007/5/2/464
...Luckily the guesswork seems to have proved to be correct.
Signed-off-by: default avatarIlpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 83ae4088
...@@ -758,10 +758,9 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) ...@@ -758,10 +758,9 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
(tp->snd_cwnd >> 2))); (tp->snd_cwnd >> 2)));
} }
static inline void tcp_sync_left_out(struct tcp_sock *tp) /* Use define here intentionally to get BUG_ON location shown at the caller */
{ #define tcp_verify_left_out(tp) \
BUG_ON(tp->rx_opt.sack_ok && (tcp_left_out(tp) > tp->packets_out)); BUG_ON(tp->rx_opt.sack_ok && (tcp_left_out(tp) > tp->packets_out))
}
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
......
...@@ -1383,7 +1383,7 @@ static void tcp_add_reno_sack(struct sock *sk) ...@@ -1383,7 +1383,7 @@ static void tcp_add_reno_sack(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
tp->sacked_out++; tp->sacked_out++;
tcp_check_reno_reordering(sk, 0); tcp_check_reno_reordering(sk, 0);
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
} }
/* Account for ACK, ACKing some data in Reno Recovery phase. */ /* Account for ACK, ACKing some data in Reno Recovery phase. */
...@@ -1400,7 +1400,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked) ...@@ -1400,7 +1400,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
tp->sacked_out -= acked-1; tp->sacked_out -= acked-1;
} }
tcp_check_reno_reordering(sk, acked); tcp_check_reno_reordering(sk, acked);
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
} }
static inline void tcp_reset_reno_sack(struct tcp_sock *tp) static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
...@@ -1496,7 +1496,7 @@ void tcp_enter_frto(struct sock *sk) ...@@ -1496,7 +1496,7 @@ void tcp_enter_frto(struct sock *sk)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb); tp->retrans_out -= tcp_skb_pcount(skb);
} }
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
/* Earlier loss recovery underway (see RFC4138; Appendix B). /* Earlier loss recovery underway (see RFC4138; Appendix B).
* The last condition is necessary at least in tp->frto_counter case. * The last condition is necessary at least in tp->frto_counter case.
...@@ -1551,7 +1551,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) ...@@ -1551,7 +1551,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tp->lost_out += tcp_skb_pcount(skb); tp->lost_out += tcp_skb_pcount(skb);
} }
} }
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
tp->snd_cwnd_cnt = 0; tp->snd_cwnd_cnt = 0;
...@@ -1626,7 +1626,7 @@ void tcp_enter_loss(struct sock *sk, int how) ...@@ -1626,7 +1626,7 @@ void tcp_enter_loss(struct sock *sk, int how)
tp->fackets_out = cnt; tp->fackets_out = cnt;
} }
} }
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
tp->reordering = min_t(unsigned int, tp->reordering, tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering); sysctl_tcp_reordering);
...@@ -1861,7 +1861,7 @@ static void tcp_mark_head_lost(struct sock *sk, ...@@ -1861,7 +1861,7 @@ static void tcp_mark_head_lost(struct sock *sk,
tcp_verify_retransmit_hint(tp, skb); tcp_verify_retransmit_hint(tp, skb);
} }
} }
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
} }
/* Account newly detected lost packet(s) */ /* Account newly detected lost packet(s) */
...@@ -1905,7 +1905,7 @@ static void tcp_update_scoreboard(struct sock *sk) ...@@ -1905,7 +1905,7 @@ static void tcp_update_scoreboard(struct sock *sk)
tp->scoreboard_skb_hint = skb; tp->scoreboard_skb_hint = skb;
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
} }
} }
...@@ -2217,8 +2217,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) ...@@ -2217,8 +2217,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
} }
/* D. Synchronize left_out to current state. */ /* D. Check consistency of the current state. */
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
/* E. Check state exit conditions. State can be terminated /* E. Check state exit conditions. State can be terminated
* when high_seq is ACKed. */ * when high_seq is ACKed. */
...@@ -2765,7 +2765,7 @@ static int tcp_process_frto(struct sock *sk, int flag) ...@@ -2765,7 +2765,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
/* Duplicate the behavior from Loss state (fastretrans_alert) */ /* Duplicate the behavior from Loss state (fastretrans_alert) */
if (flag&FLAG_DATA_ACKED) if (flag&FLAG_DATA_ACKED)
......
...@@ -739,7 +739,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss ...@@ -739,7 +739,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
/* Adjust Reno SACK estimate. */ /* Adjust Reno SACK estimate. */
if (!tp->rx_opt.sack_ok) { if (!tp->rx_opt.sack_ok) {
tcp_dec_pcount_approx_int(&tp->sacked_out, diff); tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
} }
tcp_dec_pcount_approx_int(&tp->fackets_out, diff); tcp_dec_pcount_approx_int(&tp->fackets_out, diff);
...@@ -1774,7 +1774,7 @@ void tcp_simple_retransmit(struct sock *sk) ...@@ -1774,7 +1774,7 @@ void tcp_simple_retransmit(struct sock *sk)
if (!lost) if (!lost)
return; return;
tcp_sync_left_out(tp); tcp_verify_left_out(tp);
/* Don't muck with the congestion window here. /* Don't muck with the congestion window here.
* Reason is that we do not increase amount of _data_ * Reason is that we do not increase amount of _data_
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment