Commit 5bbbcc00 authored by David S. Miller

Merge branch 'tcp-loss-probe'

Yuchung Cheng says:

====================
minor tail loss probe improvements

This patch series enhances the tail loss probe (TLP) under some error
conditions. When TLP fails to send a probe, it no longer extends the
RTO. When it fails to send a new packet because of the receiver window
limit, it retransmits the last packet instead.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 646244b2 b340b264
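For orientation before the diff: when the probe timeout (PTO) fires, the new code first tries to send a fresh segment, falls back to retransmitting the tail of the write queue when the receiver window forbids new data, and otherwise just rearms the timer. Below is a minimal, compilable sketch of that decision flow; the struct fields and helper names are simplified stand-ins invented for illustration, not the kernel's actual types or API.

/* Condensed model of the probe-time decision flow this series
 * introduces. All names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct conn {
	bool have_unsent;      /* models tcp_send_head() != NULL   */
	bool rwnd_allows_send; /* models tcp_snd_wnd_test() passing */
	bool tlp_outstanding;  /* models tp->tlp_high_seq being set */
};

/* Returns a description of what the PTO handler would do. */
static const char *loss_probe_action(const struct conn *c)
{
	if (c->have_unsent && c->rwnd_allows_send)
		return "send a new segment as the probe";
	/* New in this series: when the receiver window forbids new
	 * data, fall through and retransmit instead of giving up.
	 */
	if (c->tlp_outstanding)
		return "no probe: a TLP retransmission is already outstanding";
	return "retransmit the last segment as the probe";
}

int main(void)
{
	struct conn rwnd_limited = { true, false, false };

	printf("%s\n", loss_probe_action(&rwnd_limited));
	return 0;
}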
@@ -2149,7 +2149,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		tcp_cwnd_validate(sk, is_cwnd_limited);
 		return false;
 	}
-	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+	return !tp->packets_out && tcp_send_head(sk);
 }
 
 bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2226,7 +2226,7 @@ static bool skb_still_in_host_queue(const struct sock *sk,
 	return false;
 }
 
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try send a new segment if possible, else
  * retransmit the last segment.
  */
 void tcp_send_loss_probe(struct sock *sk)
@@ -2235,19 +2235,26 @@ void tcp_send_loss_probe(struct sock *sk)
 	struct sk_buff *skb;
 	int pcount;
 	int mss = tcp_current_mss(sk);
-	int err = -1;
 
-	if (tcp_send_head(sk)) {
-		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
-		goto rearm_timer;
+	skb = tcp_send_head(sk);
+	if (skb) {
+		if (tcp_snd_wnd_test(tp, skb, mss)) {
+			pcount = tp->packets_out;
+			tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+			if (tp->packets_out > pcount)
+				goto probe_sent;
+			goto rearm_timer;
+		}
+		skb = tcp_write_queue_prev(sk, skb);
+	} else {
+		skb = tcp_write_queue_tail(sk);
 	}
 
 	/* At most one outstanding TLP retransmission. */
 	if (tp->tlp_high_seq)
 		goto rearm_timer;
 
-	/* Retransmit last segment. */
-	skb = tcp_write_queue_tail(sk);
 	if (WARN_ON(!skb))
 		goto rearm_timer;
@@ -2262,26 +2269,24 @@ void tcp_send_loss_probe(struct sock *sk)
 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
 					  GFP_ATOMIC)))
 			goto rearm_timer;
-		skb = tcp_write_queue_tail(sk);
+		skb = tcp_write_queue_next(sk, skb);
 	}
 
 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
 		goto rearm_timer;
 
-	err = __tcp_retransmit_skb(sk, skb);
+	if (__tcp_retransmit_skb(sk, skb))
+		goto rearm_timer;
 
 	/* Record snd_nxt for loss detection. */
-	if (likely(!err))
-		tp->tlp_high_seq = tp->snd_nxt;
+	tp->tlp_high_seq = tp->snd_nxt;
 
+probe_sent:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	/* Reset s.t. tcp_rearm_rto will restart timer from now */
+	inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
-	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-				  inet_csk(sk)->icsk_rto,
-				  TCP_RTO_MAX);
-
-	if (likely(!err))
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPLOSSPROBES);
+	tcp_rearm_rto(sk);
 }
 
 /* Push out any pending frames which were held back due to
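One more note on the diff's failure paths: they now all converge on tcp_rearm_rto() instead of unconditionally re-arming a full icsk_rto from the current time, while the success path clears icsk_pending first so the timer does restart from now. A rough standalone model of why that keeps a failed probe from extending the RTO; the names and timeline arithmetic below are illustrative assumptions, not the kernel's implementation:

/* Simplified model: rearm relative to when the oldest outstanding
 * packet was sent, rather than for a full RTO from "now".
 */
#include <stdio.h>

typedef unsigned int u32;

static u32 remaining_rto(u32 rto, u32 now, u32 head_sent_time)
{
	u32 elapsed = now - head_sent_time;

	/* A failed probe attempt no longer restarts the full RTO;
	 * clamp so the timer still fires promptly if it is overdue.
	 */
	return elapsed < rto ? rto - elapsed : 1;
}

int main(void)
{
	u32 rto = 300, sent = 1000, now = 1200; /* all in ms, made up */

	printf("old behavior rearms for %u ms from now\n", rto);
	printf("new behavior rearms for %u ms from now\n",
	       remaining_rto(rto, now, sent));
	return 0;
}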