Commit 563d34d0 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: don't drop MTU reduction indications

ICMP messages generated in output path if frame length is bigger than
mtu are actually lost because socket is owned by user (doing the xmit)

One example is the ipgre_tunnel_xmit() calling
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));

We had a similar case fixed in commit a34a101e (ipv6: disable GSO on
sockets hitting dst_allfrag).

Problem of such fix is that it relied on retransmit timers, so short tcp
sessions paid a too big latency increase price.

This patch uses the tcp_release_cb() infrastructure so that MTU
reduction messages (ICMP messages) are not lost, and no extra delay
is added in TCP transmits.
Reported-by: Maciej Żenczykowski <maze@google.com>
Diagnosed-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Tore Anderson <tore@fud.no>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c3def943
...@@ -493,6 +493,9 @@ struct tcp_sock { ...@@ -493,6 +493,9 @@ struct tcp_sock {
u32 probe_seq_start; u32 probe_seq_start;
u32 probe_seq_end; u32 probe_seq_end;
} mtu_probe; } mtu_probe;
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */ /* TCP AF-Specific parts; only used by MD5 Signature support so far */
...@@ -518,6 +521,9 @@ enum tsq_flags { ...@@ -518,6 +521,9 @@ enum tsq_flags {
TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
TCP_WRITE_TIMER_DEFERRED, /* tcp_write_timer() found socket was owned */ TCP_WRITE_TIMER_DEFERRED, /* tcp_write_timer() found socket was owned */
TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */ TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
}; };
static inline struct tcp_sock *tcp_sk(const struct sock *sk) static inline struct tcp_sock *tcp_sk(const struct sock *sk)
......
...@@ -859,6 +859,7 @@ struct proto { ...@@ -859,6 +859,7 @@ struct proto {
struct sk_buff *skb); struct sk_buff *skb);
void (*release_cb)(struct sock *sk); void (*release_cb)(struct sock *sk);
void (*mtu_reduced)(struct sock *sk);
/* Keeping track of sk's, looking them up, and port selection methods. */ /* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk); void (*hash)(struct sock *sk);
......
...@@ -275,12 +275,15 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -275,12 +275,15 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
EXPORT_SYMBOL(tcp_v4_connect); EXPORT_SYMBOL(tcp_v4_connect);
/* /*
* This routine does path mtu discovery as defined in RFC1191. * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
* It can be called through tcp_release_cb() if socket was owned by user
* at the time tcp_v4_err() was called to handle ICMP message.
*/ */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu) static void tcp_v4_mtu_reduced(struct sock *sk)
{ {
struct dst_entry *dst; struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
u32 mtu = tcp_sk(sk)->mtu_info;
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
* send out by Linux are always <576bytes so they should go through * send out by Linux are always <576bytes so they should go through
...@@ -373,8 +376,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) ...@@ -373,8 +376,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
bh_lock_sock(sk); bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy /* If too many ICMPs get dropped on busy
* servers this needs to be solved differently. * servers this needs to be solved differently.
* We do take care of PMTU discovery (RFC1191) special case :
* we can receive locally generated ICMP messages while socket is held.
*/ */
if (sock_owned_by_user(sk)) if (sock_owned_by_user(sk) &&
type != ICMP_DEST_UNREACH &&
code != ICMP_FRAG_NEEDED)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE) if (sk->sk_state == TCP_CLOSE)
...@@ -409,8 +416,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) ...@@ -409,8 +416,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
goto out; goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
tp->mtu_info = info;
if (!sock_owned_by_user(sk)) if (!sock_owned_by_user(sk))
do_pmtu_discovery(sk, iph, info); tcp_v4_mtu_reduced(sk);
else
set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out; goto out;
} }
...@@ -2596,6 +2606,7 @@ struct proto tcp_prot = { ...@@ -2596,6 +2606,7 @@ struct proto tcp_prot = {
.sendpage = tcp_sendpage, .sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv, .backlog_rcv = tcp_v4_do_rcv,
.release_cb = tcp_release_cb, .release_cb = tcp_release_cb,
.mtu_reduced = tcp_v4_mtu_reduced,
.hash = inet_hash, .hash = inet_hash,
.unhash = inet_unhash, .unhash = inet_unhash,
.get_port = inet_csk_get_port, .get_port = inet_csk_get_port,
......
...@@ -885,7 +885,8 @@ static void tcp_tasklet_func(unsigned long data) ...@@ -885,7 +885,8 @@ static void tcp_tasklet_func(unsigned long data)
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \ #define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
(1UL << TCP_WRITE_TIMER_DEFERRED) | \ (1UL << TCP_WRITE_TIMER_DEFERRED) | \
(1UL << TCP_DELACK_TIMER_DEFERRED)) (1UL << TCP_DELACK_TIMER_DEFERRED) | \
(1UL << TCP_MTU_REDUCED_DEFERRED))
/** /**
* tcp_release_cb - tcp release_sock() callback * tcp_release_cb - tcp release_sock() callback
* @sk: socket * @sk: socket
...@@ -914,6 +915,9 @@ void tcp_release_cb(struct sock *sk) ...@@ -914,6 +915,9 @@ void tcp_release_cb(struct sock *sk)
if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
tcp_delack_timer_handler(sk); tcp_delack_timer_handler(sk);
if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
sk->sk_prot->mtu_reduced(sk);
} }
EXPORT_SYMBOL(tcp_release_cb); EXPORT_SYMBOL(tcp_release_cb);
......
...@@ -315,6 +315,23 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -315,6 +315,23 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return err; return err;
} }
/* React to an ICMPV6_PKT_TOOBIG indication for this socket.
 * The new MTU value was stashed in tcp_sk(sk)->mtu_info by tcp_v6_err();
 * this runs either directly from tcp_v6_err() when the socket is not
 * owned by user, or deferred via tcp_release_cb() (TCP_MTU_REDUCED_DEFERRED)
 * when it was.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
/* Listening and closed sockets have no path MTU state to update. */
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
/* Propagate the reduced MTU to the cached route; bail if no dst. */
dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
if (!dst)
return;
/* Only act if our cached PMTU is now larger than the route's MTU:
 * resync the MSS and retransmit the over-sized in-flight segments.
 */
if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
tcp_sync_mss(sk, dst_mtu(dst));
tcp_simple_retransmit(sk);
}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info) u8 type, u8 code, int offset, __be32 info)
{ {
...@@ -342,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -342,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} }
bh_lock_sock(sk); bh_lock_sock(sk);
if (sock_owned_by_user(sk)) if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE) if (sk->sk_state == TCP_CLOSE)
...@@ -371,21 +388,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -371,21 +388,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} }
if (type == ICMPV6_PKT_TOOBIG) { if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst; tp->mtu_info = ntohl(info);
if (!sock_owned_by_user(sk))
if (sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk);
goto out; else
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out;
dst = inet6_csk_update_pmtu(sk, ntohl(info));
if (!dst)
goto out;
if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
tcp_sync_mss(sk, dst_mtu(dst));
tcp_simple_retransmit(sk);
}
goto out; goto out;
} }
...@@ -1949,6 +1956,7 @@ struct proto tcpv6_prot = { ...@@ -1949,6 +1956,7 @@ struct proto tcpv6_prot = {
.sendpage = tcp_sendpage, .sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv, .backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb, .release_cb = tcp_release_cb,
.mtu_reduced = tcp_v6_mtu_reduced,
.hash = tcp_v6_hash, .hash = tcp_v6_hash,
.unhash = inet_unhash, .unhash = inet_unhash,
.get_port = inet_csk_get_port, .get_port = inet_csk_get_port,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment