Commit ed2e9239 authored by Eric Dumazet, committed by David S. Miller

tcp/dccp: fix timewait races in timer handling

When creating a timewait socket, we need to arm the timer before
allowing other cpus to find it. The signal that allows other cpus to find
the socket is setting tw_refcnt to a non-zero value.

As we set tw_refcnt in __inet_twsk_hashdance(), we therefore need to
call inet_twsk_schedule() first.
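
For illustration, this is the resulting order in tcp_time_wait() after this
patch (excerpted and commented from the diff below):

	/* Arm the timer while tw_refcnt is still zero, so no other cpu
	 * can find and use the timewait socket yet.
	 */
	inet_twsk_schedule(tw, timeo);

	/* Linkage updates: hash tw and set tw_refcnt to a non-zero value,
	 * publishing the socket to other cpus.
	 */
	__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

	inet_twsk_put(tw);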

This also means we need to remove tw_refcnt changes from
inet_twsk_schedule() and let the caller handle it.

Note that because we use mod_timer_pinned(), we have the guarantee that
the timer won't expire before we set tw_refcnt, as we run in BH context.

To make things more readable, I introduced an inet_twsk_reschedule() helper.

When rearming the timer, we can use mod_timer_pending() to make sure
we do not rearm a canceled timer.
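
Sketch of the resulting arm/rearm split in __inet_twsk_schedule() (excerpted
from the diff below): the initial arm keeps the tw_count accounting, while the
rearm path uses mod_timer_pending() so a timer already canceled by
inet_twsk_deschedule_put() is not resurrected:

	if (!rearm) {
		/* Initial arm: the timer cannot already be pending. */
		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
		atomic_inc(&tw->tw_dr->tw_count);
	} else {
		/* Rearm: a no-op if the timer was canceled meanwhile. */
		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
	}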

Note: This bug can possibly trigger if packets of a flow can hit
multiple cpus. This does not normally happen, unless flow steering
is broken somehow. This explains why this bug was spotted ~5 months after
its introduction.

A similar fix is needed for SYN_RECV sockets in reqsk_queue_hash_req(),
but will be provided in a separate patch for proper tracking.

Fixes: 789f558c ("tcp/dccp: get rid of central timewait timer")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Ying Cai <ycai@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4c5d283a
include/net/inet_timewait_sock.h

@@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+			  bool rearm);
+
+static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+	__inet_twsk_schedule(tw, timeo, false);
+}
+
+static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+{
+	__inet_twsk_schedule(tw, timeo, true);
+}
+
 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
...
net/dccp/minisocks.c

@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 			tw->tw_ipv6only = sk->sk_ipv6only;
 		}
 #endif
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 			timeo = DCCP_TIMEWAIT_LEN;
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
...
net/ipv4/inet_timewait_sock.c

@@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	/*
 	 * Step 2: Hash TW into tcp ehash chain.
 	 * Notes :
-	 * - tw_refcnt is set to 3 because :
+	 * - tw_refcnt is set to 4 because :
 	 * - We have one reference from bhash chain.
 	 * - We have one reference from ehash chain.
+	 * - We have one reference from timer.
+	 * - One reference for ourself (our caller will release it).
 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
 	 * committed into memory all tw fields.
 	 */
-	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+	atomic_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 {
 	/* timeout := RTO * 3.5
 	 *
@@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 	 */
 	tw->tw_kill = timeo <= 4*HZ;
-	if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
-		atomic_inc(&tw->tw_refcnt);
+	if (!rearm) {
+		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
+	} else {
+		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
 	}
 }
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family)
...
net/ipv4/tcp_minisocks.c

@@ -162,9 +162,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		if (tcp_death_row.sysctl_tw_recycle &&
 		    tcptw->tw_ts_recent_stamp &&
 		    tcp_tw_remember_stamp(tw))
-			inet_twsk_schedule(tw, tw->tw_timeout);
+			inet_twsk_reschedule(tw, tw->tw_timeout);
 		else
-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 		return TCP_TW_ACK;
 	}
@@ -201,7 +201,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 			return TCP_TW_SUCCESS;
 		}
 	}
-	inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+	inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 	if (tmp_opt.saw_tstamp) {
 		tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -251,7 +251,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	 * Do not reschedule in the last case.
 	 */
 	if (paws_reject || th->ack)
-		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 	return tcp_timewait_check_oow_rate_limit(
 			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		} while (0);
 #endif
 
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
 			timeo = rto;
@@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		}
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
...