Commit 9caad864 authored by Eric Dumazet, committed by David S. Miller

tcp: increment sk_drops for listeners

Goal: packets dropped by a listener are accounted for.

This adds a tcp_listendrop() helper, and clears sk_drops in sk_clone_lock()
so that children do not inherit their parent's drop count.

Note that we no longer increment the LINUX_MIB_LISTENDROPS counter when
sending a SYNCOOKIE, since the SYN packet generated a SYNACK.
We already have a separate LINUX_MIB_SYNCOOKIESSENT counter for that.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 532182cd
include/net/tcp.h
@@ -1836,4 +1836,17 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 	tp->data_segs_in += segs_in;
 }
 
+/*
+ * TCP listen path runs lockless.
+ * We forced "struct sock" to be const qualified to make sure
+ * we don't modify one of its field by mistake.
+ * Here, we increment sk_drops which is an atomic_t, so we can safely
+ * make sock writable again.
+ */
+static inline void tcp_listendrop(const struct sock *sk)
+{
+	atomic_inc(&((struct sock *)sk)->sk_drops);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+}
+
 #endif	/* _TCP_H */
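The comment in the hunk above explains the const-cast: the lockless listen path only sees struct sock through a const pointer, but sk_drops is an atomic_t that is safe to bump without the listener lock, so the helper casts the const away for that one field. Below is a self-contained userspace analogue of the same pattern, using C11 atomics in place of the kernel's atomic_t; the struct and function names are illustrative, not taken from the kernel.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for "struct sock": callers only ever see it through a
 * const pointer, but the drop counter is atomic and safe to modify
 * concurrently without any lock. */
struct listener_stats {
	int state;            /* treated as read-only here */
	atomic_int drops;     /* analogue of sk_drops */
};

/* Analogue of tcp_listendrop(): takes a const pointer, then casts the
 * const away only to increment the atomic counter. */
static inline void account_listen_drop(const struct listener_stats *ls)
{
	atomic_fetch_add(&((struct listener_stats *)ls)->drops, 1);
}

int main(void)
{
	struct listener_stats listener = { .state = 0, .drops = 0 };

	account_listen_drop(&listener);
	account_listen_drop(&listener);
	printf("drops=%d\n", atomic_load(&listener.drops));
	return 0;
}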
net/core/sock.c
@@ -1525,6 +1525,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_dst_cache	= NULL;
 		newsk->sk_wmem_queued	= 0;
 		newsk->sk_forward_alloc	= 0;
+		atomic_set(&newsk->sk_drops, 0);
 		newsk->sk_send_head	= NULL;
 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
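The one-line sk_clone_lock() change above is what keeps children from inheriting the parent's drop count: the child socket starts out as a copy of the listener, so the per-socket counter has to be reset explicitly. A hedged sketch of the same idea follows, reusing the same toy struct as the previous example (repeated so the sketch stands alone); clone_listener() is illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct listener_stats {
	int state;
	atomic_int drops;     /* analogue of sk_drops */
};

/* Rough analogue of sk_clone_lock(): the child begins life as a copy of
 * the parent, so the per-socket drop counter is cleared, mirroring
 * atomic_set(&newsk->sk_drops, 0) in the patch. */
static struct listener_stats *clone_listener(const struct listener_stats *parent)
{
	struct listener_stats *child = malloc(sizeof(*child));

	if (!child)
		return NULL;
	memcpy(child, parent, sizeof(*child));   /* inherit everything... */
	atomic_init(&child->drops, 0);           /* ...except the drop count */
	return child;
}

int main(void)
{
	struct listener_stats parent = { .state = 1, .drops = 5 };
	struct listener_stats *child = clone_listener(&parent);

	if (child) {
		printf("parent drops=%d, child drops=%d\n",
		       atomic_load(&parent.drops), atomic_load(&child->drops));
		free(child);
	}
	return 0;
}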
net/ipv4/tcp_input.c
@@ -6339,8 +6339,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 		af_ops->send_synack(sk, dst, &fl, req,
 				    &foc, !want_cookie);
-		if (want_cookie)
-			goto drop_and_free;
+		if (want_cookie) {
+			reqsk_free(req);
+			return 0;
+		}
 	}
 	reqsk_put(req);
 	return 0;
@@ -6350,7 +6352,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 drop_and_free:
 	reqsk_free(req);
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_conn_request);
net/ipv4/tcp_ipv4.c
@@ -329,7 +329,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
-		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+		tcp_listendrop(req->rsk_listener);
 	}
 	reqsk_put(req);
 }
@@ -1246,7 +1246,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 				&tcp_request_sock_ipv4_ops, sk, skb);
 
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1348,7 +1348,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 exit_nonewsk:
 	dst_release(dst);
 exit:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return NULL;
 put_and_exit:
 	inet_csk_prepare_forced_close(newsk);
net/ipv6/tcp_ipv6.c
@@ -964,7 +964,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 				&tcp_request_sock_ipv6_ops, sk, skb);
 
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0; /* don't send reset */
 }
@@ -1169,7 +1169,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 out_nonewsk:
 	dst_release(dst);
 out:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return NULL;
 }