Commit 02a1d6e7 authored by Eric Dumazet, committed by David S. Miller

net: rename NET_{ADD|INC}_STATS_BH()

Rename NET_INC_STATS_BH() to __NET_INC_STATS()
and NET_ADD_STATS_BH() to __NET_ADD_STATS()
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b15084ec
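The leading double underscore follows the usual kernel convention: the __ variants are the lightweight forms for callers that already run in a suitably protected context (softirq/BH), which is what the old _BH suffix signalled, while the plain NET_INC_STATS()/NET_ADD_STATS() remain safe from any context. The sketch below is only an illustration of that naming split in plain user-space C; struct foo_mib, FOO_INC_STATS() and __FOO_INC_STATS() are hypothetical names invented for this example, not kernel APIs.

/*
 * Illustrative sketch, not kernel code: hypothetical macros mirroring the
 * renamed pair.  The plain form is meant to be callable from any context
 * (in the kernel it maps to a preemption-safe per-CPU increment), while the
 * double-underscore form assumes the caller already runs where no extra
 * protection is needed -- the role the old _BH suffix used to play.
 */
#include <stdio.h>

struct foo_mib {
	unsigned long listendrops;
};

#define FOO_INC_STATS(mib, field)	((mib)->field += 1)	/* any context */
#define __FOO_INC_STATS(mib, field)	((mib)->field += 1)	/* caller protects */

int main(void)
{
	struct foo_mib mib = { .listendrops = 0 };

	FOO_INC_STATS(&mib, listendrops);	/* e.g. process context */
	__FOO_INC_STATS(&mib, listendrops);	/* e.g. softirq handler */

	printf("listendrops=%lu\n", mib.listendrops);
	return 0;
}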
@@ -193,9 +193,9 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define IP_UPD_PO_STATS(net, field, val)	SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define __IP_UPD_PO_STATS(net, field, val)	SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd)	SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
...
@@ -1743,7 +1743,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 					 __u16 *mss)
 {
 	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 	return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1852,7 +1852,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 static inline void tcp_listendrop(const struct sock *sk)
 {
 	atomic_inc(&((struct sock *)sk)->sk_drops);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 #endif /* _TCP_H */
@@ -4982,8 +4982,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 		netpoll_poll_unlock(have);
 	}
 	if (rc > 0)
-		NET_ADD_STATS_BH(sock_net(sk),
+		__NET_ADD_STATS(sock_net(sk),
 				LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 	local_bh_enable();
 	if (rc == LL_FLUSH_FAILED)
...
@@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
 	 * socket here.
 	 */
 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else {
 		/*
 		 * Still in RESPOND, just remove it silently.
@@ -273,7 +273,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -281,7 +281,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	dp = dccp_sk(sk);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	return newsk;
 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
 	inet_csk_prepare_forced_close(newsk);
...
@@ -106,7 +106,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -114,7 +114,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	dp = dccp_sk(sk);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 	return newsk;
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 }
...
@@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
 			       jiffies + TCP_DELACK_MIN);
 		goto out;
@@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		dccp_send_ack(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 out:
 	bh_unlock_sock(sk);
...
@@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 	if (IS_ERR(rt))
 		return 1;
 	if (rt->dst.dev != dev) {
-		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+		__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
 	ip_rt_put(rt);
...
@@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
...
@@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data)
 	struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 	if (tw->tw_kill)
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
 	else
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
 	inet_twsk_kill(tw);
 }
...
@@ -337,7 +337,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 					       iph->tos, skb->dev);
 		if (unlikely(err)) {
 			if (err == -EXDEV)
-				NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+				__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
 			goto drop;
 		}
 	}
...
@@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
...
@@ -2148,7 +2148,7 @@ void tcp_close(struct sock *sk, long timeout)
 	if (tp->linger2 < 0) {
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_ATOMIC);
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPABORTONLINGER);
 	} else {
 		const int tmo = tcp_fin_time(sk);
@@ -2167,7 +2167,7 @@ void tcp_close(struct sock *sk, long timeout)
 		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
...
@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 		ca->last_ack = now_us;
 		if (after(now_us, ca->round_start + base_owd)) {
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTTRAINDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
+			__NET_ADD_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTTRAINCWND,
 					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 			return;
 		}
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 				125U);
 		if (ca->rtt.min > thresh) {
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTDELAYDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
+			__NET_ADD_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTDELAYCWND,
 					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
...
@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 		ca->last_ack = now;
 		if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 			ca->found |= HYSTART_ACK_TRAIN;
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTTRAINDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
+			__NET_ADD_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTTRAINCWND,
 					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 		if (ca->curr_rtt > ca->delay_min +
 		    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
 			ca->found |= HYSTART_DELAY;
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTDELAYDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
+			__NET_ADD_STATS(sock_net(sk),
 					LINUX_MIB_TCPHYSTARTDELAYCWND,
 					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
...
@@ -256,8 +256,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 		req1 = fastopenq->rskq_rst_head;
 		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
 			spin_unlock(&fastopenq->lock);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 			return false;
 		}
 		fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 	struct sock *child;
 	if (foc->len == 0) /* Client requests a cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 	      (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 		child = tcp_fastopen_create_child(sk, skb, dst, req);
 		if (child) {
 			foc->len = -1;
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPFASTOPENPASSIVE);
 			return child;
 		}
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 	} else if (foc->len > 0) /* Client presents an invalid cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 	valid_foc.exp = foc->exp;
 	*foc = valid_foc;
...
This diff is collapsed.
@@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 	 * an established socket here.
 	 */
 	if (seq != tcp_rsk(req)->snt_isn) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
@@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	 */
 	if (sock_owned_by_user(sk)) {
 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
@@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -1151,12 +1151,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 		return false;
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -1342,7 +1342,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	return newsk;
 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
@@ -1513,8 +1513,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPPREQUEUEDROPPED);
 		}
 		tp->ucopy.memory = 0;
@@ -1629,7 +1629,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		}
 	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
@@ -1662,7 +1662,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
...
@@ -235,7 +235,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	}
 	if (paws_reject)
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 	if (!th->rst) {
 		/* In this case we must reset the TIMEWAIT timer.
@@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 * socket up.  We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
 	}
 	tcp_update_metrics(sk);
@@ -710,7 +710,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 					  &tcp_rsk(req)->last_oow_ack_time))
 			req->rsk_ops->send_ack(sk, skb, req);
 		if (paws_reject)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
 	}
@@ -752,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
 		inet_rsk(req)->acked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
 		return NULL;
 	}
@@ -791,7 +791,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	}
 	if (!fastopen) {
 		inet_csk_reqsk_queue_drop(sk, req);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	}
 	return NULL;
 }
...
@@ -2212,8 +2212,8 @@ static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
 	if (unlikely(skb_fclone_busy(sk, skb))) {
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
 	}
 	return false;
@@ -2275,7 +2275,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	tp->tlp_high_seq = tp->snd_nxt;
 probe_sent:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
 	inet_csk(sk)->icsk_pending = 0;
rearm_timer:
@@ -2656,7 +2656,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		/* Update global TCP statistics. */
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 		tp->total_retrans += segs;
 	}
 	return err;
@@ -2681,7 +2681,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 			tp->retrans_stamp = tcp_skb_timestamp(skb);
 	} else if (err != -EBUSY) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
 	if (tp->undo_retrans < 0)
@@ -2805,7 +2805,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
@@ -3541,7 +3541,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
 	if (!res) {
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 	}
 	return res;
 }
...
@@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk)
 			if (scb->sacked & TCPCB_SACKED_RETRANS) {
 				scb->sacked &= ~TCPCB_SACKED_RETRANS;
 				tp->retrans_out -= tcp_skb_pcount(skb);
-				NET_INC_STATS_BH(sock_net(sk),
+				__NET_INC_STATS(sock_net(sk),
 						LINUX_MIB_TCPLOSTRETRANSMIT);
 			}
 		} else if (!(scb->sacked & TCPCB_RETRANS)) {
 			/* Original data are sent sequentially so stop early
...
@@ -30,7 +30,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 	tcp_done(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 /* Do not allow orphaned sockets to eat all our resources.
@@ -68,7 +68,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk)
 			if (tp->syn_fastopen || tp->syn_data)
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS_BH(sock_net(sk),
+				__NET_INC_STATS(sock_net(sk),
 						LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 		syn_set = true;
@@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk)
 			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS_BH(sock_net(sk),
+					__NET_INC_STATS(sock_net(sk),
 							LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
@@ -228,7 +228,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk_backlog_rcv(sk, skb);
@@ -248,7 +248,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 out:
@@ -265,7 +265,7 @@ static void tcp_delack_timer(unsigned long data)
 		tcp_delack_timer_handler(sk);
 	} else {
 		inet_csk(sk)->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
 			sock_hold(sk);
@@ -431,7 +431,7 @@ void tcp_retransmit_timer(struct sock *sk)
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	tcp_enter_loss(sk);
@@ -549,7 +549,7 @@ void tcp_syn_ack_timeout(const struct request_sock *req)
 {
 	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
-	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
+	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);
...
@@ -222,7 +222,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
...
@@ -155,11 +155,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
...
@@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
@@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 		return false;
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -1165,7 +1165,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	return newsk;
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
@@ -1421,7 +1421,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 		}
 	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
@@ -1454,7 +1454,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
...
@@ -532,7 +532,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	*app = asoc;
 	*tpp = transport;
...