Commit 575f5a13 authored by Nivedita Singhvi's avatar Nivedita Singhvi Committed by David S. Miller

[IPV{4,6}]: Clean up SNMP counter bumping.

parent 0a68b031
......@@ -237,11 +237,13 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
protocol = -ret;
goto resubmit;
}
IP_INC_STATS_BH(IpInDelivers);
} else {
if (!raw_sk) {
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
IP_INC_STATS_BH(IpInDelivers);
kfree_skb(skb);
}
}
......@@ -304,8 +306,10 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
--ANK (980813)
*/
if (skb_cow(skb, skb_headroom(skb)))
if (skb_cow(skb, skb_headroom(skb))) {
IP_INC_STATS_BH(IpInDiscards);
goto drop;
}
iph = skb->nh.iph;
if (ip_options_compile(NULL, skb))
......@@ -353,8 +357,10 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
IP_INC_STATS_BH(IpInReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
IP_INC_STATS_BH(IpInDiscards);
goto out;
}
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto inhdr_error;
......
......@@ -227,12 +227,11 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
/* Charge it to the socket. */
if (sock_queue_rcv_skb(sk, skb) < 0) {
IP_INC_STATS(IpInDiscards);
/* FIXME: increment a raw drops counter here */
kfree_skb(skb);
return NET_RX_DROP;
}
IP_INC_STATS(IpInDelivers);
return NET_RX_SUCCESS;
}
......
......@@ -1700,8 +1700,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
#endif /* CONFIG_FILTER */
IP_INC_STATS_BH(IpInDelivers);
if (sk->state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
......
......@@ -951,8 +951,6 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
UDP_INC_STATS_BH(UdpInErrors);
IP_INC_STATS_BH(IpInDiscards);
ip_statistics[smp_processor_id()*2].IpInDelivers--;
kfree_skb(skb);
return -1;
}
......@@ -962,8 +960,6 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
if (sock_queue_rcv_skb(sk,skb)<0) {
UDP_INC_STATS_BH(UdpInErrors);
IP_INC_STATS_BH(IpInDiscards);
ip_statistics[smp_processor_id()*2].IpInDelivers--;
kfree_skb(skb);
return -1;
}
......@@ -1047,8 +1043,6 @@ int udp_rcv(struct sk_buff *skb)
u32 daddr = skb->nh.iph->daddr;
int len = skb->len;
IP_INC_STATS_BH(IpInDelivers);
/*
* Validate the packet and the UDP length.
*/
......
......@@ -60,8 +60,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
IP6_INC_STATS_BH(Ip6InReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
IP6_INC_STATS_BH(Ip6InDiscards);
goto out;
}
/* Store incoming device index. When the packet will
be queued, we cannot refer to skb->dev anymore.
......@@ -175,11 +177,13 @@ static inline int ip6_input_finish(struct sk_buff *skb)
nexthdr = -ret;
goto resubmit;
}
IP6_INC_STATS_BH(Ip6InDelivers);
} else {
if (!raw_sk) {
IP6_INC_STATS_BH(Ip6InUnknownProtos);
icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
}
} else
IP6_INC_STATS_BH(Ip6InDelivers);
kfree_skb(skb);
}
......
......@@ -275,7 +275,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
#if defined(CONFIG_FILTER)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
IP6_INC_STATS_BH(Ip6InDiscards);
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
return 0;
}
......@@ -284,12 +284,11 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
#endif
/* Charge it to the socket. */
if (sock_queue_rcv_skb(sk,skb)<0) {
IP6_INC_STATS_BH(Ip6InDiscards);
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
return 0;
}
IP6_INC_STATS_BH(Ip6InDelivers);
return 0;
}
......@@ -327,7 +326,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (inet->hdrincl) {
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
(unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
IP6_INC_STATS_BH(Ip6InDiscards);
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
return 0;
}
......@@ -427,7 +426,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
IP6_INC_STATS_USER(Ip6InDiscards);
/* FIXME: increment a raw6 drops counter here */
goto out_free;
}
......
......@@ -1497,8 +1497,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* is currently called with bh processing disabled.
*/
IP6_INC_STATS_BH(Ip6InDelivers);
/* Do Stevens' IPV6_PKTOPTIONS.
Yes, guys, it is the only place in our code, where we
......
......@@ -545,7 +545,6 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
UDP6_INC_STATS_BH(UdpInErrors);
IP6_INC_STATS_BH(Ip6InDiscards);
kfree_skb(skb);
return 0;
}
......@@ -554,11 +553,9 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
#endif
if (sock_queue_rcv_skb(sk,skb)<0) {
UDP6_INC_STATS_BH(UdpInErrors);
IP6_INC_STATS_BH(Ip6InDiscards);
kfree_skb(skb);
return 0;
}
IP6_INC_STATS_BH(Ip6InDelivers);
UDP6_INC_STATS_BH(UdpInDatagrams);
return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment