Commit c4794d22 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

ipv4: tunnels: use DEV_STATS_INC()

Most code paths in tunnels are lockless (e.g. NETIF_F_LLTX in tx).

Adopt SMP safe DEV_STATS_INC() to update dev->stats fields.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2fad1ba3
...@@ -510,7 +510,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -510,7 +510,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
err_free_skb: err_free_skb:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
} }
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
...@@ -592,7 +592,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -592,7 +592,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
err_free_skb: err_free_skb:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
} }
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
...@@ -663,7 +663,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, ...@@ -663,7 +663,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -717,7 +717,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, ...@@ -717,7 +717,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -745,7 +745,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, ...@@ -745,7 +745,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -368,23 +368,23 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, ...@@ -368,23 +368,23 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
#ifdef CONFIG_NET_IPGRE_BROADCAST #ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) { if (ipv4_is_multicast(iph->daddr)) {
tunnel->dev->stats.multicast++; DEV_STATS_INC(tunnel->dev, multicast);
skb->pkt_type = PACKET_BROADCAST; skb->pkt_type = PACKET_BROADCAST;
} }
#endif #endif
if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) || if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) { ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
tunnel->dev->stats.rx_crc_errors++; DEV_STATS_INC(tunnel->dev, rx_crc_errors);
tunnel->dev->stats.rx_errors++; DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop; goto drop;
} }
if (tunnel->parms.i_flags&TUNNEL_SEQ) { if (tunnel->parms.i_flags&TUNNEL_SEQ) {
if (!(tpi->flags&TUNNEL_SEQ) || if (!(tpi->flags&TUNNEL_SEQ) ||
(tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
tunnel->dev->stats.rx_fifo_errors++; DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
tunnel->dev->stats.rx_errors++; DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop; goto drop;
} }
tunnel->i_seqno = ntohl(tpi->seq) + 1; tunnel->i_seqno = ntohl(tpi->seq) + 1;
...@@ -398,8 +398,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, ...@@ -398,8 +398,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&iph->saddr, iph->tos); &iph->saddr, iph->tos);
if (err > 1) { if (err > 1) {
++tunnel->dev->stats.rx_frame_errors; DEV_STATS_INC(tunnel->dev, rx_frame_errors);
++tunnel->dev->stats.rx_errors; DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop; goto drop;
} }
} }
...@@ -581,7 +581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -581,7 +581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
if (!rt) { if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4); rt = ip_route_output_key(tunnel->net, &fl4);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error; goto tx_error;
} }
if (use_cache) if (use_cache)
...@@ -590,7 +590,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -590,7 +590,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
} }
if (rt->dst.dev == dev) { if (rt->dst.dev == dev) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.collisions++; DEV_STATS_INC(dev, collisions);
goto tx_error; goto tx_error;
} }
...@@ -625,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -625,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
df, !net_eq(tunnel->net, dev_net(dev))); df, !net_eq(tunnel->net, dev_net(dev)));
return; return;
tx_error: tx_error:
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
goto kfree; goto kfree;
tx_dropped: tx_dropped:
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
kfree: kfree:
kfree_skb(skb); kfree_skb(skb);
} }
...@@ -662,7 +662,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -662,7 +662,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
/* NBMA tunnel */ /* NBMA tunnel */
if (!skb_dst(skb)) { if (!skb_dst(skb)) {
dev->stats.tx_fifo_errors++; DEV_STATS_INC(dev, tx_fifo_errors);
goto tx_error; goto tx_error;
} }
...@@ -749,7 +749,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -749,7 +749,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
rt = ip_route_output_key(tunnel->net, &fl4); rt = ip_route_output_key(tunnel->net, &fl4);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error; goto tx_error;
} }
if (use_cache) if (use_cache)
...@@ -762,7 +762,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -762,7 +762,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
if (rt->dst.dev == dev) { if (rt->dst.dev == dev) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.collisions++; DEV_STATS_INC(dev, collisions);
goto tx_error; goto tx_error;
} }
...@@ -805,7 +805,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -805,7 +805,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
if (skb_cow_head(skb, dev->needed_headroom)) { if (skb_cow_head(skb, dev->needed_headroom)) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
...@@ -819,7 +819,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -819,7 +819,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
dst_link_failure(skb); dst_link_failure(skb);
#endif #endif
tx_error: tx_error:
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb); kfree_skb(skb);
} }
EXPORT_SYMBOL_GPL(ip_tunnel_xmit); EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
......
...@@ -107,8 +107,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err) ...@@ -107,8 +107,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
dev = tunnel->dev; dev = tunnel->dev;
if (err) { if (err) {
dev->stats.rx_errors++; DEV_STATS_INC(dev, rx_errors);
dev->stats.rx_dropped++; DEV_STATS_INC(dev, rx_dropped);
return 0; return 0;
} }
...@@ -183,7 +183,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -183,7 +183,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
dst = &rt->dst; dst = &rt->dst;
...@@ -198,14 +198,14 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -198,14 +198,14 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
if (dst->error) { if (dst->error) {
dst_release(dst); dst_release(dst);
dst = NULL; dst = NULL;
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
skb_dst_set(skb, dst); skb_dst_set(skb, dst);
break; break;
#endif #endif
default: default:
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
} }
...@@ -213,7 +213,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -213,7 +213,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
dst_hold(dst); dst_hold(dst);
dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0); dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
if (IS_ERR(dst)) { if (IS_ERR(dst)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
...@@ -221,7 +221,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -221,7 +221,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
goto xmit; goto xmit;
if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) { if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(dst); dst_release(dst);
goto tx_error_icmp; goto tx_error_icmp;
} }
...@@ -230,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -230,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
if (tdev == dev) { if (tdev == dev) {
dst_release(dst); dst_release(dst);
dev->stats.collisions++; DEV_STATS_INC(dev, collisions);
goto tx_error; goto tx_error;
} }
...@@ -267,7 +267,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -267,7 +267,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
tx_error_icmp: tx_error_icmp:
dst_link_failure(skb); dst_link_failure(skb);
tx_error: tx_error:
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb); kfree_skb(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -304,7 +304,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -304,7 +304,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
return vti_xmit(skb, dev, &fl); return vti_xmit(skb, dev, &fl);
tx_err: tx_err:
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb); kfree_skb(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -310,7 +310,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, ...@@ -310,7 +310,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
tx_error: tx_error:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -506,8 +506,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -506,8 +506,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
return err; return err;
} }
dev->stats.tx_bytes += skb->len; DEV_STATS_ADD(dev, tx_bytes, skb->len);
dev->stats.tx_packets++; DEV_STATS_INC(dev, tx_packets);
rcu_read_lock(); rcu_read_lock();
/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */ /* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
...@@ -1839,8 +1839,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, ...@@ -1839,8 +1839,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (vif->flags & VIFF_REGISTER) { if (vif->flags & VIFF_REGISTER) {
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1); WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len); WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
vif_dev->stats.tx_bytes += skb->len; DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
vif_dev->stats.tx_packets++; DEV_STATS_INC(vif_dev, tx_packets);
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free; goto out_free;
} }
...@@ -1898,8 +1898,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, ...@@ -1898,8 +1898,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (vif->flags & VIFF_TUNNEL) { if (vif->flags & VIFF_TUNNEL) {
ip_encap(net, skb, vif->local, vif->remote); ip_encap(net, skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */ /* FIXME: extra output firewall step used to be here. --RR */
vif_dev->stats.tx_packets++; DEV_STATS_INC(vif_dev, tx_packets);
vif_dev->stats.tx_bytes += skb->len; DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
} }
IPCB(skb)->flags |= IPSKB_FORWARDED; IPCB(skb)->flags |= IPSKB_FORWARDED;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment