Commit aed069df authored by Alexander Duyck, committed by David S. Miller

ip_tunnel_core: iptunnel_handle_offloads returns int and doesn't free skb

This patch updates the IP tunnel core function iptunnel_handle_offloads so
that we return an int and do not free the skb inside the function.  This
actually allows us to clean up several paths in several tunnels so that we
can free the skb at one point in the path without having to have a
secondary path if we are supporting tunnel offloads.

In addition, it should resolve some double-free issues I have found in the
tunnel paths, as I believe it is possible for us to end up triggering such
an event in the case of fou or gue.
Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ec9dcd35
...@@ -696,16 +696,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, ...@@ -696,16 +696,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr); + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
err = skb_cow_head(skb, min_headroom); err = skb_cow_head(skb, min_headroom);
if (unlikely(err)) { if (unlikely(err))
kfree_skb(skb);
goto free_rt; goto free_rt;
}
skb = udp_tunnel_handle_offloads(skb, udp_sum); err = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb)) { if (err)
err = PTR_ERR(skb);
goto free_rt; goto free_rt;
}
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
...@@ -733,16 +729,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, ...@@ -733,16 +729,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr); + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom); err = skb_cow_head(skb, min_headroom);
if (unlikely(err)) { if (unlikely(err))
kfree_skb(skb);
goto free_dst; goto free_dst;
}
skb = udp_tunnel_handle_offloads(skb, udp_sum); err = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb)) { if (err)
err = PTR_ERR(skb);
goto free_dst; goto free_dst;
}
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
...@@ -937,7 +929,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -937,7 +929,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, key->tun_flags, vni, err = geneve_build_skb(rt, skb, key->tun_flags, vni,
info->options_len, opts, flags, xnet); info->options_len, opts, flags, xnet);
if (unlikely(err)) if (unlikely(err))
goto err; goto tx_error;
tos = ip_tunnel_ecn_encap(key->tos, iip, skb); tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl; ttl = key->ttl;
...@@ -946,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -946,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, 0, geneve->vni, err = geneve_build_skb(rt, skb, 0, geneve->vni,
0, NULL, flags, xnet); 0, NULL, flags, xnet);
if (unlikely(err)) if (unlikely(err))
goto err; goto tx_error;
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
ttl = geneve->ttl; ttl = geneve->ttl;
...@@ -964,7 +956,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -964,7 +956,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error: tx_error:
dev_kfree_skb(skb); dev_kfree_skb(skb);
err:
if (err == -ELOOP) if (err == -ELOOP)
dev->stats.collisions++; dev->stats.collisions++;
else if (err == -ENETUNREACH) else if (err == -ENETUNREACH)
...@@ -1026,7 +1018,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -1026,7 +1018,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info->options_len, opts, info->options_len, opts,
flags, xnet); flags, xnet);
if (unlikely(err)) if (unlikely(err))
goto err; goto tx_error;
prio = ip_tunnel_ecn_encap(key->tos, iip, skb); prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl; ttl = key->ttl;
...@@ -1035,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -1035,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve6_build_skb(dst, skb, 0, geneve->vni, err = geneve6_build_skb(dst, skb, 0, geneve->vni,
0, NULL, flags, xnet); 0, NULL, flags, xnet);
if (unlikely(err)) if (unlikely(err))
goto err; goto tx_error;
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
iip, skb); iip, skb);
...@@ -1054,7 +1046,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ...@@ -1054,7 +1046,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error: tx_error:
dev_kfree_skb(skb); dev_kfree_skb(skb);
err:
if (err == -ELOOP) if (err == -ELOOP)
dev->stats.collisions++; dev->stats.collisions++;
else if (err == -ENETUNREACH) else if (err == -ENETUNREACH)
......
...@@ -1797,9 +1797,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, ...@@ -1797,9 +1797,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (WARN_ON(!skb)) if (WARN_ON(!skb))
return -ENOMEM; return -ENOMEM;
skb = iptunnel_handle_offloads(skb, type); err = iptunnel_handle_offloads(skb, type);
if (IS_ERR(skb)) if (err)
return PTR_ERR(skb); goto out_free;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI; vxh->vx_flags = VXLAN_HF_VNI;
......
...@@ -309,7 +309,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, ...@@ -309,7 +309,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags); gfp_t flags);
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb) static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{ {
......
...@@ -105,8 +105,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, ...@@ -105,8 +105,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id, __be16 flags, __be64 tunnel_id,
int md_size); int md_size);
static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
bool udp_csum)
{ {
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
......
...@@ -802,11 +802,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, ...@@ -802,11 +802,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM : int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
SKB_GSO_UDP_TUNNEL; SKB_GSO_UDP_TUNNEL;
__be16 sport; __be16 sport;
int err;
skb = iptunnel_handle_offloads(skb, type); err = iptunnel_handle_offloads(skb, type);
if (err)
if (IS_ERR(skb)) return err;
return PTR_ERR(skb);
sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
skb, 0, 0, false); skb, 0, 0, false);
...@@ -826,6 +826,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, ...@@ -826,6 +826,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
__be16 sport; __be16 sport;
void *data; void *data;
bool need_priv = false; bool need_priv = false;
int err;
if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) && if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) { skb->ip_summed == CHECKSUM_PARTIAL) {
...@@ -836,10 +837,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, ...@@ -836,10 +837,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
optlen += need_priv ? GUE_LEN_PRIV : 0; optlen += need_priv ? GUE_LEN_PRIV : 0;
skb = iptunnel_handle_offloads(skb, type); err = iptunnel_handle_offloads(skb, type);
if (err)
if (IS_ERR(skb)) return err;
return PTR_ERR(skb);
/* Get source port (based on flow hash) before skb_push */ /* Get source port (based on flow hash) before skb_push */
sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
......
...@@ -500,8 +500,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -500,8 +500,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
} }
static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, static int gre_handle_offloads(struct sk_buff *skb, bool csum)
bool csum)
{ {
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
} }
...@@ -568,11 +567,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -568,11 +567,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
} }
/* Push Tunnel header. */ /* Push Tunnel header. */
skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)); if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
if (IS_ERR(skb)) {
skb = NULL;
goto err_free_rt; goto err_free_rt;
}
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
...@@ -640,16 +636,14 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, ...@@ -640,16 +636,14 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph; tnl_params = &tunnel->parms.iph;
} }
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
if (IS_ERR(skb)) goto free_skb;
goto out;
__gre_xmit(skb, dev, tnl_params, skb->protocol); __gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK; return NETDEV_TX_OK;
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
out:
dev->stats.tx_dropped++; dev->stats.tx_dropped++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -664,9 +658,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, ...@@ -664,9 +658,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
if (IS_ERR(skb)) goto free_skb;
goto out;
if (skb_cow_head(skb, dev->needed_headroom)) if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb; goto free_skb;
...@@ -676,7 +669,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, ...@@ -676,7 +669,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
out:
dev->stats.tx_dropped++; dev->stats.tx_dropped++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -146,8 +146,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, ...@@ -146,8 +146,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
} }
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int iptunnel_handle_offloads(struct sk_buff *skb,
int gso_type_mask) int gso_type_mask)
{ {
int err; int err;
...@@ -159,9 +159,9 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, ...@@ -159,9 +159,9 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
err = skb_unclone(skb, GFP_ATOMIC); err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err)) if (unlikely(err))
goto error; return err;
skb_shinfo(skb)->gso_type |= gso_type_mask; skb_shinfo(skb)->gso_type |= gso_type_mask;
return skb; return 0;
} }
if (skb->ip_summed != CHECKSUM_PARTIAL) { if (skb->ip_summed != CHECKSUM_PARTIAL) {
...@@ -174,10 +174,7 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, ...@@ -174,10 +174,7 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
skb->encapsulation = 0; skb->encapsulation = 0;
} }
return skb; return 0;
error:
kfree_skb(skb);
return ERR_PTR(err);
} }
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
......
...@@ -219,9 +219,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -219,9 +219,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->protocol != htons(ETH_P_IP))) if (unlikely(skb->protocol != htons(ETH_P_IP)))
goto tx_error; goto tx_error;
skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
if (IS_ERR(skb)) goto tx_error;
goto out;
skb_set_inner_ipproto(skb, IPPROTO_IPIP); skb_set_inner_ipproto(skb, IPPROTO_IPIP);
...@@ -230,7 +229,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -230,7 +229,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tx_error: tx_error:
kfree_skb(skb); kfree_skb(skb);
out:
dev->stats.tx_errors++; dev->stats.tx_errors++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -913,10 +913,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -913,10 +913,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
goto tx_error; goto tx_error;
} }
skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT); if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
if (IS_ERR(skb)) {
ip_rt_put(rt); ip_rt_put(rt);
goto out; goto tx_error;
} }
if (df) { if (df) {
...@@ -992,7 +991,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -992,7 +991,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
dst_link_failure(skb); dst_link_failure(skb);
tx_error: tx_error:
kfree_skb(skb); kfree_skb(skb);
out:
dev->stats.tx_errors++; dev->stats.tx_errors++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1002,15 +1000,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1002,15 +1000,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tiph = &tunnel->parms.iph; const struct iphdr *tiph = &tunnel->parms.iph;
skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
if (IS_ERR(skb)) goto tx_error;
goto out;
skb_set_inner_ipproto(skb, IPPROTO_IPIP); skb_set_inner_ipproto(skb, IPPROTO_IPIP);
ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP); ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
return NETDEV_TX_OK; return NETDEV_TX_OK;
out: tx_error:
kfree_skb(skb);
dev->stats.tx_errors++; dev->stats.tx_errors++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
...@@ -1013,8 +1013,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ...@@ -1013,8 +1013,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if (IS_ERR(skb)) if (IS_ERR(skb))
goto tx_error; goto tx_error;
skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)); if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
if (IS_ERR(skb))
goto tx_error; goto tx_error;
skb->transport_header = skb->network_header; skb->transport_header = skb->network_header;
...@@ -1105,8 +1104,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ...@@ -1105,8 +1104,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (IS_ERR(skb)) if (IS_ERR(skb))
goto tx_error; goto tx_error;
skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)); if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
if (IS_ERR(skb))
goto tx_error; goto tx_error;
skb->transport_header = skb->network_header; skb->transport_header = skb->network_header;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment