Commit 53da5b47 authored by David S. Miller

Merge branch 'tunnel-features-and-gso-partial'

Alexander Duyck says:

====================
Fix Tunnel features and enable GSO partial for several drivers

This patch series is meant to get the best possible VXLAN and GRE tunnel
performance out of the Mellanox ConnectX-3/4 and Broadcom NetXtreme-C/E
adapters.

The first 3 patches address issues I found with GSO_PARTIAL and
TSO_MANGLEID.

The next 4 patches go through and enable GSO_PARTIAL for VXLAN tunnels that
have an outer checksum enabled, and then enable IPv6 support where I can.
One outstanding issue is that I wasn't able to get offloads working with
outer IPv6 headers on mlx4.  That wasn't a feature that was enabled before,
so it isn't technically a regression; engineers from Mellanox said they
would look into it, since they thought it should be supported.

The last patch enables GSO_PARTIAL for VXLAN and GRE tunnels on the bnxt
driver.  One piece of feedback I received on the patch was that the
hardware has been globally configured to always compute the checksum field
for IPv6 UDP tunnels.  I plan to work with Broadcom to get that addressed
so that we only populate the checksum field if it was requested by the
network stack.

v2: Rebased patches on top of the latest changes to the mlx4/mlx5 drivers.
    Added the bnxt driver patch, as I received feedback on the RFC.
v3: Moved 2 patches into the series for net, as they were generic fixes.
    Added a patch to disable GSO partial if the frame is less than 2x the
    size of the MSS.

    There are outstanding issues, as called out above, that need to be
    addressed; however, they were present before these patches, so it
    isn't as if they introduce a regression.  In addition, the gains are
    easy to see, so there should be no issue with applying the driver
    patches while the IPv6 mlx4_en and bnxt issues are being researched.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f132ae7c 152971ee
@@ -6219,14 +6219,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   NETIF_F_TSO | NETIF_F_TSO6 |
 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
 			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
-			   NETIF_F_RXHASH |
+			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
 			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
 	dev->hw_enc_features =
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			NETIF_F_TSO | NETIF_F_TSO6 |
 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
-			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+			NETIF_F_GSO_PARTIAL;
+	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				    NETIF_F_GSO_GRE_CSUM;
 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
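
For reference, the driver hunks in this series all follow the same
three-step pattern seen here: advertise the checksummed tunnel GSO types
together with NETIF_F_GSO_PARTIAL, and record in gso_partial_features
which of those types the hardware can only handle via GSO partial.  Below
is a toy, self-contained model of how such flags combine; the constants,
struct, and logic are illustrative stand-ins, not the kernel's definitions.

/* Toy model of the feature-advertisement pattern above; flag values
 * and types are illustrative, not the kernel's NETIF_F_* bits. */
#include <stdio.h>
#include <stdint.h>

#define F_GSO_UDP_TUNNEL      (1u << 0)	/* plain UDP tunnel TSO */
#define F_GSO_UDP_TUNNEL_CSUM (1u << 1)	/* ...with outer UDP checksum */
#define F_GSO_GRE_CSUM        (1u << 2)	/* GRE with outer checksum */
#define F_GSO_PARTIAL         (1u << 3)	/* device finishes segmentation */

struct toy_dev {
	uint32_t hw_features;		/* user-toggleable offloads */
	uint32_t gso_partial_features;	/* types valid only under GSO partial */
};

int main(void)
{
	struct toy_dev dev = {
		.hw_features = F_GSO_UDP_TUNNEL | F_GSO_UDP_TUNNEL_CSUM |
			       F_GSO_GRE_CSUM | F_GSO_PARTIAL,
		.gso_partial_features = F_GSO_UDP_TUNNEL_CSUM |
					F_GSO_GRE_CSUM,
	};
	uint32_t usable = dev.hw_features;

	/* If GSO partial is (or gets) disabled, every type that depends
	 * on it must drop out as well. */
	if (!(usable & F_GSO_PARTIAL))
		usable &= ~dev.gso_partial_features;

	printf("usable GSO types: %#x\n", (unsigned int)usable);
	return 0;
}

Clearing F_GSO_PARTIAL in the example immediately strips the two
checksummed tunnel types as well, which is why the drivers set all three
fields as a unit.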
...
@@ -2357,8 +2357,12 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
 	}
 
 	/* set offloads */
-	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				      NETIF_F_RXCSUM |
+				      NETIF_F_TSO | NETIF_F_TSO6 |
+				      NETIF_F_GSO_UDP_TUNNEL |
+				      NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				      NETIF_F_GSO_PARTIAL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2367,8 +2371,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
 						 vxlan_del_task);
 
 	/* unset offloads */
-	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+					NETIF_F_RXCSUM |
+					NETIF_F_TSO | NETIF_F_TSO6 |
+					NETIF_F_GSO_UDP_TUNNEL |
+					NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					NETIF_F_GSO_PARTIAL);
 
 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
 				  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2427,7 +2435,18 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
 					      netdev_features_t features)
 {
 	features = vlan_features_check(skb, features);
-	return vxlan_features_check(skb, features);
+	features = vxlan_features_check(skb, features);
+
+	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+	 * support inner IPv6 checksums and segmentation so we need to
+	 * strip that feature if this is an IPv6 encapsulated frame.
+	 */
+	if (skb->encapsulation &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL) &&
+	    (ip_hdr(skb)->version != 4))
+		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+	return features;
 }
 #endif
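
mlx4_en_features_check() above is the driver's ndo_features_check hook;
the stack consults it per packet on transmit, so the driver can strip
offloads only for frames its hardware can't handle (here, tunnels with an
outer IPv6 header) while leaving the features enabled device-wide.  A toy
model of that per-packet masking follows; the types and flags are
illustrative stand-ins, not the kernel's.

/* Toy per-packet feature masking in the spirit of ndo_features_check. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define F_CSUM (1u << 0)
#define F_GSO  (1u << 1)

struct toy_skb {
	bool encapsulation;		/* tunnel header present */
	uint8_t outer_ip_version;	/* 4 or 6 */
};

/* Mirrors the new check: the hardware can't checksum an outer IPv6
 * header, so drop checksum/GSO for that frame and let software do it. */
static uint32_t features_check(const struct toy_skb *skb, uint32_t features)
{
	if (skb->encapsulation && skb->outer_ip_version != 4)
		features &= ~(F_CSUM | F_GSO);
	return features;
}

int main(void)
{
	struct toy_skb v4_tun = { true, 4 }, v6_tun = { true, 6 };
	uint32_t features = F_CSUM | F_GSO;

	printf("v4 tunnel: %#x\n", (unsigned int)features_check(&v4_tun, features));
	printf("v6 tunnel: %#x\n", (unsigned int)features_check(&v6_tun, features));
	return 0;
}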
@@ -2992,8 +3011,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	}
 
 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				    NETIF_F_GSO_PARTIAL;
+		dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+				 NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				 NETIF_F_GSO_PARTIAL;
+		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
 	mdev->pndev[port] = dev;
...
@@ -41,6 +41,7 @@
 #include <linux/vmalloc.h>
 #include <linux/tcp.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/moduleparam.h>
 
 #include "mlx4_en.h"
@@ -920,8 +921,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 						 tx_ind, fragptr);
 
 	if (skb->encapsulation) {
-		struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
-		if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+		union {
+			struct iphdr *v4;
+			struct ipv6hdr *v6;
+			unsigned char *hdr;
+		} ip;
+		u8 proto;
+
+		ip.hdr = skb_inner_network_header(skb);
+		proto = (ip.v4->version == 4) ? ip.v4->protocol :
+						ip.v6->nexthdr;
+
+		if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
 			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
 		else
 			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
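
The pointer union works because both IP headers keep the 4-bit version
field in the top nibble of their first byte, so reading through the v4
view is well defined before we know which header is actually present.
Below is a small userspace demonstration built against the kernel's UAPI
headers; the sample header values are arbitrary.

/* Demonstrates the version-probing union from the hunk above. */
#include <stdio.h>
#include <linux/ip.h>	/* struct iphdr */
#include <linux/ipv6.h>	/* struct ipv6hdr */
#include <linux/in.h>	/* IPPROTO_UDP */

int main(void)
{
	struct ipv6hdr v6 = { .version = 6, .nexthdr = IPPROTO_UDP };
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	__u8 proto;

	ip.hdr = (unsigned char *)&v6;

	/* Same selection logic as mlx4_en_xmit(): probe the shared
	 * version nibble, then read the protocol from the right header. */
	proto = (ip.v4->version == 4) ? ip.v4->protocol : ip.v6->nexthdr;

	printf("proto = %u (17 == UDP)\n", proto);
	return 0;
}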
...
@@ -2802,13 +2802,17 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	if (mlx5e_vxlan_allowed(mdev)) {
-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
 		netdev->hw_enc_features |= NETIF_F_TSO6;
-		netdev->hw_enc_features |= NETIF_F_RXHASH;
 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
+		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
...
@@ -6721,6 +6721,10 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
 		features &= ~NETIF_F_TSO6;
 	}
 
+	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
+	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
+		features &= ~NETIF_F_TSO_MANGLEID;
+
 	/* TSO ECN requires that TSO is present as well. */
 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
 		features &= ~NETIF_F_TSO_ECN;
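
This new rule mirrors its neighbors: a dependent flag is cleared whenever
its prerequisite is off, e.g. when the user turns off TSO via ethtool
while the tx-tcp-mangleid-segmentation feature is still set.  A
self-contained sketch of the rule, using toy flag values rather than the
kernel's NETIF_F_* bits:

/* Standalone illustration of the TSO_MANGLEID dependency added above. */
#include <stdio.h>
#include <stdint.h>

#define F_TSO          (1u << 0)
#define F_TSO_MANGLEID (1u << 1)

static uint32_t fix_features(uint32_t features)
{
	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled. */
	if ((features & F_TSO_MANGLEID) && !(features & F_TSO))
		features &= ~F_TSO_MANGLEID;
	return features;
}

int main(void)
{
	/* MANGLEID requested while TSO itself is disabled. */
	uint32_t requested = F_TSO_MANGLEID;

	printf("requested %#x -> effective %#x\n",
	       (unsigned int)requested,
	       (unsigned int)fix_features(requested));
	return 0;
}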
...
@@ -3080,8 +3080,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	unsigned int headroom;
 	unsigned int len = head_skb->len;
 	__be16 proto;
-	bool csum;
-	int sg = !!(features & NETIF_F_SG);
+	bool csum, sg;
 	int nfrags = skb_shinfo(head_skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
@@ -3093,15 +3092,19 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
+	sg = !!(features & NETIF_F_SG);
 	csum = !!can_checksum_protocol(features, proto);
 
 	/* GSO partial only requires that we trim off any excess that
 	 * doesn't fit into an MSS sized block, so take care of that
 	 * now.
 	 */
-	if (features & NETIF_F_GSO_PARTIAL) {
+	if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
 		partial_segs = len / mss;
-		mss *= partial_segs;
+		if (partial_segs > 1)
+			mss *= partial_segs;
+		else
+			partial_segs = 0;
 	}
 
 	headroom = skb_headroom(head_skb);
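
The guard added to the partial_segs computation is the v3 change called
out in the cover letter: for a frame shorter than twice the MSS,
partial_segs comes out as 1 (or 0), so scaling the MSS buys nothing and
the frame now falls back to plain segmentation.  A standalone walk-through
of the arithmetic, with arbitrary example sizes:

/* Walk-through of the GSO-partial trimming math above. */
#include <stdio.h>

static void trim(unsigned int len, unsigned int orig_mss)
{
	unsigned int mss = orig_mss;
	unsigned int partial_segs = len / mss;

	if (partial_segs > 1)
		mss *= partial_segs;	/* one device-sized super-segment */
	else
		partial_segs = 0;	/* under 2 * MSS: plain GSO instead */

	printf("len=%u mss=%u -> partial_segs=%u effective_mss=%u tail=%u\n",
	       len, orig_mss, partial_segs, mss,
	       partial_segs ? len % orig_mss : 0);
}

int main(void)
{
	trim(15000, 1448);	/* 10 full MSS blocks plus a 520-byte tail */
	trim(2000, 1448);	/* < 2 * MSS, partial handling is skipped */
	return 0;
}

With the 15000-byte payload, software emits one 14480-byte segment for the
device to split and handles the 520-byte tail itself; with the 2000-byte
payload, the old code would have left partial_segs at 1 and gained nothing.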
...