Commit ea3100ab authored by David S. Miller

Merge tag 'mlx5-GRE-Offload' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-08-31 (GRE Offloads support)

This series provides support for MPLS RSS and for GRE TX offloads and RSS.

The first patch, from Gal and Ariel, adds mlx5 driver support for the ConnectX
capability to identify and match on the IP version, so that IPv4 and IPv6 can be
distinguished without specifying the encapsulation type. This allows RSS over
MPLS to work automatically, without matching on the MPLS ethertype, and will
also serve for inner IPv4/IPv6 classification for inner GRE RSS.
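
For illustration, a minimal sketch of the kind of classifier rule this capability
enables, using the mlx5 flow-steering macros; the helper name and values here are
made up for the example, and the actual rules are added in the flow-steering
patches of the series (not shown in the excerpt below):

  /* Sketch: classify IPv4 vs. IPv6 by the device-parsed ip_version field
   * instead of the L2 ethertype, so the same rule also catches IP carried
   * over MPLS or inside GRE, where no IP ethertype is present.
   * Assumes ft_field_support.outer_ip_version is reported by the device.
   */
  static void set_outer_ip_version_match(struct mlx5_flow_spec *spec, u8 ip_version)
  {
          spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
          MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                           outer_headers.ip_version);
          MLX5_SET(fte_match_param, spec->match_value,
                   outer_headers.ip_version, ip_version); /* 4 or 6 */
  }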

The second patch, from Gal, adds TX offload support for GRE tunneled packets by
reporting the needed netdev features.

The third patch, from Gal, adds GRE inner RSS support by creating the needed
device resources (steering tables/rules and traffic classifiers) to match GRE
traffic and perform RSS hashing on the inner headers.
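
As a rough sketch of how the RX pieces fit together (names below are illustrative;
the real rules are built in the flow-steering patch, not shown in this excerpt),
the outer traffic classifier gets a rule that matches IP protocol GRE and forwards
to an inner classifier table, whose rules then spread traffic through the
inner-hash TIRs created in the hunks below:

  /* Illustrative only: an outer rule "ip_version == 4 && ip_protocol == GRE"
   * that forwards matching packets to the inner TTC flow table.
   */
  static struct mlx5_flow_handle *
  add_ipv4_gre_rule(struct mlx5e_priv *priv, struct mlx5_flow_table *inner_ttc_ft)
  {
          struct mlx5_flow_act flow_act = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST };
          struct mlx5_flow_destination dest = {};
          struct mlx5_flow_handle *rule;
          struct mlx5_flow_spec *spec;

          spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
          if (!spec)
                  return ERR_PTR(-ENOMEM);

          spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
          MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
          MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_GRE);
          MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
          MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

          dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
          dest.ft = inner_ttc_ft;

          rule = mlx5_add_flow_rules(priv->fs.ttc.ft.t, spec, &flow_act, &dest, 1);
          kvfree(spec);
          return rule;
  }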

Performance improvement:
Bandwidth of 8 TCP streams over GRE:
    System: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
    NIC: Mellanox Technologies MT28800 Family [ConnectX-5 Ex]
    Before: 21.3 Gbps (Single RQ)
    Now   : 90.5 Gbps (RSS spread on 8 RQs)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents acfb98b9 7b3722fa
@@ -620,6 +620,12 @@ enum mlx5e_traffic_types {
 	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };
 
+enum mlx5e_tunnel_types {
+	MLX5E_TT_IPV4_GRE,
+	MLX5E_TT_IPV6_GRE,
+	MLX5E_NUM_TUNNEL_TT,
+};
+
 enum {
 	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 	MLX5E_STATE_OPENED,
@@ -679,6 +685,7 @@ struct mlx5e_l2_table {
 struct mlx5e_ttc_table {
 	struct mlx5e_flow_table  ft;
 	struct mlx5_flow_handle	 *rules[MLX5E_NUM_TT];
+	struct mlx5_flow_handle  *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
 };
 
 #define ARFS_HASH_SHIFT BITS_PER_BYTE
@@ -711,6 +718,7 @@ enum {
 	MLX5E_VLAN_FT_LEVEL = 0,
 	MLX5E_L2_FT_LEVEL,
 	MLX5E_TTC_FT_LEVEL,
+	MLX5E_INNER_TTC_FT_LEVEL,
 	MLX5E_ARFS_FT_LEVEL
 };
 
@@ -736,6 +744,7 @@ struct mlx5e_flow_steering {
 	struct mlx5e_vlan_table  vlan;
 	struct mlx5e_l2_table    l2;
 	struct mlx5e_ttc_table   ttc;
+	struct mlx5e_ttc_table   inner_ttc;
 	struct mlx5e_arfs_tables arfs;
 };
 
@@ -769,6 +778,7 @@ struct mlx5e_priv {
 	u32                        tisn[MLX5E_MAX_NUM_TC];
 	struct mlx5e_rqt           indir_rqt;
 	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
 	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
 	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
 	int                        hard_mtu;
@@ -903,7 +913,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
 		       struct mlx5e_redirect_rqt_param rrp);
 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
 				    enum mlx5e_traffic_types tt,
-				    void *tirc);
+				    void *tirc, bool inner);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -932,6 +942,12 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
 			      struct mlx5e_params *params, u8 rq_type);
 
+static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
+{
+	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
+		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
+}
+
 static inline
 struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 {
......
@@ -1212,9 +1212,18 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
 		memset(tirc, 0, ctxlen);
-		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
+		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
 		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
 	}
+
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		return;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
......
@@ -2349,9 +2349,10 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
 				    enum mlx5e_traffic_types tt,
-				    void *tirc)
+				    void *tirc, bool inner)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
+			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
 				 MLX5_HASH_FIELD_SEL_DST_IP)
@@ -2500,6 +2501,21 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
 	return err;
 }
 
+static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
+					    enum mlx5e_traffic_types tt,
+					    u32 *tirc)
+{
+	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
+
+	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+}
+
 static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -2865,7 +2881,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
+	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
@@ -2884,6 +2900,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
 	struct mlx5e_tir *tir;
 	void *tirc;
 	int inlen;
+	int i = 0;
 	int err;
 	u32 *in;
 	int tt;
@@ -2899,16 +2916,36 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
 		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
-		if (err)
-			goto err_destroy_tirs;
+		if (err) {
+			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
+			goto err_destroy_inner_tirs;
+		}
 	}
 
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		goto out;
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+		memset(in, 0, inlen);
+		tir = &priv->inner_indir_tir[i];
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
+		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+		if (err) {
+			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
+			goto err_destroy_inner_tirs;
+		}
+	}
+
+out:
 	kvfree(in);
 
 	return 0;
 
-err_destroy_tirs:
-	mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
+err_destroy_inner_tirs:
+	for (i--; i >= 0; i--)
+		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
+
 	for (tt--; tt >= 0; tt--)
 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
@@ -2962,6 +2999,12 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
 	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		return;
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
 }
 
 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
@@ -3499,13 +3542,13 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
 	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
 }
 
-static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
-						    struct sk_buff *skb,
-						    netdev_features_t features)
+static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+						     struct sk_buff *skb,
+						     netdev_features_t features)
 {
 	struct udphdr *udph;
-	u16 proto;
-	u16 port = 0;
+	u8 proto;
+	u16 port;
 
 	switch (vlan_get_protocol(skb)) {
 	case htons(ETH_P_IP):
@@ -3518,14 +3561,17 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
 		goto out;
 	}
 
-	if (proto == IPPROTO_UDP) {
+	switch (proto) {
+	case IPPROTO_GRE:
+		return features;
+	case IPPROTO_UDP:
 		udph = udp_hdr(skb);
 		port = be16_to_cpu(udph->dest);
-	}
 
-	/* Verify if UDP port is being offloaded by HW */
-	if (port && mlx5e_vxlan_lookup_port(priv, port))
-		return features;
+		/* Verify if UDP port is being offloaded by HW */
+		if (mlx5e_vxlan_lookup_port(priv, port))
+			return features;
+	}
 
 out:
 	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
@@ -3549,7 +3595,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	/* Validate if the tunneled packet is being offloaded by HW */
 	if (skb->encapsulation &&
 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
-		return mlx5e_vxlan_features_check(priv, skb, features);
+		return mlx5e_tunnel_features_check(priv, skb, features);
 
 	return features;
 }
@@ -4014,20 +4060,32 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
-	if (mlx5e_vxlan_allowed(mdev)) {
-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
-					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
-					   NETIF_F_GSO_PARTIAL;
+	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+		netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
 		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
 		netdev->hw_enc_features |= NETIF_F_TSO6;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
-					   NETIF_F_GSO_PARTIAL;
+		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
+	}
+
+	if (mlx5e_vxlan_allowed(mdev)) {
+		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
 		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
+	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+		netdev->hw_features     |= NETIF_F_GSO_GRE |
+					   NETIF_F_GSO_GRE_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+					   NETIF_F_GSO_GRE_CSUM;
+		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+						NETIF_F_GSO_GRE_CSUM;
+	}
+
 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
 
 	if (fcs_supported)
......
@@ -83,8 +83,8 @@
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, aRFS */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+/* Vlan, mac, ttc, inner ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 5
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
......
@@ -602,7 +602,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         reserved_at_1a[0x1];
 	u8         tunnel_lso_const_out_ip_id[0x1];
 	u8         reserved_at_1c[0x2];
-	u8         tunnel_statless_gre[0x1];
+	u8         tunnel_stateless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];
 	u8         swp[0x1];
......