Commit ba5dfaff authored by David S. Miller

Merge tag 'mlx5e-updates-2018-12-04' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-12-04

This series includes updates to the mlx5e netdevice driver.

From Saeed, Remove trailing space of tx_pause ethtool stat
From Gal, Cleanup unused defines
From Aya, ethtool support for configuring RX hash fields
From Tariq, Improve ethtool private-flags code structure
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7127f2fe 8ff57c18
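
The private-flags rework in this series replaces per-flag bitmask values and an open-coded chain of handler calls with sequentially numbered flags, a descriptor table of {name, handler} pairs, and a single loop in the set_priv_flags path. The RX hash field support plumbs the ETHTOOL_SRXFH/ETHTOOL_GRXFH requests (the standard "ethtool -N/-n <dev> rx-flow-hash ..." commands) through to the device TIR contexts. Below is a minimal, self-contained sketch of the descriptor-table pattern in plain user-space C; the demo_* names and toy handlers are illustrative only, not the driver's code:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Sequential flag indices; the final entry keeps the count. */
enum demo_priv_flag {
	DEMO_PFLAG_RX_MODER,
	DEMO_PFLAG_TX_MODER,
	DEMO_NUM_PFLAGS, /* keep last */
};

typedef int (*demo_pflag_handler)(bool enable);

struct demo_pflag_desc {
	const char *name;
	demo_pflag_handler handler;
};

static int set_rx_moder(bool enable)
{
	printf("rx_moder -> %s\n", enable ? "on" : "off");
	return 0;
}

static int set_tx_moder(bool enable)
{
	printf("tx_moder -> %s\n", enable ? "on" : "off");
	return 0;
}

/* One table drives both string reporting and flag handling. */
static const struct demo_pflag_desc demo_pflags[DEMO_NUM_PFLAGS] = {
	[DEMO_PFLAG_RX_MODER] = { "rx_moder", set_rx_moder },
	[DEMO_PFLAG_TX_MODER] = { "tx_moder", set_tx_moder },
};

static unsigned int current_pflags;

static int demo_handle_pflag(unsigned int wanted, enum demo_priv_flag flag)
{
	bool enable = !!(wanted & BIT(flag));
	unsigned int changes = wanted ^ current_pflags;
	int err;

	if (!(changes & BIT(flag)))
		return 0; /* nothing to do for this flag */

	err = demo_pflags[flag].handler(enable);
	if (err) {
		fprintf(stderr, "%s private flag '%s' failed: %d\n",
			enable ? "Enable" : "Disable",
			demo_pflags[flag].name, err);
		return err;
	}

	if (enable)
		current_pflags |= BIT(flag);
	else
		current_pflags &= ~BIT(flag);
	return 0;
}

int main(void)
{
	unsigned int wanted = BIT(DEMO_PFLAG_TX_MODER);
	enum demo_priv_flag flag;

	/* A single loop replaces one hand-written call site per flag. */
	for (flag = 0; flag < DEMO_NUM_PFLAGS; flag++)
		if (demo_handle_pflag(wanted, flag))
			break;
	return 0;
}

Compiled with any C compiler, this mirrors the shape of the mlx5e change: the "keep last" enumerator bounds both ethtool string reporting and the handler loop, and BIT(flag) recovers the bitmask that the enum values used to encode directly.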
@@ -49,6 +49,7 @@
 #include <net/switchdev.h>
 #include <net/xdp.h>
 #include <linux/net_dim.h>
+#include <linux/bits.h>
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
@@ -147,9 +148,6 @@ struct page_pool;
 	 MLX5_UMR_MTT_ALIGNMENT))
 #define MLX5E_UMR_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-
-#define MLX5E_NUM_MAIN_GROUPS 9
 
 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -214,22 +212,23 @@ struct mlx5e_umr_wqe {
 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 
 enum mlx5e_priv_flag {
-	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
-	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
-	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
-	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
-	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
+	MLX5E_PFLAG_RX_CQE_BASED_MODER,
+	MLX5E_PFLAG_TX_CQE_BASED_MODER,
+	MLX5E_PFLAG_RX_CQE_COMPRESS,
+	MLX5E_PFLAG_RX_STRIDING_RQ,
+	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+	MLX5E_NUM_PFLAGS, /* Keep last */
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable)			\
 	do {							\
 		if (enable)					\
-			(params)->pflags |= (pflag);		\
+			(params)->pflags |= BIT(pflag);		\
 		else						\
-			(params)->pflags &= ~(pflag);		\
+			(params)->pflags &= ~(BIT(pflag));	\
 	} while (0)
 
-#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -247,9 +246,6 @@ struct mlx5e_params {
 	bool lro_en;
 	u32 lro_wqe_sz;
 	u8  tx_min_inline_mode;
-	u8  rss_hfunc;
-	u8  toeplitz_hash_key[40];
-	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 	bool vlan_strip_disable;
 	bool scatter_fcs_en;
 	bool rx_dim_enabled;
@@ -654,6 +650,13 @@ enum {
 	MLX5E_NIC_PRIO
 };
 
+struct mlx5e_rss_params {
+	u32	indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+	u32	rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+	u8	toeplitz_hash_key[40];
+	u8	hfunc;
+};
+
 struct mlx5e_priv {
 	/* priv data path fields - start */
 	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -674,6 +677,7 @@ struct mlx5e_priv {
 	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
 	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
 	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
+	struct mlx5e_rss_params    rss_params;
 	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
 	struct mlx5e_flow_steering fs;
@@ -799,9 +803,11 @@ struct mlx5e_redirect_rqt_param {
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
 		       struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
-				    enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+				    const struct mlx5e_tirc_config *ttconfig,
 				    void *tirc, bool inner);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -983,11 +989,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+			    struct mlx5e_rss_params *rss_params,
 			    struct mlx5e_params *params,
 			    u16 max_channels, u16 mtu);
 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
-void mlx5e_build_rss_params(struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+			    u16 num_channels);
 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
...
@@ -73,6 +73,22 @@ enum mlx5e_traffic_types {
 	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };
 
+struct mlx5e_tirc_config {
+	u8 l3_prot_type;
+	u8 l4_prot_type;
+	u32 rx_hash_fields;
+};
+
+#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 enum mlx5e_tunnel_types {
 	MLX5E_TT_IPV4_GRE,
 	MLX5E_TT_IPV6_GRE,
...
@@ -135,14 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
 					     ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
 }
 
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
-	"rx_cqe_moder",
-	"tx_cqe_moder",
-	"rx_cqe_compress",
-	"rx_striding_rq",
-	"rx_no_csum_complete",
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+struct pflag_desc {
+	char name[ETH_GSTRING_LEN];
+	mlx5e_pflag_handler handler;
 };
 
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
+
 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 {
 	int i, num_stats = 0;
@@ -153,7 +154,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 			num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
 		return num_stats;
 	case ETH_SS_PRIV_FLAGS:
-		return ARRAY_SIZE(mlx5e_priv_flags);
+		return MLX5E_NUM_PFLAGS;
 	case ETH_SS_TEST:
 		return mlx5e_self_test_num(priv);
 		/* fallthrough */
@@ -183,8 +184,9 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
 	switch (stringset) {
 	case ETH_SS_PRIV_FLAGS:
-		for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
-			strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
+		for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
+			strcpy(data + i * ETH_GSTRING_LEN,
+			       mlx5e_priv_flags[i].name);
 		break;
 
 	case ETH_SS_TEST:
@@ -353,7 +355,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	new_channels.params = priv->channels.params;
 	new_channels.params.num_channels = count;
 	if (!netif_is_rxfh_configured(priv->netdev))
-		mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
+		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
 					      MLX5E_INDIR_RQT_SIZE, count);
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
@@ -931,7 +933,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
 
 u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
 {
-	return sizeof(priv->channels.params.toeplitz_hash_key);
+	return sizeof(priv->rss_params.toeplitz_hash_key);
 }
 
 static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
@@ -957,50 +959,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 			  u8 *hfunc)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_rss_params *rss = &priv->rss_params;
 
 	if (indir)
-		memcpy(indir, priv->channels.params.indirection_rqt,
-		       sizeof(priv->channels.params.indirection_rqt));
+		memcpy(indir, rss->indirection_rqt,
+		       sizeof(rss->indirection_rqt));
 
 	if (key)
-		memcpy(key, priv->channels.params.toeplitz_hash_key,
-		       sizeof(priv->channels.params.toeplitz_hash_key));
+		memcpy(key, rss->toeplitz_hash_key,
+		       sizeof(rss->toeplitz_hash_key));
 
 	if (hfunc)
-		*hfunc = priv->channels.params.rss_hfunc;
+		*hfunc = rss->hfunc;
 
 	return 0;
 }
 
-static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
-{
-	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
-	int tt;
-
-	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-		memset(tirc, 0, ctxlen);
-		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
-		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
-	}
-
-	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
-		return;
-
-	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-		memset(tirc, 0, ctxlen);
-		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
-		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen);
-	}
-}
-
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 			  const u8 *key, const u8 hfunc)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_rss_params *rss = &priv->rss_params;
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	bool hash_changed = false;
 	void *in;
@@ -1016,15 +995,14 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 
 	mutex_lock(&priv->state_lock);
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
-	    hfunc != priv->channels.params.rss_hfunc) {
-		priv->channels.params.rss_hfunc = hfunc;
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
+		rss->hfunc = hfunc;
 		hash_changed = true;
 	}
 
 	if (indir) {
-		memcpy(priv->channels.params.indirection_rqt, indir,
-		       sizeof(priv->channels.params.indirection_rqt));
+		memcpy(rss->indirection_rqt, indir,
+		       sizeof(rss->indirection_rqt));
 
 		if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 			u32 rqtn = priv->indir_rqt.rqtn;
@@ -1032,7 +1010,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 				.is_rss = true,
 				{
 					.rss = {
-						.hfunc = priv->channels.params.rss_hfunc,
+						.hfunc = rss->hfunc,
 						.channels  = &priv->channels,
 					},
 				},
@@ -1043,10 +1021,9 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 	}
 
 	if (key) {
-		memcpy(priv->channels.params.toeplitz_hash_key, key,
-		       sizeof(priv->channels.params.toeplitz_hash_key));
-		hash_changed = hash_changed ||
-			       priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
+		memcpy(rss->toeplitz_hash_key, key,
+		       sizeof(rss->toeplitz_hash_key));
+		hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP;
 	}
 
 	if (hash_changed)
@@ -1510,8 +1487,6 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
 	return 0;
 }
 
-typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-
 static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
 				     bool is_rx_cq)
 {
@@ -1674,23 +1649,30 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
 	return 0;
 }
 
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
+	{ "rx_cqe_moder",        set_pflag_rx_cqe_based_moder },
+	{ "tx_cqe_moder",        set_pflag_tx_cqe_based_moder },
+	{ "rx_cqe_compress",     set_pflag_rx_cqe_compress },
+	{ "rx_striding_rq",      set_pflag_rx_striding_rq },
+	{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
+};
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
 			      u32 wanted_flags,
-			      enum mlx5e_priv_flag flag,
-			      mlx5e_pflag_handler pflag_handler)
+			      enum mlx5e_priv_flag flag)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool enable = !!(wanted_flags & flag);
+	bool enable = !!(wanted_flags & BIT(flag));
 	u32 changes = wanted_flags ^ priv->channels.params.pflags;
 	int err;
 
-	if (!(changes & flag))
+	if (!(changes & BIT(flag)))
 		return 0;
 
-	err = pflag_handler(netdev, enable);
+	err = mlx5e_priv_flags[flag].handler(netdev, enable);
 	if (err) {
-		netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
-			   enable ? "Enable" : "Disable", flag, err);
+		netdev_err(netdev, "%s private flag '%s' failed err %d\n",
+			   enable ? "Enable" : "Disable",
+			   mlx5e_priv_flags[flag].name, err);
 		return err;
 	}
 
@@ -1701,38 +1683,17 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
 static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	enum mlx5e_priv_flag pflag;
 	int err;
 
 	mutex_lock(&priv->state_lock);
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_CQE_BASED_MODER,
-				 set_pflag_rx_cqe_based_moder);
-	if (err)
-		goto out;
 
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_TX_CQE_BASED_MODER,
-				 set_pflag_tx_cqe_based_moder);
-	if (err)
-		goto out;
-
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_CQE_COMPRESS,
-				 set_pflag_rx_cqe_compress);
-	if (err)
-		goto out;
-
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_STRIDING_RQ,
-				 set_pflag_rx_striding_rq);
-	if (err)
-		goto out;
-
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
-				 set_pflag_rx_no_csum_complete);
+	for (pflag = 0; pflag < MLX5E_NUM_PFLAGS; pflag++) {
+		err = mlx5e_handle_pflag(netdev, pflags, pflag);
+		if (err)
+			break;
+	}
 
-out:
 	mutex_unlock(&priv->state_lock);
 
 	/* Need to fix some features.. */
...
@@ -771,6 +771,112 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
 	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
 }
 
+static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
+{
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		return MLX5E_TT_IPV4_TCP;
+	case TCP_V6_FLOW:
+		return MLX5E_TT_IPV6_TCP;
+	case UDP_V4_FLOW:
+		return MLX5E_TT_IPV4_UDP;
+	case UDP_V6_FLOW:
+		return MLX5E_TT_IPV6_UDP;
+	case AH_V4_FLOW:
+		return MLX5E_TT_IPV4_IPSEC_AH;
+	case AH_V6_FLOW:
+		return MLX5E_TT_IPV6_IPSEC_AH;
+	case ESP_V4_FLOW:
+		return MLX5E_TT_IPV4_IPSEC_ESP;
+	case ESP_V6_FLOW:
+		return MLX5E_TT_IPV6_IPSEC_ESP;
+	case IPV4_FLOW:
+		return MLX5E_TT_IPV4;
+	case IPV6_FLOW:
+		return MLX5E_TT_IPV6;
+	default:
+		return MLX5E_NUM_INDIR_TIRS;
+	}
+}
+
+static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
+				  struct ethtool_rxnfc *nfc)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	enum mlx5e_traffic_types tt;
+	u8 rx_hash_field = 0;
+	void *in;
+
+	tt = flow_type_to_traffic_type(nfc->flow_type);
+	if (tt == MLX5E_NUM_INDIR_TIRS)
+		return -EINVAL;
+
+	/* RSS does not support anything other than hashing to queues
+	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
+	 * port.
+	 */
+	if (nfc->flow_type != TCP_V4_FLOW &&
+	    nfc->flow_type != TCP_V6_FLOW &&
+	    nfc->flow_type != UDP_V4_FLOW &&
+	    nfc->flow_type != UDP_V6_FLOW)
+		return -EOPNOTSUPP;
+
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EOPNOTSUPP;
+
+	if (nfc->data & RXH_IP_SRC)
+		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
+	if (nfc->data & RXH_IP_DST)
+		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
+	if (nfc->data & RXH_L4_B_0_1)
+		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+	if (nfc->data & RXH_L4_B_2_3)
+		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	mutex_lock(&priv->state_lock);
+
+	if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
+		goto out;
+
+	priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
+	mlx5e_modify_tirs_hash(priv, in, inlen);
+
+out:
+	mutex_unlock(&priv->state_lock);
+	kvfree(in);
+	return 0;
+}
+
+static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
+				  struct ethtool_rxnfc *nfc)
+{
+	enum mlx5e_traffic_types tt;
+	u32 hash_field = 0;
+
+	tt = flow_type_to_traffic_type(nfc->flow_type);
+	if (tt == MLX5E_NUM_INDIR_TIRS)
+		return -EINVAL;
+
+	hash_field = priv->rss_params.rx_hash_fields[tt];
+	nfc->data = 0;
+
+	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
+		nfc->data |= RXH_IP_SRC;
+	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
+		nfc->data |= RXH_IP_DST;
+	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
+		nfc->data |= RXH_L4_B_0_1;
+	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
+		nfc->data |= RXH_L4_B_2_3;
+
+	return 0;
+}
+
 int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
 	int err = 0;
@@ -783,6 +889,9 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	case ETHTOOL_SRXCLSRLDEL:
 		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
 		break;
+	case ETHTOOL_SRXFH:
+		err = mlx5e_set_rss_hash_opt(priv, cmd);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -810,6 +919,9 @@ int mlx5e_get_rxnfc(struct net_device *dev,
 	case ETHTOOL_GRXCLSRLALL:
 		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
 		break;
+	case ETHTOOL_GRXFH:
+		err = mlx5e_get_rss_hash_opt(priv, info);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
...
@@ -2504,7 +2504,7 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
 			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
 				ix = mlx5e_bits_invert(i, ilog2(sz));
 
-			ix = priv->channels.params.indirection_rqt[ix];
+			ix = priv->rss_params.indirection_rqt[ix];
 			rqn = rrp.rss.channels->c[ix]->rq.rqn;
 		} else {
 			rqn = rrp.rqn;
@@ -2587,7 +2587,7 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
 		{
 			.rss = {
 				.channels  = chs,
-				.hfunc     = chs->params.rss_hfunc,
+				.hfunc     = priv->rss_params.hfunc,
 			}
 		},
 	};
@@ -2607,6 +2607,54 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
 	mlx5e_redirect_rqts(priv, drop_rrp);
 }
 
+static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
+	[MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+	},
+	[MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+	},
+	[MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+	},
+	[MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+	},
+	[MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+				     .l4_prot_type = 0,
+				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+	},
+	[MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+				     .l4_prot_type = 0,
+				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+	},
+	[MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+				      .l4_prot_type = 0,
+				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+	},
+	[MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+				      .l4_prot_type = 0,
+				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+	},
+	[MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+			    .l4_prot_type = 0,
+			    .rx_hash_fields = MLX5_HASH_IP,
+	},
+	[MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+			    .l4_prot_type = 0,
+			    .rx_hash_fields = MLX5_HASH_IP,
+	},
+};
+
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
+{
+	return tirc_default_config[tt];
+}
+
 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
 {
 	if (!params->lro_en)
@@ -2622,116 +2670,68 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
 }
 
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
-				    enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+				    const struct mlx5e_tirc_config *ttconfig,
 				    void *tirc, bool inner)
 {
 	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
 			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
-#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
-	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
-	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
+	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
+	if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
 		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
 					     rx_hash_toeplitz_key);
 		size_t len = MLX5_FLD_SZ_BYTES(tirc,
 					       rx_hash_toeplitz_key);
 
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		memcpy(rss_key, params->toeplitz_hash_key, len);
+		memcpy(rss_key, rss_params->toeplitz_hash_key, len);
 	}
+	MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+		 ttconfig->l3_prot_type);
+	MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+		 ttconfig->l4_prot_type);
+	MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+		 ttconfig->rx_hash_fields);
+}
 
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
-	}
+static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
+					enum mlx5e_traffic_types tt,
+					u32 rx_hash_fields)
+{
+	*ttconfig                = tirc_default_config[tt];
+	ttconfig->rx_hash_fields = rx_hash_fields;
+}
+
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+	struct mlx5e_rss_params *rss = &priv->rss_params;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	struct mlx5e_tirc_config ttconfig;
+	int tt;
+
+	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_update_rx_hash_fields(&ttconfig, tt,
+					    rss->rx_hash_fields[tt]);
+		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
+
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		return;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_update_rx_hash_fields(&ttconfig, tt,
+					    rss->rx_hash_fields[tt]);
+		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
+		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
				     inlen);
+	}
 }
@@ -2788,7 +2788,8 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
 	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
 
-	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+				       &tirc_default_config[tt], tirc, true);
 }
 
 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
@@ -3180,7 +3181,9 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+				       &tirc_default_config[tt], tirc, false);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
@@ -4518,15 +4521,23 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
 	mlx5e_init_rq_type_params(mdev, params);
 }
 
-void mlx5e_build_rss_params(struct mlx5e_params *params)
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+			    u16 num_channels)
 {
-	params->rss_hfunc = ETH_RSS_HASH_XOR;
-	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
-	mlx5e_build_default_indir_rqt(params->indirection_rqt,
-				      MLX5E_INDIR_RQT_SIZE, params->num_channels);
+	enum mlx5e_traffic_types tt;
+
+	rss_params->hfunc = ETH_RSS_HASH_XOR;
+	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
+			    sizeof(rss_params->toeplitz_hash_key));
+	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
+				      MLX5E_INDIR_RQT_SIZE, num_channels);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+		rss_params->rx_hash_fields[tt] =
+			tirc_default_config[tt].rx_hash_fields;
 }
 
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+			    struct mlx5e_rss_params *rss_params,
 			    struct mlx5e_params *params,
 			    u16 max_channels, u16 mtu)
 {
@@ -4575,7 +4586,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
 
 	/* RSS */
-	mlx5e_build_rss_params(params);
+	mlx5e_build_rss_params(rss_params, params->num_channels);
 }
 
 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4748,14 +4759,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 			  void *ppriv)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_rss_params *rss = &priv->rss_params;
 	int err;
 
 	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
 	if (err)
 		return err;
 
-	mlx5e_build_nic_params(mdev, &priv->channels.params,
-			       mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+	mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
+			       mlx5e_get_netdev_max_channels(netdev),
+			       netdev->mtu);
 
 	mlx5e_timestamp_init(priv);
@@ -5030,7 +5043,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	if (priv->channels.params.num_channels > max_nch) {
 		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
 		priv->channels.params.num_channels = max_nch;
-		mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
 					      MLX5E_INDIR_RQT_SIZE, max_nch);
 	}
...
@@ -1012,7 +1012,9 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 };
 
 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params, u16 mtu)
+				   struct mlx5e_params *params,
+				   struct mlx5e_rss_params *rss_params,
+				   u16 mtu)
 {
 	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
@@ -1034,7 +1036,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
 	/* RSS */
-	mlx5e_build_rss_params(params);
+	mlx5e_build_rss_params(rss_params, params->num_channels);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -1087,7 +1089,8 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
 	priv->channels.params.num_channels =
 				mlx5e_get_netdev_max_channels(netdev);
 
-	mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
+	mlx5e_build_rep_params(mdev, &priv->channels.params,
+			       &priv->rss_params, netdev->mtu);
 	mlx5e_build_rep_netdev(netdev);
 
 	mlx5e_timestamp_init(priv);
...
@@ -936,7 +936,7 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
 };
 
 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
-	{ "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
 };
...
@@ -316,7 +316,7 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 
 	for (i = 0; i < sz; i++) {
 		ix = i;
-		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
 			ix = mlx5e_bits_invert(i, ilog2(sz));
 		ix = indirection_rqt[ix];
 		rqn = hp->pair->rqn[ix];
@@ -360,13 +360,15 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
 	void *tirc;
 
 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
+
 		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
 
 		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
 		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
-		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
 
 		err = mlx5_core_create_tir(hp->func_mdev, in,
 					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
...
@@ -87,7 +87,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 	netdev->mtu = max_mtu;
 
-	mlx5e_build_nic_params(mdev, &priv->channels.params,
+	mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
 			       mlx5e_get_netdev_max_channels(netdev),
 			       netdev->mtu);
 	mlx5i_build_nic_params(mdev, &priv->channels.params);
...