Commit 6415aa50 authored by David S. Miller

Merge tag 'mlx5-fixes-2017-01-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2017-01-27

A couple of mlx5 core and ethernet driver fixes.

From Or, a couple of error-return-value and error-handling fixes.
From Hadar, support for TC encapsulation offloads even when the mlx5e uplink
device is stacked under an upper device.
From Gal, two patches to fix RSS hash modification via ethtool.
From Moshe, the addition of a missing ets capability check.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 051a2e08 d15118af
...
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
...
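A note on the error-code change that recurs throughout this series: ENOTSUPP (524) is a kernel-internal errno (introduced for NFSv3 and not exported to userspace), so callers of ioctl/ethtool paths that leak it see an unknown error number, while EOPNOTSUPP (95) is the POSIX-visible "Operation not supported". A minimal userspace illustration (not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* EOPNOTSUPP has a real message; ENOTSUPP's value does not. */
	printf("95:  %s\n", strerror(95));   /* "Operation not supported" */
	printf("524: %s\n", strerror(524));  /* "Unknown error 524" */
	return 0;
}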
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
...
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);
...
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&priv->state_lock);
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;
 
 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
 	mutex_unlock(&priv->state_lock);
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;
 
 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!rx_mode_changed)
 		return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	bool reset;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
...
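For context on the mlx5e_set_rxfh() change above: the TIRs are now rewritten only when the effective hash configuration changes; in particular, a new key is irrelevant while the XOR hash function is selected, since only Toeplitz consumes the key. A standalone sketch of that decision (simplified; rss_needs_rehash() is a hypothetical helper, not driver code):

#include <stdbool.h>
#include <linux/ethtool.h>	/* ETH_RSS_HASH_NO_CHANGE, ETH_RSS_HASH_TOP */

static bool rss_needs_rehash(__u8 cur_hfunc, __u8 new_hfunc, bool new_key)
{
	/* An explicit, different hash function always forces a rewrite. */
	if (new_hfunc != ETH_RSS_HASH_NO_CHANGE && new_hfunc != cur_hfunc)
		return true;

	/* A new key matters only to Toeplitz; the XOR hash ignores it. */
	if (new_key) {
		__u8 hf = (new_hfunc == ETH_RSS_HASH_NO_CHANGE) ?
			  cur_hfunc : new_hfunc;

		return hf == ETH_RSS_HASH_TOP;
	}

	return false;
}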
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {
...
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	ns = mlx5_get_flow_namespace(priv->mdev,
 				     MLX5_FLOW_NAMESPACE_ETHTOOL);
 	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),
...
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
 				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	    < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
...
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   __be32 *saddr,
 				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 	int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;
 
 	ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 	*out_n = n;
 	*saddr = fl4->saddr;
 	*out_ttl = ttl;
-	*out_dev = rt->dst.dev;
 
 	return 0;
 }
...
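Hadar's change above reduces to one decision: when the route's egress device does not share a hardware e-switch with the mlx5e netdev, encapsulated traffic is steered through the uplink instead of failing the offload. Condensed into a helper (the wrapper itself is illustrative; switchdev_port_same_parent_id() and mlx5_eswitch_get_uplink_netdev() are the real kernel calls used by the patch):

static struct net_device *encap_egress_dev(struct mlx5e_priv *priv,
					   struct rtable *rt)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* not on our e-switch: hand the packet to the uplink instead */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		return mlx5_eswitch_get_uplink_netdev(esw);

	return rt->dst.dev;
}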
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
 		  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
...
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 	return 0;
 
 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	int vport;
 	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 		goto err_reps;
 	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);
 
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
...
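The esw_offloads_init()/esw_offloads_stop() hunks above reorder the PF RoCE teardown so it happens before the offloads FDB is created (otherwise packets that miss the FDB rules could still be steered into RoCE) and restore RoCE on every exit path, including the new create_fdb_err label. A userspace-compilable sketch of that unwind ordering, with stand-in names for the driver calls:

#include <stdio.h>

static void pf_roce_remove(void)  { puts("PF RoCE removed"); }
static void pf_roce_restore(void) { puts("PF RoCE restored"); }
static int  create_offloads_fdb(void) { return -1; /* simulate failure */ }

static int offloads_init_sketch(void)
{
	int err;

	pf_roce_remove();		/* before any offloads table exists */

	err = create_offloads_fdb();
	if (err)
		goto create_fdb_err;

	return 0;

create_fdb_err:
	pf_roce_restore();		/* mirrors the new error label */
	return err;
}

int main(void)
{
	return offloads_init_sketch() ? 1 : 0;
}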
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 						flow_table_properties_nic_receive.
 						flow_modify_en);
 	if (!atomic_mod_cap)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	opmod = 1;
 
 	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
...
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 	struct mlx5_flow_table *ft;
 
 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
 		return -EINVAL;
 	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
 	if (IS_ERR(ft)) {
...
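On the fs_core.c hunk above: WARN_ON() evaluates to its condition, so the rewritten check still returns -EINVAL but now also leaves a stack trace when the anchor namespace is unexpectedly absent. A userspace stand-in showing the idiom (GCC statement expressions, as the kernel itself uses):

#include <stdio.h>

#define WARN_ON_SKETCH(cond) \
	({ int __c = !!(cond); \
	   if (__c) \
		fprintf(stderr, "WARN at %s:%d\n", __FILE__, __LINE__); \
	   __c; })

int main(void)
{
	void *ns = NULL;	/* simulate a missing namespace */

	if (WARN_ON_SKETCH(!ns))
		return 22;	/* EINVAL's magnitude */

	return 0;
}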
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 		return 0;
 	}
 
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
...
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
 				    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	memset(in, 0, sizeof(in));
 	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
...
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
 	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	in = mlx5_vzalloc(inlen);
 	if (!in)
...