Commit 63de273f authored by David S. Miller

Merge tag 'mlx5e-updates-2018-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-12-14 (VF Lag)

From Aviv Heller,

Subsequent patches introduce VF LAG, which provides load-balancing and
high-availability capabilities for VFs associated with different
physical ports of the same ConnectX card.

This series consists of the following:
 - mlx5 devcom, driver infrastructure that facilitates operations involving
   both core devices (physical functions) of the same card, so the two driver
   instances can synchronize and communicate with each other.
 - Infrastructure for TC rule duplication.
 - Changes to LAG logic to enable its use when SR-IOV is enabled.
 - Switchdev mode on the PFs is the only mode currently supported.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bedf3b33 95824666
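
For orientation, here is a minimal sketch (not part of the patchset) of how a driver
component is expected to use the devcom infrastructure this series adds, modeled on the
eswitch-offloads usage further down in the diff. The MY_EVENT_PAIR value and the
my_comp_*() names are illustrative only, and a real user would add its own id to
enum mlx5_devcom_components rather than reuse MLX5_DEVCOM_ESW_OFFLOADS:

    /* Illustrative sketch only -- mirrors the ESW_OFFLOADS pattern below. */
    #define MY_EVENT_PAIR 0

    static int my_comp_event(int event, void *my_data, void *event_data)
    {
    	/* my_data is the data this instance registered; event_data is
    	 * whatever the peer passed to mlx5_devcom_send_event().
    	 */
    	return 0;
    }

    static void my_comp_init(struct mlx5_core_dev *dev, void *my_data)
    {
    	struct mlx5_devcom *devcom = dev->priv.devcom;

    	/* Register a handler under a component id, then announce this
    	 * instance to the other PF of the same card (if present).
    	 */
    	mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
    				       my_comp_event, my_data);
    	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
    			       MY_EVENT_PAIR, my_data);
    }

Once both PFs of a card have registered, the peer's handler runs and the component can
latch the paired state with mlx5_devcom_set_paired(), exactly as
mlx5_esw_offloads_devcom_event() does in the eswitch offloads code below.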
@@ -48,13 +48,21 @@ static const struct mlx5_ib_profile rep_profile = {
 static int
 mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	if (!__mlx5_ib_add(ibdev, ibdev->profile))
+		return -EINVAL;
 	return 0;
 }
 
 static void
 mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	rep->rep_if[REP_IB].priv = NULL;
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static int
@@ -89,6 +97,7 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 	rep->rep_if[REP_IB].priv = NULL;
+	ib_dealloc_device(&dev->ib_dev);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
......
@@ -445,7 +445,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	if (!ndev)
 		goto out;
 
-	if (mlx5_lag_is_active(dev->mdev)) {
+	if (dev->lag_active) {
 		rcu_read_lock();
 		upper = netdev_master_upper_dev_get_rcu(ndev);
 		if (upper) {
@@ -1848,7 +1848,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	context->lib_caps = req.lib_caps;
 	print_lib_caps(dev, context->lib_caps);
 
-	if (mlx5_lag_is_active(dev->mdev)) {
+	if (dev->lag_active) {
 		u8 port = mlx5_core_native_port_num(dev->mdev);
 
 		atomic_set(&context->tx_port_affinity,
@@ -4841,7 +4841,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 	struct mlx5_flow_table *ft;
 	int err;
 
-	if (!ns || !mlx5_lag_is_active(mdev))
+	if (!ns || !mlx5_lag_is_roce(mdev))
 		return 0;
 
 	err = mlx5_cmd_create_vport_lag(mdev);
@@ -4855,6 +4855,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 	}
 
 	dev->flow_db->lag_demux_ft = ft;
+	dev->lag_active = true;
 	return 0;
 
 err_destroy_vport_lag:
@@ -4866,7 +4867,9 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 
-	if (dev->flow_db->lag_demux_ft) {
+	if (dev->lag_active) {
+		dev->lag_active = false;
+
 		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
 		dev->flow_db->lag_demux_ft = NULL;
@@ -6173,7 +6176,7 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	const char *name;
 
 	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
-	if (!mlx5_lag_is_active(dev->mdev))
+	if (!mlx5_lag_is_roce(dev->mdev))
 		name = "mlx5_%d";
 	else
 		name = "mlx5_bond_%d";
@@ -6207,18 +6210,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
 	cancel_delay_drop(dev);
 }
 
-static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_register_vport_reps(dev);
-	return 0;
-}
-
-static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_unregister_vport_reps(dev);
-}
-
 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
 {
 	dev->mdev_events.notifier_call = mlx5_ib_event;
@@ -6257,8 +6248,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		if (profile->stage[stage].cleanup)
 			profile->stage[stage].cleanup(dev);
 	}
-
-	ib_dealloc_device((struct ib_device *)dev);
 }
 
 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6392,9 +6381,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
 		     mlx5_ib_stage_post_ib_reg_umr_init,
 		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
-		     mlx5_ib_stage_rep_reg_init,
-		     mlx5_ib_stage_rep_reg_cleanup),
 };
 
 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
@@ -6462,8 +6448,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-
-		return __mlx5_ib_add(dev, &nic_rep_profile);
+		dev->profile = &nic_rep_profile;
+		mlx5_ib_register_vport_reps(dev);
+		return dev;
 	}
 
 	return __mlx5_ib_add(dev, &pf_profile);
@@ -6485,7 +6472,12 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	}
 
 	dev = context;
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	if (dev->profile == &nic_rep_profile)
+		mlx5_ib_unregister_vport_reps(dev);
+	else
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+
+	ib_dealloc_device((struct ib_device *)dev);
 }
 
 static struct mlx5_interface mlx5_ib_interface = {
......
@@ -790,7 +790,6 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
-	MLX5_IB_STAGE_REP_REG,
 	MLX5_IB_STAGE_MAX,
 };
 
@@ -937,6 +936,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_delay_drop delay_drop;
 	const struct mlx5_ib_profile *profile;
 	struct mlx5_eswitch_rep *rep;
+	int lag_active;
 	struct mlx5_ib_lb_state lb;
 	u8 umr_fence;
......
@@ -3258,7 +3258,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
 		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
 		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
-			if (mlx5_lag_is_active(dev->mdev)) {
+			if (dev->lag_active) {
 				u8 p = mlx5_core_native_port_num(dev->mdev);
 				tx_affinity = get_tx_affinity(dev, pd, base, p);
 				context->flags |= cpu_to_be32(tx_affinity << 24);
......
@@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
 		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
-		diag/fs_tracepoint.o diag/fw_tracer.o
+		lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
 
 #
 # Netdev basic
......
@@ -14,7 +14,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   u8 *out_ttl)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5e_rep_priv *uplink_rpriv;
+	struct net_device *uplink_dev, *uplink_upper;
+	bool dst_is_lag_dev;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
@@ -28,10 +29,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
-	/* if the egress device isn't on the same HW e-switch, we use the uplink */
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-		*out_dev = uplink_rpriv->netdev;
+
+	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+	uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+	dst_is_lag_dev = (uplink_upper &&
+			  netif_is_lag_master(uplink_upper) &&
+			  rt->dst.dev == uplink_upper &&
+			  mlx5_lag_is_sriov(priv->mdev));
+
+	/* if the egress device isn't on the same HW e-switch or
+	 * it's a LAG device, use the uplink
+	 */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev) ||
+	    dst_is_lag_dev)
+		*out_dev = uplink_dev;
 	else
 		*out_dev = rt->dst.dev;
@@ -65,8 +76,9 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct dst_entry *dst;
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct net_device *uplink_dev, *uplink_upper;
+	bool dst_is_lag_dev;
 	int ret;
 
 	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
@@ -77,10 +89,19 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	if (!(*out_ttl))
 		*out_ttl = ip6_dst_hoplimit(dst);
 
-	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
-	/* if the egress device isn't on the same HW e-switch, we use the uplink */
-	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-		*out_dev = uplink_rpriv->netdev;
+	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+	uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+	dst_is_lag_dev = (uplink_upper &&
+			  netif_is_lag_master(uplink_upper) &&
+			  dst->dev == uplink_upper &&
+			  mlx5_lag_is_sriov(priv->mdev));
+
+	/* if the egress device isn't on the same HW e-switch or
+	 * it's a LAG device, use the uplink
+	 */
+	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev) ||
+	    dst_is_lag_dev)
+		*out_dev = uplink_dev;
 	else
 		*out_dev = dst->dev;
 #else
......
@@ -297,17 +297,31 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct net_device *uplink_upper = NULL;
+	struct mlx5e_priv *uplink_priv = NULL;
+	struct net_device *uplink_dev;
 
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
+	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+	if (uplink_dev) {
+		uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+		uplink_priv = netdev_priv(uplink_dev);
+	}
+
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = ETH_ALEN;
-		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+		if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
+			ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
+		} else {
+			struct mlx5e_rep_priv *rpriv = priv->ppriv;
+			struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+			ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+		}
 		break;
 	default:
 		return -EOPNOTSUPP;
......
@@ -52,6 +52,7 @@
 #include "fs_core.h"
 #include "en/port.h"
 #include "en/tc_tun.h"
+#include "lib/devcom.h"
 
 struct mlx5_nic_flow_attr {
 	u32 action;
@@ -74,6 +75,7 @@ enum {
 	MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
 	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
 	MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
+	MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 6),
 };
 
 #define MLX5E_TC_MAX_SPLITS 1
@@ -111,8 +113,10 @@ struct mlx5e_tc_flow {
 	 * destinations.
 	 */
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
+	struct mlx5e_tc_flow *peer_flow;
 	struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
 	struct list_head hairpin; /* flows sharing the same hairpin */
+	struct list_head peer;    /* flows with peer flow */
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
 		struct mlx5_nic_flow_attr nic_attr[0];
@@ -990,7 +994,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		counter = mlx5_fc_create(esw->dev, true);
+		counter = mlx5_fc_create(attr->counter_dev, true);
 		if (IS_ERR(counter)) {
 			err = PTR_ERR(counter);
 			goto err_create_counter;
@@ -1019,7 +1023,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	return 0;
 
 err_add_rule:
-	mlx5_fc_destroy(esw->dev, counter);
+	mlx5_fc_destroy(attr->counter_dev, counter);
 err_create_counter:
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
@@ -1060,7 +1064,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 		mlx5e_detach_mod_hdr(priv, flow);
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
-		mlx5_fc_destroy(esw->dev, attr->counter);
+		mlx5_fc_destroy(attr->counter_dev, attr->counter);
 }
 
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1249,13 +1253,48 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 	}
 }
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
!(flow->flags & MLX5E_TC_FLOW_DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer);
mutex_unlock(&esw->offloads.peer_mutex);
flow->flags &= ~MLX5E_TC_FLOW_DUP;
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
kvfree(flow->peer_flow);
flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_core_dev *dev = flow->priv->mdev;
struct mlx5_devcom *devcom = dev->priv.devcom;
struct mlx5_eswitch *peer_esw;
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
return;
__mlx5e_tc_del_fdb_peer_flow(flow);
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+		mlx5e_tc_del_fdb_peer_flow(flow);
 		mlx5e_tc_del_fdb_flow(priv, flow);
-	else
+	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
+	}
 }
@@ -2535,6 +2574,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev) ||
 			    is_merged_eswitch_dev(priv, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
if (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev)
out_dev = uplink_dev;
 				out_priv = netdev_priv(out_dev);
 				rpriv = out_priv->ppriv;
 				attr->dests[attr->out_count].rep = rpriv->rep;
@@ -2660,6 +2708,20 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
 		return &priv->fs.tc.ht;
 }
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
flow->flags & MLX5E_TC_FLOW_INGRESS;
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
(is_rep_ingress || act_is_encap);
}
 static int
 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 		 struct tc_cls_flower_offload *f, u16 flow_flags,
@@ -2693,13 +2755,16 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 }
 
 static int
-mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		   struct tc_cls_flower_offload *f,
 		   u16 flow_flags,
 		   struct net_device *filter_dev,
+		   struct mlx5_eswitch_rep *in_rep,
+		   struct mlx5_core_dev *in_mdev,
 		   struct mlx5e_tc_flow **__flow)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 	struct mlx5e_tc_flow *flow;
 	int attr_size, err;
@@ -2723,6 +2788,15 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;
flow->esw_attr->in_rep = in_rep;
flow->esw_attr->in_mdev = in_mdev;
if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
MLX5_COUNTER_SOURCE_ESWITCH)
flow->esw_attr->counter_dev = in_mdev;
else
flow->esw_attr->counter_dev = priv->mdev;
 	err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
 	if (err)
 		goto err_free;
@@ -2738,6 +2812,87 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	return err;
 }
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
struct mlx5e_tc_flow *flow)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *peer_urpriv;
struct mlx5e_tc_flow *peer_flow;
struct mlx5_core_dev *in_mdev;
int err = 0;
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
return -ENODEV;
peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
peer_priv = netdev_priv(peer_urpriv->netdev);
/* in_mdev is assigned of which the packet originated from.
* So packets redirected to uplink use the same mdev of the
* original flow and packets redirected from uplink use the
* peer mdev.
*/
if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
parse_attr = flow->esw_attr->parse_attr;
err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
parse_attr->filter_dev,
flow->esw_attr->in_rep, in_mdev, &peer_flow);
if (err)
goto out;
flow->peer_flow = peer_flow;
flow->flags |= MLX5E_TC_FLOW_DUP;
mutex_lock(&esw->offloads.peer_mutex);
list_add_tail(&flow->peer, &esw->offloads.peer_flows);
mutex_unlock(&esw->offloads.peer_mutex);
out:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev;
struct mlx5e_tc_flow *flow;
int err;
err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev, &flow);
if (err)
goto out;
if (is_peer_flow_needed(flow)) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow);
if (err) {
mlx5e_tc_del_fdb_flow(priv, flow);
goto out;
}
}
*__flow = flow;
return 0;
out:
return err;
}
 static int
 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
 		   struct tc_cls_flower_offload *f,
@@ -2882,7 +3037,9 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct tc_cls_flower_offload *f, int flags)
 {
+	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
 	struct rhashtable *tc_ht = get_tc_ht(priv);
+	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
 	u64 bytes;
@@ -2902,6 +3059,27 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
goto out;
if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
(flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
u64 bytes2;
u64 packets2;
u64 lastuse2;
counter = mlx5e_tc_get_counter(flow->peer_flow);
mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
bytes += bytes2;
packets += packets2;
lastuse = max_t(u64, lastuse, lastuse2);
}
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
 	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
 
 	return 0;
@@ -3014,3 +3192,11 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
 	return atomic_read(&tc_ht->nelems);
 }
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
struct mlx5e_tc_flow *flow, *tmp;
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
__mlx5e_tc_del_fdb_peer_flow(flow);
}
@@ -39,7 +39,6 @@
 #include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
-#include "lib/eq.h"
 
 #define UPLINK_VPORT 0xFFFF
 
@@ -1633,6 +1632,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
 	esw->mode = mode;
 
+	mlx5_lag_update(esw->dev);
+
 	if (mode == SRIOV_LEGACY) {
 		err = esw_create_legacy_fdb_table(esw);
 	} else {
@@ -1709,6 +1710,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	old_mode = esw->mode;
 	esw->mode = SRIOV_NONE;
 
+	mlx5_lag_update(esw->dev);
+
 	if (old_mode == SRIOV_OFFLOADS)
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 }
@@ -2226,3 +2229,14 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
dev1->priv.eswitch->mode == SRIOV_NONE) ||
(dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
return true;
return false;
}
@@ -143,6 +143,8 @@ struct mlx5_eswitch_fdb {
 		struct offloads_fdb {
 			struct mlx5_flow_table *slow_fdb;
 			struct mlx5_flow_group *send_to_vport_grp;
+			struct mlx5_flow_group *peer_miss_grp;
+			struct mlx5_flow_handle **peer_miss_rules;
 			struct mlx5_flow_group *miss_grp;
 			struct mlx5_flow_handle *miss_rule_uni;
 			struct mlx5_flow_handle *miss_rule_multi;
@@ -165,6 +167,8 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_table *ft_offloads;
 	struct mlx5_flow_group *vport_rx_group;
 	struct mlx5_eswitch_rep *vport_reps;
+	struct list_head peer_flows;
+	struct mutex peer_mutex;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
 	u8 inline_mode;
@@ -289,6 +293,7 @@ enum {
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
 	struct mlx5_core_dev *in_mdev;
+	struct mlx5_core_dev *counter_dev;
 
 	int split_count;
 	int out_count;
@@ -346,6 +351,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
 		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
 }
 
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
+			 struct mlx5_core_dev *dev1);
+
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
 
 #define esw_info(dev, format, ...) \
@@ -362,6 +370,7 @@ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
......
@@ -39,6 +39,7 @@
 #include "eswitch.h"
 #include "en.h"
 #include "fs_core.h"
+#include "lib/devcom.h"
 
 enum {
 	FDB_FAST_PATH = 0,
@@ -541,6 +542,98 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 	mlx5_del_flow_rules(rule);
 }
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
struct mlx5_flow_spec *spec,
struct mlx5_flow_destination *dest)
{
void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(peer_dev, vhca_id));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest->vport.num = 0;
dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
/* total vports is the same for both e-switches */
int nvports = esw->total_vports;
void *misc;
int err, i;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
peer_miss_rules_setup(peer_dev, spec, &dest);
flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
for (i = 1; i < nvports; i++) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
goto add_flow_err;
}
flows[i] = flow;
}
esw->fdb_table.offloads.peer_miss_rules = flows;
kvfree(spec);
return 0;
add_flow_err:
for (i--; i > 0; i--)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
alloc_flows_err:
kvfree(spec);
return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows;
int i;
flows = esw->fdb_table.offloads.peer_miss_rules;
for (i = 1; i < esw->total_vports; i++)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
}
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_act flow_act = {0};
@@ -811,7 +904,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 		esw->fdb_table.offloads.fdb_left[i] =
 			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
 
-	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
+		esw->total_vports;
 
 	/* create the slow path fdb with encap set, so further table instances
 	 * can be created at run time while VFs are probed if the FW allows that.
@@ -866,6 +960,34 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	}
 	esw->fdb_table.offloads.send_to_vport_grp = g;
/* create peer esw miss group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in,
source_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ix + esw->total_vports - 1);
ix += esw->total_vports;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
goto peer_miss_err;
}
esw->fdb_table.offloads.peer_miss_grp = g;
 	/* create miss group */
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -898,6 +1020,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 miss_rule_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 miss_err:
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+peer_miss_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
 	esw_destroy_offloads_fast_fdb_tables(esw);
@@ -917,6 +1041,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
@@ -1173,6 +1298,105 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
int err;
err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
if (err)
return err;
return 0;
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
mlx5e_tc_clean_fdb_peer_flows(esw);
esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_devcom_event(int event,
void *my_data,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
struct mlx5_eswitch *peer_esw = event_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
int err;
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_out;
err = mlx5_esw_offloads_pair(peer_esw, esw);
if (err)
goto err_pair;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
break;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
mlx5_esw_offloads_unpair(peer_esw);
mlx5_esw_offloads_unpair(esw);
break;
}
return 0;
err_pair:
mlx5_esw_offloads_unpair(esw);
err_out:
mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
event, err);
return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
INIT_LIST_HEAD(&esw->offloads.peer_flows);
mutex_init(&esw->offloads.peer_mutex);
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
mlx5_devcom_register_component(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
esw);
mlx5_devcom_send_event(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
ESW_OFFLOADS_DEVCOM_PAIR, esw);
}
static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 {
 	int err;
@@ -1195,6 +1419,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto err_reps;
 
+	esw_offloads_devcom_init(esw);
 	return 0;
 
 err_reps:
@@ -1233,6 +1458,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
+	esw_offloads_devcom_cleanup(esw);
 	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
......
@@ -34,11 +34,15 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/vport.h>
 #include "mlx5_core.h"
+#include "eswitch.h"
 
 enum {
-	MLX5_LAG_FLAG_BONDED = 1 << 0,
+	MLX5_LAG_FLAG_ROCE  = 1 << 0,
+	MLX5_LAG_FLAG_SRIOV = 1 << 1,
 };
 
+#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV)
+
 struct lag_func {
 	struct mlx5_core_dev *dev;
 	struct net_device    *netdev;
@@ -61,11 +65,6 @@ struct mlx5_lag {
 	struct lag_tracker        tracker;
 	struct delayed_work       bond_work;
 	struct notifier_block     nb;
-
-	/* Admin state. Allow lag only if allowed is true
-	 * even if network conditions for lag were met
-	 */
-	bool                      allowed;
 };
 /* General purpose, use for short periods of time.
@@ -165,9 +164,19 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
 	return -1;
 }
 
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
+{
+	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
+}
+
+static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
 {
-	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
+}
+
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
+{
+	return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
 }
 
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
@@ -186,36 +195,128 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 	*port2 = 1;
 }
 
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
+static void mlx5_modify_lag(struct mlx5_lag *ldev,
 			      struct lag_tracker *tracker)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	u8 v2p_port1, v2p_port2;
 	int err;
 
-	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
+				       &v2p_port2);
if (v2p_port1 != ldev->v2p_map[0] ||
v2p_port2 != ldev->v2p_map[1]) {
ldev->v2p_map[0] = v2p_port1;
ldev->v2p_map[1] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[0], ldev->v2p_map[1]);
err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
if (err)
mlx5_core_err(dev0,
"Failed to modify LAG (%d)\n",
err);
}
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
int err;
 	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
 				       &ldev->v2p_map[1]);
 
+	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
+		       ldev->v2p_map[0], ldev->v2p_map[1]);
+
 	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
 	if (err)
 		mlx5_core_err(dev0,
 			      "Failed to create LAG (%d)\n",
 			      err);
+	return err;
+}
static int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
u8 flags)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
int err;
err = mlx5_create_lag(ldev, tracker);
if (err) {
if (roce_lag) {
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
} else {
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
return err;
}
ldev->flags |= flags;
return 0;
} }
-static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	bool roce_lag = __mlx5_lag_is_roce(ldev);
 	int err;
 
-	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
 
 	err = mlx5_cmd_destroy_lag(dev0);
-	if (err)
-		mlx5_core_err(dev0,
-			      "Failed to destroy LAG (%d)\n",
-			      err);
+	if (err) {
+		if (roce_lag) {
+			mlx5_core_err(dev0,
+				      "Failed to deactivate RoCE LAG; driver restart required\n");
+		} else {
+			mlx5_core_err(dev0,
+				      "Failed to deactivate VF LAG; driver restart required\n"
+				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
+		}
+	}
+
+	return err;
+}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
if (ldev->pf[0].dev &&
ldev->pf[1].dev &&
mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev))
return true;
else
return false;
}
static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
{
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
mlx5_add_dev_by_protocol(ldev->pf[i].dev,
MLX5_INTERFACE_PROTOCOL_IB);
}
static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
{
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
MLX5_INTERFACE_PROTOCOL_IB);
} }
 static void mlx5_do_bond(struct mlx5_lag *ldev)
@@ -223,9 +324,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
 	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
 	struct lag_tracker tracker;
-	u8 v2p_port1, v2p_port2;
-	int i, err;
-	bool do_bond;
+	bool do_bond, roce_lag;
+	int err;
 
 	if (!dev0 || !dev1)
 		return;
@@ -234,42 +334,45 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	tracker = ldev->tracker;
 	mutex_unlock(&lag_mutex);
 
-	do_bond = tracker.is_bonded && ldev->allowed;
-
-	if (do_bond && !mlx5_lag_is_bonded(ldev)) {
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
-						    MLX5_INTERFACE_PROTOCOL_IB);
-
-		mlx5_activate_lag(ldev, &tracker);
-
-		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_enable_roce(dev1);
-	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
-		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
-					       &v2p_port2);
-
-		if ((v2p_port1 != ldev->v2p_map[0]) ||
-		    (v2p_port2 != ldev->v2p_map[1])) {
-			ldev->v2p_map[0] = v2p_port1;
-			ldev->v2p_map[1] = v2p_port2;
-
-			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
-			if (err)
-				mlx5_core_err(dev0,
-					      "Failed to modify LAG (%d)\n",
-					      err);
-		}
-	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
-		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_disable_roce(dev1);
-
-		mlx5_deactivate_lag(ldev);
-
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			if (ldev->pf[i].dev)
-				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
-							 MLX5_INTERFACE_PROTOCOL_IB);
+	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
+
+	if (do_bond && !__mlx5_lag_is_active(ldev)) {
+		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
+			   !mlx5_sriov_is_enabled(dev1);
+
+		if (roce_lag)
+			mlx5_lag_remove_ib_devices(ldev);
+
+		err = mlx5_activate_lag(ldev, &tracker,
+					roce_lag ? MLX5_LAG_FLAG_ROCE :
+					MLX5_LAG_FLAG_SRIOV);
+		if (err) {
+			if (roce_lag)
+				mlx5_lag_add_ib_devices(ldev);
+
+			return;
+		}
+
+		if (roce_lag) {
+			mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+			mlx5_nic_vport_enable_roce(dev1);
+		}
+	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
+		mlx5_modify_lag(ldev, &tracker);
+	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
+		roce_lag = __mlx5_lag_is_roce(ldev);
+
+		if (roce_lag) {
+			mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+			mlx5_nic_vport_disable_roce(dev1);
+		}
+
+		err = mlx5_deactivate_lag(ldev);
+		if (err)
+			return;
+
+		if (roce_lag)
+			mlx5_lag_add_ib_devices(ldev);
 	}
 }
@@ -419,15 +522,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
(ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
return false;
else
return true;
}
 static struct mlx5_lag *mlx5_lag_dev_alloc(void)
 {
 	struct mlx5_lag *ldev;
@@ -437,7 +531,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
 		return NULL;
 
 	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
-	ldev->allowed = mlx5_lag_check_prereq(ldev);
 
 	return ldev;
 }
@@ -462,7 +555,6 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	ldev->tracker.netdev_state[fn].link_up = 0;
 	ldev->tracker.netdev_state[fn].tx_enabled = 0;
-	ldev->allowed = mlx5_lag_check_prereq(ldev);
 	dev->priv.lag = ldev;
 
 	mutex_unlock(&lag_mutex);
@@ -484,7 +576,6 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
 	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
 
 	dev->priv.lag = NULL;
-	ldev->allowed = mlx5_lag_check_prereq(ldev);
 	mutex_unlock(&lag_mutex);
 }
@@ -532,7 +623,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
 	if (!ldev)
 		return;
 
-	if (mlx5_lag_is_bonded(ldev))
+	if (__mlx5_lag_is_active(ldev))
 		mlx5_deactivate_lag(ldev);
 
 	mlx5_lag_dev_remove_pf(ldev, dev);
@@ -549,6 +640,20 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
 	}
 }
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
mutex_unlock(&lag_mutex);
return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
@@ -556,49 +661,40 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
-	res = ldev && mlx5_lag_is_bonded(ldev);
+	res = ldev && __mlx5_lag_is_active(ldev);
 	mutex_unlock(&lag_mutex);
 
 	return res;
 }
 EXPORT_SYMBOL(mlx5_lag_is_active);
-static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
-	int ret = 0;
-	bool lag_active;
-
-	mlx5_dev_list_lock();
-
-	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev) {
-		ret = -ENODEV;
-		goto unlock;
-	}
-	lag_active = mlx5_lag_is_bonded(ldev);
-	if (!mlx5_lag_check_prereq(ldev) && allow) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-	if (ldev->allowed == allow)
-		goto unlock;
-	ldev->allowed = allow;
-	if ((lag_active && !allow) || allow)
-		mlx5_do_bond(ldev);
-unlock:
-	mlx5_dev_list_unlock();
-	return ret;
-}
+	bool res;
 
-int mlx5_lag_forbid(struct mlx5_core_dev *dev)
-{
-	return mlx5_lag_set_state(dev, false);
-}
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+	res = ldev && __mlx5_lag_is_sriov(ldev);
+	mutex_unlock(&lag_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_sriov);
 
-int mlx5_lag_allow(struct mlx5_core_dev *dev)
-{
-	return mlx5_lag_set_state(dev, true);
-}
+void mlx5_lag_update(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev;
+
+	mlx5_dev_list_lock();
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev)
+		goto unlock;
+
+	mlx5_do_bond(ldev);
+
+unlock:
+	mlx5_dev_list_unlock();
+}
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
@@ -609,7 +705,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
 
-	if (!(ldev && mlx5_lag_is_bonded(ldev)))
+	if (!(ldev && __mlx5_lag_is_roce(ldev)))
 		goto unlock;
 
 	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -638,7 +734,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return true;
 
 	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+	if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
 		return true;
 
 	/* If bonded, we do not add an IB device for PF1. */
@@ -665,7 +761,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
-	if (ldev && mlx5_lag_is_bonded(ldev)) {
+	if (ldev && __mlx5_lag_is_roce(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
 		mdev[0] = ldev->pf[0].dev;
 		mdev[1] = ldev->pf[1].dev;
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
static LIST_HEAD(devcom_list);
#define devcom_for_each_component(priv, comp, iter) \
for (iter = 0; \
comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
iter++)
struct mlx5_devcom_component {
struct {
void *data;
} device[MLX5_MAX_PORTS];
mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem;
bool paired;
};
struct mlx5_devcom_list {
struct list_head list;
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
};
struct mlx5_devcom {
struct mlx5_devcom_list *priv;
int idx;
};
static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
{
struct mlx5_devcom_component *comp;
struct mlx5_devcom_list *priv;
int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
devcom_for_each_component(priv, comp, i)
init_rwsem(&comp->sem);
return priv;
}
static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv,
u8 idx)
{
struct mlx5_devcom *devcom;
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
return NULL;
devcom->priv = priv;
devcom->idx = idx;
return devcom;
}
/* Must be called with intf_mutex held */
struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_devcom_list *priv = NULL, *iter;
struct mlx5_devcom *devcom = NULL;
bool new_priv = false;
u64 sguid0, sguid1;
int idx, i;
if (!mlx5_core_is_pf(dev))
return NULL;
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
idx = -1;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (iter->devs[i])
tmp_dev = iter->devs[i];
else
idx = i;
}
if (idx == -1)
continue;
sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
if (sguid0 != sguid1)
continue;
priv = iter;
break;
}
if (!priv) {
priv = mlx5_devcom_list_alloc();
if (!priv)
return ERR_PTR(-ENOMEM);
idx = 0;
new_priv = true;
}
priv->devs[idx] = dev;
devcom = mlx5_devcom_alloc(priv, idx);
if (!devcom) {
kfree(priv);
return ERR_PTR(-ENOMEM);
}
if (new_priv)
list_add(&priv->list, &devcom_list);
return devcom;
}
/* Must be called with intf_mutex held */
void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
{
struct mlx5_devcom_list *priv;
int i;
if (IS_ERR_OR_NULL(devcom))
return;
priv = devcom->priv;
priv->devs[devcom->idx] = NULL;
kfree(devcom);
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (priv->devs[i])
break;
if (i != MLX5_MAX_PORTS)
return;
list_del(&priv->list);
kfree(priv);
}
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
mlx5_devcom_event_handler_t handler,
void *data)
{
struct mlx5_devcom_component *comp;
if (IS_ERR_OR_NULL(devcom))
return;
WARN_ON(!data);
comp = &devcom->priv->components[id];
down_write(&comp->sem);
comp->handler = handler;
comp->device[devcom->idx].data = data;
up_write(&comp->sem);
}
void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id)
{
struct mlx5_devcom_component *comp;
if (IS_ERR_OR_NULL(devcom))
return;
comp = &devcom->priv->components[id];
down_write(&comp->sem);
comp->device[devcom->idx].data = NULL;
up_write(&comp->sem);
}
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
int event,
void *event_data)
{
struct mlx5_devcom_component *comp;
int err = -ENODEV, i;
if (IS_ERR_OR_NULL(devcom))
return err;
comp = &devcom->priv->components[id];
down_write(&comp->sem);
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (i != devcom->idx && comp->device[i].data) {
err = comp->handler(event, comp->device[i].data,
event_data);
break;
}
up_write(&comp->sem);
return err;
}
void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
bool paired)
{
struct mlx5_devcom_component *comp;
comp = &devcom->priv->components[id];
WARN_ON(!rwsem_is_locked(&comp->sem));
comp->paired = paired;
}
bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id)
{
if (IS_ERR_OR_NULL(devcom))
return false;
return devcom->priv->components[id].paired;
}
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id)
{
struct mlx5_devcom_component *comp;
int i;
if (IS_ERR_OR_NULL(devcom))
return NULL;
comp = &devcom->priv->components[id];
down_read(&comp->sem);
if (!comp->paired) {
up_read(&comp->sem);
return NULL;
}
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (i != devcom->idx)
break;
return comp->device[i].data;
}
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id)
{
struct mlx5_devcom_component *comp = &devcom->priv->components[id];
up_read(&comp->sem);
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies */
#ifndef __LIB_MLX5_DEVCOM_H__
#define __LIB_MLX5_DEVCOM_H__
#include <linux/mlx5/driver.h>
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_NUM_COMPONENTS,
};
typedef int (*mlx5_devcom_event_handler_t)(int event,
void *my_data,
void *event_data);
struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
int event,
void *event_data);
void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
bool paired);
bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
#endif
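Taken together, the devcom API above lets the two PF driver instances of one card find each other (by NIC system image GUID), attach per-component private data and an event handler, signal each other, and temporarily borrow the peer's data under the component lock. A minimal usage sketch follows; struct my_priv, my_handler, MY_PAIR_EVENT and the my_*() functions are placeholders for illustration, not symbols from this series.

#include <linux/mlx5/driver.h>
#include "lib/devcom.h"

enum { MY_PAIR_EVENT = 0 };	/* placeholder event code */

struct my_priv {
	struct mlx5_devcom *devcom;
};

/* Called under the component rw_semaphore (taken by mlx5_devcom_send_event()),
 * with the *other* device's registered data passed as my_data. */
static int my_handler(int event, void *my_data, void *event_data)
{
	struct my_priv *priv = my_data;

	if (event == MY_PAIR_EVENT)
		/* Legal here: the component lock is already held. */
		mlx5_devcom_set_paired(priv->devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
	return 0;
}

static void my_register(struct mlx5_core_dev *dev, struct my_priv *priv)
{
	priv->devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR_OR_NULL(priv->devcom))
		return;

	/* Expose this PF's data and handler for the shared component ... */
	mlx5_devcom_register_component(priv->devcom, MLX5_DEVCOM_ESW_OFFLOADS,
				       my_handler, priv);
	/* ... and ask the sibling PF (if already registered) to pair up. */
	mlx5_devcom_send_event(priv->devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       MY_PAIR_EVENT, priv);
}

static void my_use_peer(struct my_priv *priv)
{
	struct my_priv *peer;

	/* Borrow the sibling PF's data; returns NULL until both sides are paired. */
	peer = mlx5_devcom_get_peer_data(priv->devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer)
		return;
	/* ... e.g. duplicate a TC rule on the peer eswitch ... */
	mlx5_devcom_release_peer_data(priv->devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}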
...@@ -63,6 +63,7 @@
 #include "accel/tls.h"
 #include "lib/clock.h"
 #include "lib/vxlan.h"
+#include "lib/devcom.h"
 #include "diag/fw_tracer.h"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
...@@ -722,16 +723,21 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	struct pci_dev *pdev = dev->pdev;
 	int err;
 
+	priv->devcom = mlx5_devcom_register_device(dev);
+	if (IS_ERR(priv->devcom))
+		dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
+			priv->devcom);
+
 	err = mlx5_query_board_id(dev);
 	if (err) {
 		dev_err(&pdev->dev, "query board id failed\n");
-		goto out;
+		goto err_devcom;
 	}
 
 	err = mlx5_eq_table_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize eq\n");
-		goto out;
+		goto err_devcom;
 	}
 
 	err = mlx5_events_init(dev);
...@@ -807,8 +813,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_events_cleanup(dev);
 err_eq_cleanup:
 	mlx5_eq_table_cleanup(dev);
+err_devcom:
+	mlx5_devcom_unregister_device(dev->priv.devcom);
 
-out:
 	return err;
 }
...@@ -828,6 +835,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
+	mlx5_devcom_unregister_device(dev->priv.devcom);
 }
 
 static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
......
...@@ -185,10 +185,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 	       MLX5_CAP_GEN(dev, lag_master);
 }
 
-int mlx5_lag_allow(struct mlx5_core_dev *dev);
-int mlx5_lag_forbid(struct mlx5_core_dev *dev);
-
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+void mlx5_lag_update(struct mlx5_core_dev *dev);
 
 enum {
 	MLX5_NIC_IFC_FULL = 0,
......
...@@ -216,20 +216,10 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
-	if (num_vfs) {
-		int ret;
-
-		ret = mlx5_lag_forbid(dev);
-		if (ret && (ret != -ENODEV))
-			return ret;
-	}
-
-	if (num_vfs) {
+	if (num_vfs)
 		err = mlx5_sriov_enable(pdev, num_vfs);
-	} else {
+	else
 		mlx5_sriov_disable(pdev);
-		mlx5_lag_allow(dev);
-	}
 
 	return err ? err : num_vfs;
 }
......
...@@ -486,6 +486,7 @@ struct mlx5_events;
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
+struct mlx5_devcom;
 struct mlx5_eq_table;
 
 struct mlx5_rate_limit {
...@@ -560,6 +561,7 @@ struct mlx5_priv {
 	struct mlx5_eswitch	*eswitch;
 	struct mlx5_core_sriov	sriov;
 	struct mlx5_lag		*lag;
+	struct mlx5_devcom	*devcom;
 	unsigned long		pci_dev_data;
 	struct mlx5_fc_stats	fc_stats;
 	struct mlx5_rl_table	rl_table;
...@@ -1017,6 +1019,8 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
......
...@@ -608,13 +608,19 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
 	u8         reserved_at_800[0x7800];
 };
 
+enum {
+	MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+	MLX5_COUNTER_FLOW_ESWITCH   = 0x1,
+};
+
 struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_strip[0x1];
 	u8         vport_cvlan_strip[0x1];
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x18];
+	u8         reserved_at_5[0x17];
+	u8         counter_eswitch_affinity[0x1];
 	u8         merged_eswitch[0x1];
 	u8         nic_vport_node_guid_modify[0x1];
 	u8         nic_vport_port_guid_modify[0x1];
...@@ -3865,16 +3871,16 @@ enum {
 };
 
 enum mlx5_monitor_counter_ppcnt {
-	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS       = 0X0,
-	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD    = 0X1,
-	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS        = 0X2,
-	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS  = 0X3,
-	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS             = 0X4,
-	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS              = 0X5,
+	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS       = 0x0,
+	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD    = 0x1,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS        = 0x2,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS  = 0x3,
+	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS             = 0x4,
+	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS              = 0x5,
 };
 
 enum {
-	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER         = 0X4,
+	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER         = 0x4,
 };
 
 struct mlx5_ifc_monitor_counter_output_bits {
...@@ -4780,7 +4786,7 @@ enum {
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
-	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
 };
 
 struct mlx5_ifc_query_flow_group_out_bits {
......