Commit 416a01a4 authored by Paolo Abeni

Merge tag 'mlx5-updates-2023-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-09-19

Misc updates for mlx5 driver

1) From Erez, Add support for multicast forwarding to multiple destinations
   in bridge offloads with software steering mode (SMFS).

2) From Jianbo, Utilize the maximum aggregated link speed for the police
   action rate.

3) From Moshe, Add a health error syndrome for poisoned PCI data

4) From Shay, Enable 4-port multiport E-switch

5) From Jiri, Trivial SF code cleanup

====================

Link: https://lore.kernel.org/r/20230920063552.296978-1-saeed@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents c1fec890 e738e355
@@ -78,6 +78,8 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
xa_for_each(&entry->ports, idx, port) {
dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[i].ft = port->mcast.ft;
if (port->vport_num == MLX5_VPORT_UPLINK)
dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
i++;
}
@@ -585,10 +587,6 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
port->vport_num == MLX5_VPORT_UPLINK)
rule_spec->flow_context.flow_source =
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -660,11 +658,6 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
if (!rule_spec)
return ERR_PTR(-ENOMEM);
if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
port->vport_num == MLX5_VPORT_UPLINK)
rule_spec->flow_context.flow_source =
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
......
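A minimal, self-contained sketch of the tagging mechanism in the first hunk above (all types here are mocks, though MLX5_VPORT_UPLINK's value 0xffff does match the driver): the uplink port's multicast flow table is flagged so that the software-steering layer, changed further down in this series, can recognize wire-bound destinations.

#include <stdio.h>

#define FLOW_TABLE_UPLINK_VPORT (1u << 5) /* mirrors MLX5_FLOW_TABLE_UPLINK_VPORT */
#define VPORT_UPLINK 0xffffu              /* mirrors MLX5_VPORT_UPLINK */

struct flow_table { unsigned int flags; };
struct bridge_port { unsigned int vport_num; struct flow_table mcast_ft; };

/* While collecting multi-destination entries, tag the uplink port's
 * table so lower layers can tell this destination goes to the wire. */
static void add_mdb_dest(struct bridge_port *port)
{
	if (port->vport_num == VPORT_UPLINK)
		port->mcast_ft.flags |= FLOW_TABLE_UPLINK_VPORT;
}

int main(void)
{
	struct bridge_port uplink = { .vport_num = VPORT_UPLINK };

	add_mdb_dest(&uplink);
	printf("uplink mcast ft flags: 0x%x\n", uplink.mcast_ft.flags); /* 0x20 */
	return 0;
}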
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
@@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
{
struct ethtool_link_ksettings lksettings;
struct net_device *slave, *master;
u32 speed = SPEED_UNKNOWN;
/* Lock ensures a stable reference to master and slave netdevice
* while port speed of master is queried.
*/
ASSERT_RTNL();
slave = mlx5_uplink_netdev_get(mdev);
if (!slave)
goto out;
master = netdev_master_upper_dev_get(slave);
if (master && !__ethtool_get_link_ksettings(master, &lksettings))
speed = lksettings.base.speed;
out:
return speed;
}
static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
bool hold_rtnl_lock, struct netlink_ext_ack *extack)
{
int err;
if (!mlx5_lag_is_active(mdev))
goto skip_lag;
if (hold_rtnl_lock)
rtnl_lock();
*link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
if (hold_rtnl_lock)
rtnl_unlock();
if (*link_speed_max != (u32)SPEED_UNKNOWN)
return 0;
skip_lag:
err = mlx5_port_max_linkspeed(mdev, link_speed_max);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
return err;
}
static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
const char *name, u32 link_speed_max,
u64 value, struct netlink_ext_ack *extack)
{
if (value > link_speed_max) {
pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
name, value, link_speed_max);
NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
return -EINVAL;
}
return 0;
}
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 link_speed_max;
u32 bitmask;
int err;
@@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
if (IS_ERR(vport))
return PTR_ERR(vport);
if (rate_mbps) {
err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
if (err)
return err;
err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
link_speed_max, rate_mbps, NULL);
if (err)
return err;
}
mutex_lock(&esw->state_lock);
if (!vport->qos.enabled) {
/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
@@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
return err;
}
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
@@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
if (value > link_speed_max) {
pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
name, value, link_speed_max);
NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
return -EINVAL;
}
err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
if (err)
return err;
err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
if (err)
return err;
*rate = value;
return 0;
......
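A runnable sketch of the validation flow the new helpers above implement, under the assumptions visible in the hunks: when the device is part of a LAG the cap is the bond master's aggregated speed (queried under RTNL in the real driver), otherwise the single port's maximum. All helpers below are stand-ins, not driver calls.

#include <stdio.h>

#define SPEED_UNKNOWN ((unsigned int)-1)

/* Stand-in for mlx5_esw_qos_lag_link_speed_get_locked(): the real code
 * queries the bond master's ethtool link settings under RTNL. */
static unsigned int lag_aggregated_speed(void) { return 2 * 100000; } /* 2 x 100G */

/* Stand-in for mlx5_port_max_linkspeed(): single-port maximum. */
static unsigned int port_max_speed(void) { return 100000; }

static unsigned int max_link_speed_get(int lag_active)
{
	if (lag_active) {
		unsigned int speed = lag_aggregated_speed();

		if (speed != SPEED_UNKNOWN)
			return speed;
	}
	return port_max_speed(); /* fall back when not bonded or speed unknown */
}

static int link_speed_verify(unsigned int rate_mbps, unsigned int max)
{
	if (rate_mbps > max) {
		fprintf(stderr, "rate %uMbps exceeds link maximum %uMbps\n",
			rate_mbps, max);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int max = max_link_speed_get(1); /* LAG of two 100G ports */

	return link_speed_verify(150000, max);    /* 150G now passes validation */
}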
@@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd)
return "FFSER error";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
default:
return "unrecognized error";
}
......
@@ -65,12 +65,12 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
return err;
}
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int err;
int i;
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
if (!err)
err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
for (i = 0; i < ldev->ports; i++) {
err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
return 0;
@@ -112,8 +112,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
mlx5_eswitch_reload_reps(dev0->priv.eswitch);
mlx5_eswitch_reload_reps(dev1->priv.eswitch);
for (i = 0; i < ldev->ports; i++)
mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
......
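The shape of the change above, reduced to a standalone sketch: the hardcoded dev0/dev1 pair becomes a loop over all LAG ports, both on the forward path and in the error unwind. reload_reps() below is a stub standing in for mlx5_eswitch_reload_reps().

#include <stdio.h>

#define MPESW_SUPPORTED_PORTS 4 /* raised from 2 by this series */

static int reload_reps(int port) { (void)port; return 0; } /* stub; real call may fail */

static int enable_mpesw(int nports)
{
	int err, i;

	for (i = 0; i < nports; i++) {
		err = reload_reps(i);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* best-effort rollback across every port, not just the first two */
	for (i = 0; i < nports; i++)
		reload_reps(i);
	return err;
}

int main(void)
{
	printf("enable_mpesw: %d\n", enable_mpesw(MPESW_SUPPORTED_PORTS));
	return 0;
}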
@@ -14,7 +14,6 @@
struct mlx5_sf_dev_table {
struct xarray devices;
unsigned int max_sfs;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
@@ -110,12 +109,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
sf_dev->parent_mdev = dev;
sf_dev->fn_id = fn_id;
if (!table->max_sfs) {
mlx5_adev_idx_free(id);
kfree(sf_dev);
err = -EOPNOTSUPP;
goto add_err;
}
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
trace_mlx5_sf_dev_add(dev, sf_dev, id);
@@ -296,7 +289,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
unsigned int max_sfs;
int err;
if (!mlx5_sf_dev_supported(dev))
@@ -310,13 +302,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
if (MLX5_CAP_GEN(dev, max_num_sf))
max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
else
max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
table->max_sfs = max_sfs;
xa_init(&table->devices);
mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
@@ -332,7 +319,6 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
return;
arm_err:
@@ -340,7 +326,6 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
table->max_sfs = 0;
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
......
@@ -20,44 +20,36 @@ struct mlx5_sf {
u16 hw_state;
};
static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
{
struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);
return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
}
struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray port_indices; /* port index based lookup. */
refcount_t refcount;
struct completion disable_complete;
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
struct notifier_block mdev_nb;
};
static struct mlx5_sf *
mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
{
return xa_load(&table->port_indices, port_index);
}
static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
unsigned long index;
struct mlx5_sf *sf;
xa_for_each(&table->port_indices, index, sf) {
if (sf->hw_fn_id == fn_id)
return sf;
}
return NULL;
return xa_load(&table->function_ids, fn_id);
}
static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}
static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
xa_erase(&table->port_indices, sf->port_index);
xa_erase(&table->function_ids, sf->hw_fn_id);
}
static struct mlx5_sf *
@@ -94,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
sf->controller = controller;
err = mlx5_sf_id_insert(table, sf);
err = mlx5_sf_function_id_insert(table, sf);
if (err)
goto insert_err;
@@ -112,28 +104,11 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table = dev->priv.sf_table;
if (!table)
return NULL;
return refcount_inc_not_zero(&table->refcount) ? table : NULL;
}
static void mlx5_sf_table_put(struct mlx5_sf_table *table)
{
if (refcount_dec_and_test(&table->refcount))
complete(&table->disable_complete);
}
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
switch (hw_state) {
@@ -173,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
struct mlx5_sf_table *table;
struct mlx5_sf *sf;
int err = 0;
table = mlx5_sf_table_try_get(dev);
if (!table)
return -EOPNOTSUPP;
struct mlx5_sf_table *table = dev->priv.sf_table;
struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
sf = mlx5_sf_lookup_by_index(table, dl_port->index);
if (!sf) {
err = -EOPNOTSUPP;
goto sf_err;
}
mutex_lock(&table->sf_state_lock);
*state = mlx5_sf_to_devlink_state(sf->hw_state);
*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
mutex_unlock(&table->sf_state_lock);
sf_err:
mlx5_sf_table_put(table);
return err;
return 0;
}
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
@@ -258,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
struct mlx5_sf_table *table;
struct mlx5_sf *sf;
int err;
table = mlx5_sf_table_try_get(dev);
if (!table) {
NL_SET_ERR_MSG_MOD(extack,
"Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
return -EOPNOTSUPP;
}
sf = mlx5_sf_lookup_by_index(table, dl_port->index);
if (!sf) {
err = -ENODEV;
goto out;
}
struct mlx5_sf_table *table = dev->priv.sf_table;
struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
return mlx5_sf_state_set(dev, table, sf, state, extack);
}
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
@@ -336,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
return 0;
}
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
mlx5_sf_hw_table_supported(dev);
}
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
struct devlink_port **dl_port)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_sf_table *table;
struct mlx5_sf_table *table = dev->priv.sf_table;
int err;
err = mlx5_sf_new_check_attr(dev, new_attr, extack);
if (err)
return err;
table = mlx5_sf_table_try_get(dev);
if (!table) {
if (!mlx5_sf_table_supported(dev)) {
NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
return -EOPNOTSUPP;
}
if (!is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
"SF ports are only supported in eswitch switchdev mode.");
return -EOPNOTSUPP;
}
err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
mlx5_sf_table_put(table);
return err;
return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mutex_lock(&table->sf_state_lock);
mlx5_sf_function_id_erase(table, sf);
if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
mlx5_sf_free(table, sf);
} else if (mlx5_sf_is_active(sf)) {
@@ -377,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
mutex_unlock(&table->sf_state_lock);
}
static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
struct mlx5_eswitch *esw = table->dev->priv.eswitch;
mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
mlx5_sf_dealloc(table, sf);
}
int mlx5_devlink_sf_port_del(struct devlink *devlink,
@@ -384,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_sf_table *table;
struct mlx5_sf *sf;
int err = 0;
table = mlx5_sf_table_try_get(dev);
if (!table) {
NL_SET_ERR_MSG_MOD(extack,
"Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
return -EOPNOTSUPP;
}
sf = mlx5_sf_lookup_by_index(table, dl_port->index);
if (!sf) {
err = -ENODEV;
goto sf_err;
}
mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
mlx5_sf_id_erase(table, sf);
struct mlx5_sf_table *table = dev->priv.sf_table;
struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
mutex_lock(&table->sf_state_lock);
mlx5_sf_dealloc(table, sf);
mutex_unlock(&table->sf_state_lock);
sf_err:
mlx5_sf_table_put(table);
return err;
mlx5_sf_del(table, sf);
return 0;
}
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
@@ -434,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
bool update = false;
struct mlx5_sf *sf;
table = mlx5_sf_table_try_get(table->dev);
if (!table)
return 0;
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
goto sf_err;
goto unlock;
/* When driver is attached or detached to a function, an event
* notifies such state change.
@@ -451,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
sf->hw_state = event->new_vhca_state;
trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
sf_err:
unlock:
mutex_unlock(&table->sf_state_lock);
mlx5_sf_table_put(table);
return 0;
}
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
init_completion(&table->disable_complete);
refcount_set(&table->refcount, 1);
}
static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
struct mlx5_eswitch *esw = table->dev->priv.eswitch;
unsigned long index;
struct mlx5_sf *sf;
/* At this point, no new user commands can start and no vhca event can
* arrive. It is safe to destroy all user created SFs.
*/
xa_for_each(&table->port_indices, index, sf) {
mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
mlx5_sf_id_erase(table, sf);
mlx5_sf_dealloc(table, sf);
}
}
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
if (!refcount_read(&table->refcount))
return;
/* Balances with refcount_set; drop the reference so that new user cmd cannot start
* and new vhca event handler cannot run.
*/
mlx5_sf_table_put(table);
wait_for_completion(&table->disable_complete);
mlx5_sf_deactivate_all(table);
xa_for_each(&table->function_ids, index, sf)
mlx5_sf_del(table, sf);
}
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -499,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
const struct mlx5_esw_event_info *mode = data;
switch (mode->new_mode) {
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
mlx5_sf_del_all(table);
break;
default:
break;
@@ -522,9 +436,6 @@ static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, vo
if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
table = mlx5_sf_table_try_get(table->dev);
if (!table)
return NOTIFY_DONE;
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
@@ -537,16 +448,9 @@ static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, vo
ret = NOTIFY_OK;
out:
mutex_unlock(&table->sf_state_lock);
mlx5_sf_table_put(table);
return ret;
}
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
mlx5_sf_hw_table_supported(dev);
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
@@ -561,9 +465,8 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
mutex_init(&table->sf_state_lock);
table->dev = dev;
xa_init(&table->port_indices);
xa_init(&table->function_ids);
dev->priv.sf_table = table;
refcount_set(&table->refcount, 0);
table->esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
if (err)
@@ -598,8 +501,7 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
WARN_ON(refcount_read(&table->refcount));
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->port_indices));
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
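A standalone sketch of the lookup pattern that replaces the refcounted table walk above: devlink hands a callback a pointer to the port structure embedded in the SF, and container_of() recovers the owning SF directly. The real code goes through an intermediate mlx5_devlink_port; this sketch collapses that step for brevity, and all types are mocks.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct devlink_port { unsigned int index; };

struct mlx5_sf_mock {
	int hw_fn_id;
	struct devlink_port dl_port; /* embedded; devlink callbacks receive &dl_port */
};

static struct mlx5_sf_mock *sf_by_dl_port(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_sf_mock, dl_port);
}

int main(void)
{
	struct mlx5_sf_mock sf = { .hw_fn_id = 42, .dl_port = { .index = 7 } };
	struct devlink_port *dl = &sf.dl_port; /* what a devlink op is given */

	printf("fn_id=%d\n", sf_by_dl_port(dl)->hw_fn_id); /* prints fn_id=42 */
	return 0;
}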
@@ -55,6 +55,12 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
return action_type_to_str[action_id];
}
static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
{
return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
}
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
@@ -1163,12 +1169,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
bool is_ft_wire = false;
u16 num_dst_ft = 0;
u32 num_of_ref = 0;
u32 ref_act_cnt;
u16 last_dest;
int ret;
int i;
@@ -1210,11 +1220,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
break;
case DR_ACTION_TYP_FT:
if (num_dst_ft &&
!mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
goto free_ref_actions;
}
num_dst_ft++;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (dest_action->dest_tbl->is_fw_tbl)
if (dest_action->dest_tbl->is_fw_tbl) {
hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
else
} else {
hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
if (dest_action->dest_tbl->is_wire_ft) {
is_ft_wire = true;
last_dest = i;
}
}
break;
default:
@@ -1223,6 +1244,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
}
}
/* In multidest, the FW handles the destinations on the RX side, except
* for the last one, which is done on the TX side.
* So if one of the FT targets is the wire, put it at the end of the dest list.
*/
if (is_ft_wire && num_dst_ft > 1) {
tmp_hw_dest = hw_dests[last_dest];
hw_dests[last_dest] = hw_dests[num_of_dests - 1];
hw_dests[num_of_dests - 1] = tmp_hw_dest;
}
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
......
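The reordering rule from the last hunk, as a runnable sketch with mock types: the firmware executes every destination of a multi-destination action on RX except the last, which runs on TX, so a wire-bound flow table must end up last in the array.

#include <stdio.h>
#include <stdbool.h>

struct dest { int ft_id; bool is_wire; };

/* Move a wire destination (if any) to the end of the list, mirroring
 * the tmp_hw_dest swap in mlx5dr_action_create_mult_dest_tbl(). */
static void put_wire_dest_last(struct dest *dests, int n)
{
	for (int i = 0; i < n - 1; i++) {
		if (dests[i].is_wire) {
			struct dest tmp = dests[i];

			dests[i] = dests[n - 1];
			dests[n - 1] = tmp;
			break; /* at most one wire destination is expected */
		}
	}
}

int main(void)
{
	struct dest d[3] = { { 1, false }, { 2, true }, { 3, false } };

	put_wire_dest_last(d, 3);
	for (int i = 0; i < 3; i++)
		printf("dest %d: ft %d%s\n", i, d[i].ft_id,
		       d[i].is_wire ? " (wire, runs on TX)" : "");
	return 0;
}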
@@ -1064,6 +1064,7 @@ struct mlx5dr_action_sampler {
struct mlx5dr_action_dest_tbl {
u8 is_fw_tbl:1;
u8 is_wire_ft:1;
union {
struct mlx5dr_table *tbl;
struct {
......
@@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
struct mlx5dr_action *tbl_action;
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
if (tbl_action)
tbl_action->dest_tbl->is_wire_ft =
dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
return tbl_action;
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
......
@@ -67,6 +67,7 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
......
@@ -10574,6 +10574,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
......