Commit 416a01a4 authored by Paolo Abeni

Merge tag 'mlx5-updates-2023-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-09-19

Misc updates for mlx5 driver

1) From Erez, Add support for multicast forwarding to multiple destinations
   in bridge offloads with software steering mode (SMFS).

2) From Jianbo, Utilize the maximum aggregated link speed for the police
   action rate.

3) From Moshe, Add a health error syndrome for poisoned PCI data

4) From Shay, Enable 4-port multiport E-switch

5) From Jiri, Trivial SF code cleanup

====================

Link: https://lore.kernel.org/r/20230920063552.296978-1-saeed@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents c1fec890 e738e355
......@@ -78,6 +78,8 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
xa_for_each(&entry->ports, idx, port) {
dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[i].ft = port->mcast.ft;
if (port->vport_num == MLX5_VPORT_UPLINK)
dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
i++;
}
......@@ -585,10 +587,6 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
port->vport_num == MLX5_VPORT_UPLINK)
rule_spec->flow_context.flow_source =
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
......@@ -660,11 +658,6 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
if (!rule_spec)
return ERR_PTR(-ENOMEM);
if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
port->vport_num == MLX5_VPORT_UPLINK)
rule_spec->flow_context.flow_source =
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
......
......@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
......@@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
{
struct ethtool_link_ksettings lksettings;
struct net_device *slave, *master;
u32 speed = SPEED_UNKNOWN;
/* Lock ensures a stable reference to master and slave netdevice
* while port speed of master is queried.
*/
ASSERT_RTNL();
slave = mlx5_uplink_netdev_get(mdev);
if (!slave)
goto out;
master = netdev_master_upper_dev_get(slave);
if (master && !__ethtool_get_link_ksettings(master, &lksettings))
speed = lksettings.base.speed;
out:
return speed;
}
static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
bool hold_rtnl_lock, struct netlink_ext_ack *extack)
{
int err;
if (!mlx5_lag_is_active(mdev))
goto skip_lag;
if (hold_rtnl_lock)
rtnl_lock();
*link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
if (hold_rtnl_lock)
rtnl_unlock();
if (*link_speed_max != (u32)SPEED_UNKNOWN)
return 0;
skip_lag:
err = mlx5_port_max_linkspeed(mdev, link_speed_max);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
return err;
}
static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
const char *name, u32 link_speed_max,
u64 value, struct netlink_ext_ack *extack)
{
if (value > link_speed_max) {
pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
name, value, link_speed_max);
NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
return -EINVAL;
}
return 0;
}
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 link_speed_max;
u32 bitmask;
int err;
......@@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
if (IS_ERR(vport))
return PTR_ERR(vport);
if (rate_mbps) {
err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
if (err)
return err;
err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
link_speed_max, rate_mbps, NULL);
if (err)
return err;
}
mutex_lock(&esw->state_lock);
if (!vport->qos.enabled) {
/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
......@@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
return err;
}
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
......@@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
if (value > link_speed_max) {
pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
name, value, link_speed_max);
NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
return -EINVAL;
}
err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
if (err)
return err;
err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
if (err)
return err;
*rate = value;
return 0;
......
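As a rough illustration of the rate check introduced above (item 2 of the commit message): when the device is part of an active LAG, the cap is taken from the bond master's aggregated speed, with a fallback to the single-port maximum, and any requested police rate above that cap is rejected. The standalone C sketch below only models that selection-and-verify flow; all names and values in it are illustrative stand-ins, not mlx5 code.

/* Minimal standalone model of the speed-cap selection and rate check.
 * All types and values here are illustrative stand-ins, not mlx5 APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN ((uint32_t)-1)

struct fake_dev {
	bool lag_active;          /* device is part of an active LAG       */
	uint32_t bond_speed;      /* aggregated speed of the bond master   */
	uint32_t port_max_speed;  /* max speed of the single physical port */
};

/* Pick the cap: aggregated LAG speed when available, else the port max. */
static uint32_t max_link_speed_get(const struct fake_dev *dev)
{
	if (dev->lag_active && dev->bond_speed != SPEED_UNKNOWN)
		return dev->bond_speed;
	return dev->port_max_speed;
}

/* Reject a requested rate (in Mbps) that exceeds the cap. */
static int link_speed_verify(uint32_t cap_mbps, uint64_t rate_mbps)
{
	if (rate_mbps > cap_mbps) {
		fprintf(stderr, "rate %llu Mbps exceeds link maximum %u Mbps\n",
			(unsigned long long)rate_mbps, cap_mbps);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Two 100G ports bonded: a 150G police rate is now acceptable. */
	struct fake_dev dev = { .lag_active = true,
				.bond_speed = 200000,
				.port_max_speed = 100000 };

	uint32_t cap = max_link_speed_get(&dev);
	printf("cap=%u Mbps, verify(150000)=%d\n",
	       cap, link_speed_verify(cap, 150000));
	return 0;
}

With two 100G ports bonded, a 150000 Mbps police rate passes the check here, whereas against the 100000 Mbps single-port cap it would have been rejected.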
......@@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd)
return "FFSER error";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
default:
return "unrecognized error";
}
......
......@@ -65,12 +65,12 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
return err;
}
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int err;
int i;
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
......@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
if (!err)
err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
if (err)
goto err_rescan_drivers;
for (i = 0; i < ldev->ports; i++) {
err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
return 0;
......@@ -112,8 +112,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
mlx5_eswitch_reload_reps(dev0->priv.eswitch);
mlx5_eswitch_reload_reps(dev1->priv.eswitch);
for (i = 0; i < ldev->ports; i++)
mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
......
......@@ -14,7 +14,6 @@
struct mlx5_sf_dev_table {
struct xarray devices;
unsigned int max_sfs;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
......@@ -110,12 +109,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
sf_dev->parent_mdev = dev;
sf_dev->fn_id = fn_id;
if (!table->max_sfs) {
mlx5_adev_idx_free(id);
kfree(sf_dev);
err = -EOPNOTSUPP;
goto add_err;
}
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
trace_mlx5_sf_dev_add(dev, sf_dev, id);
......@@ -296,7 +289,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
unsigned int max_sfs;
int err;
if (!mlx5_sf_dev_supported(dev))
......@@ -310,13 +302,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
if (MLX5_CAP_GEN(dev, max_num_sf))
max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
else
max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
table->max_sfs = max_sfs;
xa_init(&table->devices);
mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
......@@ -332,7 +319,6 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
return;
arm_err:
......@@ -340,7 +326,6 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
table->max_sfs = 0;
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
......
......@@ -55,6 +55,12 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
return action_type_to_str[action_id];
}
static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
{
return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
}
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
......@@ -1163,12 +1169,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
bool is_ft_wire = false;
u16 num_dst_ft = 0;
u32 num_of_ref = 0;
u32 ref_act_cnt;
u16 last_dest;
int ret;
int i;
......@@ -1210,11 +1220,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
break;
case DR_ACTION_TYP_FT:
if (num_dst_ft &&
!mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
goto free_ref_actions;
}
num_dst_ft++;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (dest_action->dest_tbl->is_fw_tbl)
if (dest_action->dest_tbl->is_fw_tbl) {
hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
else
} else {
hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
if (dest_action->dest_tbl->is_wire_ft) {
is_ft_wire = true;
last_dest = i;
}
}
break;
default:
......@@ -1223,6 +1244,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
}
}
/* In a multi-destination list, FW iterates over the destinations on the RX
 * side, except for the last one, which is handled on the TX side.
 * So if one of the FT targets is the wire, put it at the end of the dest list.
 */
if (is_ft_wire && num_dst_ft > 1) {
tmp_hw_dest = hw_dests[last_dest];
hw_dests[last_dest] = hw_dests[num_of_dests - 1];
hw_dests[num_of_dests - 1] = tmp_hw_dest;
}
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
......
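The comment in the hunk above explains the constraint: firmware processes the destination list on the RX side except for the last entry, which is handled on TX, so a wire (uplink) flow table must end up last. Below is a standalone C sketch of that reordering (swapping the wire entry with the last one); the structures and names are illustrative stand-ins, not the mlx5dr types.

/* Standalone model of the "wire destination goes last" reordering.
 * dest_entry is an illustrative stand-in for the SMFS destination struct.
 */
#include <stdbool.h>
#include <stdio.h>

struct dest_entry {
	unsigned int ft_id;
	bool is_wire;
};

/* If any flow-table destination targets the wire (uplink), swap it to the
 * end of the list so FW handles it on the TX side of the iteration. */
static void put_wire_dest_last(struct dest_entry *dests, int n)
{
	int wire_idx = -1;

	for (int i = 0; i < n; i++)
		if (dests[i].is_wire)
			wire_idx = i;

	if (wire_idx >= 0 && n > 1 && wire_idx != n - 1) {
		struct dest_entry tmp = dests[wire_idx];

		dests[wire_idx] = dests[n - 1];
		dests[n - 1] = tmp;
	}
}

int main(void)
{
	struct dest_entry dests[] = {
		{ .ft_id = 10, .is_wire = false },
		{ .ft_id = 20, .is_wire = true },  /* wire FT, must end up last */
		{ .ft_id = 30, .is_wire = false },
	};
	int n = sizeof(dests) / sizeof(dests[0]);

	put_wire_dest_last(dests, n);
	for (int i = 0; i < n; i++)
		printf("dest[%d]: ft_id=%u wire=%d\n", i, dests[i].ft_id, dests[i].is_wire);
	return 0;
}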
......@@ -1064,6 +1064,7 @@ struct mlx5dr_action_sampler {
struct mlx5dr_action_dest_tbl {
u8 is_fw_tbl:1;
u8 is_wire_ft:1;
union {
struct mlx5dr_table *tbl;
struct {
......
......@@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
struct mlx5dr_action *tbl_action;
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
if (tbl_action)
tbl_action->dest_tbl->is_wire_ft =
dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
return tbl_action;
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
......
......@@ -67,6 +67,7 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
......
......@@ -10574,6 +10574,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
......