Commit b10d10a7 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-07-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-07-24

1) Generalize devcom implementation to be independent of number of ports
   or device's GUID.

2) Save memory on command interface statistics.

3) General code cleanups

* tag 'mlx5-updates-2023-07-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Give esw_offloads_load/unload_rep() "mlx5_" prefix
  net/mlx5: Make mlx5_eswitch_load/unload_vport() static
  net/mlx5: Make mlx5_esw_offloads_rep_load/unload() static
  net/mlx5: Remove pointless devlink_rate checks
  net/mlx5: Don't check vport->enabled in port ops
  net/mlx5e: Make flow classification filters static
  net/mlx5e: Remove duplicate code for user flow
  net/mlx5: Allocate command stats with xarray
  net/mlx5: split mlx5_cmd_init() to probe and reload routines
  net/mlx5: Remove redundant cmdif revision check
  net/mlx5: Re-organize mlx5_cmd struct
  net/mlx5e: E-Switch, Allow devcom initialization on more vports
  net/mlx5e: E-Switch, Register devcom device with switch id key
  net/mlx5: Devcom, Infrastructure changes
  net/mlx5: Use shared code for checking lag is supported
====================

Link: https://lore.kernel.org/r/20230727183914.69229-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 97d0dca7 9eca8bb8
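
Before the hunks, a minimal sketch of the reworked devcom flow this series introduces (condensed from the mlx5e_tc_esw_init() and mlx5_esw_offloads_devcom_init() hunks below, not a drop-in excerpt): a component is now registered per device with a caller-chosen 64-bit key, here the switch id read from the uplink netdev, instead of being bounded by a fixed port count or keyed on the device GUID.

	/* Hedged sketch: derive the key from the port parent id and
	 * register the e-switch offloads component with it.
	 */
	struct netdev_phys_item_id ppid;
	u64 key;

	if (!dev_get_port_parent_id(priv->netdev, &ppid, false)) {
		memcpy(&key, &ppid.id, sizeof(key));	/* switch id becomes the devcom key */
		esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
							     MLX5_DEVCOM_ESW_OFFLOADS,
							     key,
							     mlx5_esw_offloads_devcom_event,
							     esw);
	}
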
......@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
int ret;
cmd = filp->private_data;
weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
field = cmd->max_reg_cmds - weight;
weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
field = cmd->vars.max_reg_cmds - weight;
ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
......@@ -188,6 +188,24 @@ static const struct file_operations slots_fops = {
.read = slots_read,
};
static struct mlx5_cmd_stats *
mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
{
struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
int err;
if (!stats)
return NULL;
err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
if (err) {
kfree(stats);
return NULL;
}
spin_lock_init(&stats->lock);
return stats;
}
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
......@@ -200,10 +218,14 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
xa_init(&dev->cmd.stats);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
if (!stats)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
debugfs_create_file("average", 0400, stats->root, stats,
......@@ -224,7 +246,13 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
unsigned long i;
debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
xa_for_each(&dev->cmd.stats, i, stats)
kfree(stats);
xa_destroy(&dev->cmd.stats);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
......
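
The debugfs hunk above replaces the fixed stats[MLX5_CMD_OP_MAX] array with a sparsely populated xarray: an entry is allocated only for opcodes that resolve to a known command string, which is where the memory saving in point 2 of the cover letter comes from. A hedged sketch of the lookup this implies on the command path (the lookup site is not part of this hunk, so the helper name is illustrative only):

	/* Illustrative helper, not from the patch: fetch the per-opcode
	 * stats entry, which is NULL for opcodes that never got one.
	 */
	static struct mlx5_cmd_stats *cmd_stats_get(struct mlx5_cmd *cmd, u16 opcode)
	{
		return xa_load(&cmd->stats, opcode);
	}
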
......@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "devlink.h"
#include "lag/lag.h"
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
......@@ -587,10 +588,7 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
if (!mdev)
return 0;
if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
!MLX5_CAP_GEN(mdev, lag_master) ||
(MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
if (!mlx5_lag_is_supported(mdev))
return 0;
return _next_phys_dev(mdev, data);
......
......@@ -1167,9 +1167,6 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
......
......@@ -2163,8 +2163,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
......@@ -2181,7 +2181,7 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
......
......@@ -96,10 +96,6 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &ethtool->l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
......
......@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
}
static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
struct mlx5_devcom *devcom,
struct mlx5e_rep_sq *rep_sq, int i)
{
struct mlx5_eswitch *peer_esw = NULL;
struct mlx5_flow_handle *flow_rule;
int tmp;
struct mlx5_devcom_comp_dev *tmp;
struct mlx5_eswitch *peer_esw;
mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
peer_esw, tmp) {
mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
struct mlx5e_rep_sq_peer *sq_peer;
int err;
......@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
struct mlx5_devcom *devcom;
bool devcom_locked = false;
int err;
int i;
......@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
devcom = esw->dev->priv.devcom;
rpriv = mlx5e_rep_to_rep_priv(rep);
if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
if (mlx5_devcom_comp_is_ready(esw->devcom) &&
mlx5_devcom_for_each_peer_begin(esw->devcom))
devcom_locked = true;
for (i = 0; i < sqns_num; i++) {
......@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
xa_init(&rep_sq->sq_peer);
if (devcom_locked) {
err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
if (err) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
xa_destroy(&rep_sq->sq_peer);
......@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
}
if (devcom_locked)
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_for_each_peer_end(esw->devcom);
return 0;
......@@ -498,7 +495,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
mlx5e_sqs2vport_stop(esw, rep);
if (devcom_locked)
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_for_each_peer_end(esw->devcom);
return err;
}
......
......@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
{
struct mlx5e_priv *out_priv, *route_priv;
struct mlx5_core_dev *route_mdev;
struct mlx5_devcom *devcom;
struct mlx5_devcom_comp_dev *pos;
struct mlx5_eswitch *esw;
u16 vhca_id;
int err;
int i;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
......@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
return err;
rcu_read_lock();
devcom = out_priv->mdev->priv.devcom;
err = -ENODEV;
mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
esw, i) {
mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
if (!err)
break;
......@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (mlx5e_is_eswitch_flow(flow)) {
struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
if (!mlx5_devcom_for_each_peer_begin(devcom)) {
mlx5e_tc_del_fdb_flow(priv, flow);
return;
}
mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_for_each_peer_end(devcom);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
mlx5e_tc_del_nic_flow(priv, flow);
......@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
if (!esw_paired)
return false;
......@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev;
struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
int err;
int i;
flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev);
......@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return 0;
}
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
if (!mlx5_devcom_for_each_peer_begin(devcom)) {
err = -ENODEV;
goto clean_flow;
}
mlx5_devcom_for_each_peer_entry(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
peer_esw, i) {
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
if (err)
goto peer_clean;
}
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_for_each_peer_end(devcom);
*__flow = flow;
return 0;
peer_clean:
mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
mlx5e_tc_del_fdb_flow(priv, flow);
return err;
......@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
......@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
/* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded.
*/
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
goto out;
if (flow_flag_test(flow, DUP)) {
......@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
no_peer_counter:
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (esw)
mlx5_devcom_for_each_peer_end(esw->devcom);
out:
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
......@@ -5200,11 +5194,12 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
u64 mapping_id;
u64 mapping_id, key;
int err = 0;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
......@@ -5258,7 +5253,11 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
mlx5_esw_offloads_devcom_init(esw);
err = dev_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
memcpy(&key, &ppid.id, sizeof(key));
mlx5_esw_offloads_devcom_init(esw, key);
}
return 0;
......
......@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
int i;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
mlx5_devcom_for_each_peer_entry(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
tmp, i) {
mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
if (!peer_esw) {
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return ERR_PTR(-ENODEV);
handle = ERR_PTR(-ENODEV);
goto out;
}
handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
bridge, peer_esw);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
......@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
vport_num, err);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
vport_num, err, peer);
goto err_ingress_flow_create;
}
entry->ingress_handle = handle;
......
......@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
int i;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
mlx5_devcom_for_each_peer_entry(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
tmp, i) {
mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
if (!peer_esw) {
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return ERR_PTR(-ENODEV);
handle = ERR_PTR(-ENODEV);
goto out;
}
handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
......
......@@ -132,10 +132,8 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
if (IS_ERR(vport))
return;
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
devl_rate_leaf_destroy(vport->dl_port);
}
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
devl_rate_leaf_destroy(vport->dl_port);
devl_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
......@@ -211,10 +209,8 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num
if (IS_ERR(vport))
return;
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
devl_rate_leaf_destroy(vport->dl_port);
}
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
devl_rate_leaf_destroy(vport->dl_port);
devl_port_unregister(vport->dl_port);
vport->dl_port = NULL;
......
......@@ -1068,9 +1068,8 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
}
}
/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
int err;
......@@ -1078,7 +1077,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err)
return err;
err = esw_offloads_load_rep(esw, vport_num);
err = mlx5_esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
......@@ -1089,9 +1088,9 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
mlx5_esw_offloads_unload_rep(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
......
......@@ -354,6 +354,7 @@ struct mlx5_eswitch {
} params;
struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
......@@ -381,8 +382,9 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
......@@ -725,15 +727,8 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
......@@ -816,8 +811,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
......
......@@ -2391,7 +2391,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
......@@ -2415,7 +2415,7 @@ int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
return err;
}
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
......@@ -2425,7 +2425,7 @@ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
int err;
......@@ -2449,7 +2449,7 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
return err;
}
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
......@@ -2811,7 +2811,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
struct mlx5_eswitch *peer_esw = event_data;
u16 esw_i, peer_esw_i;
bool esw_paired;
......@@ -2833,6 +2832,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_peer;
......@@ -2851,7 +2851,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
esw->num_peers++;
peer_esw->num_peers++;
mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
mlx5_devcom_comp_set_ready(esw->devcom, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
......@@ -2861,7 +2861,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
peer_esw->num_peers--;
esw->num_peers--;
if (!esw->num_peers && !peer_esw->num_peers)
mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
mlx5_devcom_comp_set_ready(esw->devcom, false);
xa_erase(&peer_esw->paired, esw_i);
xa_erase(&esw->paired, peer_esw_i);
mlx5_esw_offloads_unpair(peer_esw, esw);
......@@ -2886,9 +2886,8 @@ static int mlx5_esw_offloads_devcom_event(int event,
return err;
}
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
......@@ -2898,38 +2897,44 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
if (!mlx5_lag_is_supported(esw->dev))
if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
!mlx5_lag_is_supported(esw->dev))
return;
xa_init(&esw->paired);
mlx5_devcom_register_component(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
esw);
esw->num_peers = 0;
mlx5_devcom_send_event(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
key,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR_OR_NULL(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_PAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
ESW_OFFLOADS_DEVCOM_UNPAIR,
esw);
}
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
if (!mlx5_lag_is_supported(esw->dev))
if (IS_ERR_OR_NULL(esw->devcom))
return;
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_UNPAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
esw);
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5_devcom_unregister_component(esw->devcom);
xa_destroy(&esw->paired);
esw->devcom = NULL;
}
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
{
return mlx5_devcom_comp_is_ready(esw->devcom);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
......@@ -3356,7 +3361,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
err = mlx5_esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_uplink;
......@@ -3367,7 +3372,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
return 0;
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
mlx5_esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
......@@ -3405,7 +3410,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
mlx5_esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mapping_destroy(esw->offloads.reg_c0_obj_pool);
......@@ -4120,7 +4125,6 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int err = -EOPNOTSUPP;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
......@@ -4128,7 +4132,7 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
return err;
return -EOPNOTSUPP;
}
vport = mlx5_devlink_port_fn_get_vport(port, esw);
......@@ -4138,12 +4142,9 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
}
mutex_lock(&esw->state_lock);
if (vport->enabled) {
*is_enabled = vport->info.mig_enabled;
err = 0;
}
*is_enabled = vport->info.mig_enabled;
mutex_unlock(&esw->state_lock);
return err;
return 0;
}
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
......@@ -4172,10 +4173,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
}
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
goto out;
}
if (vport->info.mig_enabled == enable) {
err = 0;
......@@ -4219,7 +4216,6 @@ int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int err = -EOPNOTSUPP;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
......@@ -4232,12 +4228,9 @@ int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
}
mutex_lock(&esw->state_lock);
if (vport->enabled) {
*is_enabled = vport->info.roce_enabled;
err = 0;
}
*is_enabled = vport->info.roce_enabled;
mutex_unlock(&esw->state_lock);
return err;
return 0;
}
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
......@@ -4246,10 +4239,10 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int err = -EOPNOTSUPP;
void *query_ctx;
void *hca_caps;
u16 vport_num;
int err;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
......@@ -4263,10 +4256,6 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
vport_num = vport->vport;
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
goto out;
}
if (vport->info.roce_enabled == enable) {
err = 0;
......
......@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
dev = ldev->pf[MLX5_LAG_P1].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true;
......@@ -1268,16 +1268,6 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
mlx5_ldev_put(ldev);
}
bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
......
......@@ -74,8 +74,6 @@ struct mlx5_lag {
struct lag_mpesw lag_mpesw;
};
bool mlx5_lag_is_supported(struct mlx5_core_dev *dev);
static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
......@@ -115,4 +113,14 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
#endif /* __MLX5_LAG_H__ */
......@@ -6,11 +6,8 @@
#include <linux/mlx5/driver.h>
#define MLX5_DEVCOM_PORTS_SUPPORTED 4
enum mlx5_devcom_components {
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_NUM_COMPONENTS,
};
......@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event,
void *my_data,
void *event_data);
struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
u64 key,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data);
void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id, int *i);
#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \
for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
data; \
data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id, int *i);
#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \
for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
data; \
data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
#endif
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom);
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom);
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
struct mlx5_devcom_comp_dev **pos);
#define mlx5_devcom_for_each_peer_entry(devcom, data, pos) \
for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \
data; \
data = mlx5_devcom_get_next_peer_data(devcom, &pos))
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
struct mlx5_devcom_comp_dev **pos);
#define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos) \
for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \
data; \
data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
#endif /* __LIB_MLX5_DEVCOM_H__ */
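
With the header above, callers hold a struct mlx5_devcom_comp_dev handle directly and walk peers with an opaque cursor rather than an integer index plus a component id. A condensed usage sketch, assuming esw->devcom was registered as in the offloads hunks earlier in this diff:

	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *peer_esw;

	if (!mlx5_devcom_for_each_peer_begin(esw->devcom))
		return -ENODEV;

	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, pos) {
		/* act on each paired peer e-switch */
	}

	mlx5_devcom_for_each_peer_end(esw->devcom);
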
......@@ -951,10 +951,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
dev->priv.devcom = mlx5_devcom_register_device(dev);
if (IS_ERR(dev->priv.devcom))
mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
dev->priv.devcom);
dev->priv.devc = mlx5_devcom_register_device(dev);
if (IS_ERR(dev->priv.devc))
mlx5_core_warn(dev, "failed to register devcom device %ld\n",
PTR_ERR(dev->priv.devc));
err = mlx5_query_board_id(dev);
if (err) {
......@@ -1089,7 +1089,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
mlx5_devcom_unregister_device(dev->priv.devcom);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
}
......@@ -1118,7 +1118,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devcom);
mlx5_devcom_unregister_device(dev->priv.devc);
}
static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
......@@ -1142,7 +1142,7 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
return err;
}
err = mlx5_cmd_init(dev);
err = mlx5_cmd_enable(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
return err;
......@@ -1196,7 +1196,7 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
mlx5_cmd_disable(dev);
return err;
}
......@@ -1207,7 +1207,7 @@ static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
mlx5_cmd_disable(dev);
}
static int mlx5_function_open(struct mlx5_core_dev *dev)
......@@ -1796,6 +1796,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing cmdif SW structs, aborting\n");
goto err_cmd_init;
}
err = mlx5_tout_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
......@@ -1841,6 +1847,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
mlx5_cmd_cleanup(dev);
err_cmd_init:
debugfs_remove(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
......@@ -1863,6 +1871,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
mlx5_cmd_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
......
......@@ -178,6 +178,8 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
......
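
The split above separates command-interface software state from hardware bring-up. Roughly how the calls are ordered after this series, condensed from the main.c hunks earlier in the diff (error handling omitted):

	/* probe, in mlx5_mdev_init(): allocate cmdif SW structures once */
	err = mlx5_cmd_init(dev);

	/* every load/reload, in mlx5_function_enable(): bring up the command queue */
	err = mlx5_cmd_enable(dev);

	/* every unload, in mlx5_function_disable() */
	mlx5_cmd_disable(dev);

	/* remove, in mlx5_mdev_uninit(): free SW structures */
	mlx5_cmd_cleanup(dev);
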
......@@ -287,18 +287,23 @@ struct mlx5_cmd_stats {
struct mlx5_cmd {
struct mlx5_nb nb;
/* members which needs to be queried or reinitialized each reload */
struct {
u16 cmdif_rev;
u8 log_sz;
u8 log_stride;
int max_reg_cmds;
unsigned long bitmask;
struct semaphore sem;
struct semaphore pages_sem;
struct semaphore throttle_sem;
} vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
void *cmd_buf;
dma_addr_t dma;
u16 cmdif_rev;
u8 log_sz;
u8 log_stride;
int max_reg_cmds;
int events;
u32 __iomem *vector;
/* protect command queue allocations
*/
......@@ -308,12 +313,8 @@ struct mlx5_cmd {
*/
spinlock_t token_lock;
u8 token;
unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
struct semaphore sem;
struct semaphore pages_sem;
struct semaphore throttle_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
......@@ -321,7 +322,7 @@ struct mlx5_cmd {
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
struct xarray stats;
};
struct mlx5_cmd_mailbox {
......@@ -501,7 +502,7 @@ struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
......@@ -618,7 +619,7 @@ struct mlx5_priv {
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
u32 flags;
struct mlx5_devcom *devcom;
struct mlx5_devcom_dev *devc;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
......