Commit 178a4ff1 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2022-11-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-11-21

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-11-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix possible race condition in macsec extended packet number update routine
  net/mlx5e: Fix MACsec update SecY
  net/mlx5e: Fix MACsec SA initialization routine
  net/mlx5e: Remove leftovers from old XSK queues enumeration
  net/mlx5e: Offload rule only when all encaps are valid
  net/mlx5e: Fix missing alignment in size of MTT/KLM entries
  net/mlx5: Fix sync reset event handler error flow
  net/mlx5: E-Switch, Set correctly vport destination
  net/mlx5: Lag, avoid lockdep warnings
  net/mlx5: Fix handling of entry refcount when command is not issued to FW
  net/mlx5: cmdif, Print info on any firmware cmd failure to tracepoint
  net/mlx5: SF: Fix probing active SFs during driver probe phase
  net/mlx5: Fix FW tracer timestamp calculation
  net/mlx5: Do not query pci info while pci disabled
====================

Link: https://lore.kernel.org/r/20221122022559.89459-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 568fe849 8514e325
@@ -45,6 +45,8 @@
 #include "mlx5_core.h"
 #include "lib/eq.h"
 #include "lib/tout.h"
+#define CREATE_TRACE_POINTS
+#include "diag/cmd_tracepoint.h"
 
 enum {
     CMD_IF_REV = 5,
@@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
 static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
 {
     u16 opcode, op_mod;
-    u32 syndrome;
-    u8 status;
     u16 uid;
-    int err;
-
-    syndrome = MLX5_GET(mbox_out, out, syndrome);
-    status = MLX5_GET(mbox_out, out, status);
     opcode = MLX5_GET(mbox_in, in, opcode);
     op_mod = MLX5_GET(mbox_in, in, op_mod);
     uid = MLX5_GET(mbox_in, in, uid);
-    err = cmd_status_to_err(status);
 
     if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
         mlx5_cmd_out_err(dev, opcode, op_mod, out);
-    else
-        mlx5_core_dbg(dev,
-                      "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
-                      mlx5_command_str(opcode), opcode, op_mod, uid,
-                      cmd_status_str(status), status, syndrome, err);
 }
 
 int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
@@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)
         cmd_ent_get(ent);
     set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 
+    cmd_ent_get(ent); /* for the _real_ FW event on completion */
     /* Skip sending command to fw if internal error */
     if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
         ent->ret = -ENXIO;
@@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)
         return;
     }
 
-    cmd_ent_get(ent); /* for the _real_ FW event on completion */
     /* ring doorbell after the descriptor is valid */
     mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
     wmb();
@@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
             cmd_ent_put(ent); /* timeout work was canceled */
 
         if (!forced || /* Real FW completion */
-            pci_channel_offline(dev->pdev) || /* FW is inaccessible */
-            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+            mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+            !opcode_allowed(cmd, ent->op))
             cmd_ent_put(ent);
 
         ent->ts2 = ktime_get_ns();
@@ -1892,6 +1881,16 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
     return err;
 }
 
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+    u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+    u8 status = MLX5_GET(mbox_out, out, status);
+
+    trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
+                   cmd_status_str(status), status, syndrome,
+                   cmd_status_to_err(status));
+}
+
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                            u32 syndrome, int err)
 {
@@ -1914,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 }
 
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
-static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
 {
     u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
     u8 status = MLX5_GET(mbox_out, out, status);
@@ -1922,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
     if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
         err = -EIO;
 
-    if (!err && status != MLX5_CMD_STAT_OK)
+    if (!err && status != MLX5_CMD_STAT_OK) {
         err = -EREMOTEIO;
+        mlx5_cmd_err_trace(dev, opcode, op_mod, out);
+    }
 
     cmd_status_log(dev, opcode, status, syndrome, err);
     return err;
@@ -1951,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int
 {
     int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
     u16 opcode = MLX5_GET(mbox_in, in, opcode);
+    u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
 
-    err = cmd_status_err(dev, err, opcode, out);
-    return err;
+    return cmd_status_err(dev, err, opcode, op_mod, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_do);
@@ -1997,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
 {
     int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
     u16 opcode = MLX5_GET(mbox_in, in, opcode);
+    u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
 
-    err = cmd_status_err(dev, err, opcode, out);
+    err = cmd_status_err(dev, err, opcode, op_mod, out);
     return mlx5_cmd_check(dev, err, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
@@ -2034,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
     struct mlx5_async_ctx *ctx;
 
     ctx = work->ctx;
-    status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
+    status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
     work->user_callback(status, work);
     if (atomic_dec_and_test(&ctx->num_inflight))
         complete(&ctx->inflight_done);
@@ -2049,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
     work->ctx = ctx;
     work->user_callback = callback;
     work->opcode = MLX5_GET(mbox_in, in, opcode);
+    work->op_mod = MLX5_GET(mbox_in, in, op_mod);
     work->out = out;
     if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
         return -EIO;
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mlx5
#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MLX5_CMD_TP_H_
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
TRACE_EVENT(mlx5_cmd,
TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
const char *status_str, u8 status, u32 syndrome, int err),
TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
TP_STRUCT__entry(__string(command_str, command_str)
__field(u16, opcode)
__field(u16, op_mod)
__string(status_str, status_str)
__field(u8, status)
__field(u32, syndrome)
__field(int, err)
),
TP_fast_assign(__assign_str(command_str, command_str);
__entry->opcode = opcode;
__entry->op_mod = op_mod;
__assign_str(status_str, status_str);
__entry->status = status;
__entry->syndrome = syndrome;
__entry->err = err;
),
TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
__get_str(command_str), __entry->opcode, __entry->op_mod,
__get_str(status_str), __entry->status, __entry->syndrome,
__entry->err)
);
#endif /* _MLX5_CMD_TP_H_ */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ./diag
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cmd_tracepoint
#include <trace/define_trace.h>
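
The trace event defined above (system "mlx5", event "mlx5_cmd") is emitted from mlx5_cmd_err_trace() in cmd.c whenever a command completes with a non-OK status. As a minimal user-space sketch only, not part of this series: the events/mlx5/mlx5_cmd path follows from the TRACE_SYSTEM and TRACE_EVENT names above, while the tracefs mount point and the helper program itself are assumptions.

/* Illustrative only: enable the mlx5:mlx5_cmd tracepoint via tracefs and
 * print command failures as the driver reports them. Assumes tracefs is
 * mounted at /sys/kernel/tracing and the caller has sufficient privileges.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
    FILE *f = fopen(path, "w");

    if (!f)
        return -1;
    fputs(val, f);
    fclose(f);
    return 0;
}

int main(void)
{
    char line[512];
    FILE *trace;

    /* Enable only the mlx5_cmd event added by this series. */
    if (write_str("/sys/kernel/tracing/events/mlx5/mlx5_cmd/enable", "1"))
        return 1;

    /* Stream records from the trace buffer as they arrive. */
    trace = fopen("/sys/kernel/tracing/trace_pipe", "r");
    if (!trace)
        return 1;
    while (fgets(line, sizeof(line), trace))
        fputs(line, stdout);
    fclose(trace);
    return 0;
}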
@@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
         trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
                           (str_frmt->timestamp & MASK_6_0);
     else
-        trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+        trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
                           (str_frmt->timestamp & MASK_6_0);
 
     mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
......
@@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
     list_for_each_entry(flow, flow_list, tmp_list) {
         if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
             continue;
-        spec = &flow->attr->parse_attr->spec;
-
-        /* update from encap rule to slow path rule */
-        rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
 
         attr = mlx5e_tc_get_encap_attr(flow);
         esw_attr = attr->esw_attr;
         /* mark the flow's encap dest as non-valid */
         esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+        esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+
+        /* update from encap rule to slow path rule */
+        spec = &flow->attr->parse_attr->spec;
+        rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
 
         if (IS_ERR(rule)) {
             err = PTR_ERR(rule);
@@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
     /* we know that the encap is valid */
     e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
     mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+    e->pkt_reformat = NULL;
 }
 
 static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
@@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                        struct net_device *mirred_dev,
                        int out_index,
                        struct netlink_ext_ack *extack,
-                       struct net_device **encap_dev,
-                       bool *encap_valid)
+                       struct net_device **encap_dev)
 {
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
     struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -878,9 +879,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
     if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
         attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
         attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
-        *encap_valid = true;
     } else {
-        *encap_valid = false;
+        flow_flag_set(flow, SLOW);
     }
 
     mutex_unlock(&esw->offloads.encap_tbl_lock);
......
@@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                        struct net_device *mirred_dev,
                        int out_index,
                        struct netlink_ext_ack *extack,
-                       struct net_device **encap_dev,
-                       bool *encap_valid);
+                       struct net_device **encap_dev);
 
 int mlx5e_attach_decap(struct mlx5e_priv *priv,
                        struct mlx5e_tc_flow *flow,
......
@@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
     obj_attrs.aso_pdn = macsec->aso.pdn;
     obj_attrs.epn_state = sa->epn_state;
 
-    if (is_tx) {
-        obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
-        key = &ctx->sa.tx_sa->key;
-    } else {
-        obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
-        key = &ctx->sa.rx_sa->key;
-    }
+    key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
+
+    if (sa->epn_state.epn_enabled) {
+        obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
+                                   cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
 
-    memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+        memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+    }
 
     obj_attrs.replay_window = ctx->secy->replay_window;
     obj_attrs.replay_protect = ctx->secy->replay_protect;
@@ -1155,7 +1155,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
             continue;
 
         if (rx_sa->active) {
-            err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+            err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
             if (err)
                 goto out;
         }
@@ -1536,6 +1536,8 @@ static void macsec_async_event(struct work_struct *work)
     async_work = container_of(work, struct mlx5e_macsec_async_work, work);
     macsec = async_work->macsec;
+    mutex_lock(&macsec->lock);
+
     mdev = async_work->mdev;
     obj_id = async_work->obj_id;
     macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
@@ -1557,6 +1559,7 @@ static void macsec_async_event(struct work_struct *work)
 out_async_work:
     kfree(async_work);
+    mutex_unlock(&macsec->lock);
 }
 
 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
......
@@ -35,7 +35,6 @@
 #include "en.h"
 #include "en/port.h"
 #include "en/params.h"
-#include "en/xsk/pool.h"
 #include "en/ptp.h"
 #include "lib/clock.h"
 #include "en/fs_ethtool.h"
@@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
                                 struct ethtool_channels *ch)
 {
     mutex_lock(&priv->state_lock);
 
     ch->max_combined = priv->max_nch;
     ch->combined_count = priv->channels.params.num_channels;
-    if (priv->xsk.refcnt) {
-        /* The upper half are XSK queues. */
-        ch->max_combined *= 2;
-        ch->combined_count *= 2;
-    }
 
     mutex_unlock(&priv->state_lock);
 }
@@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
     mutex_lock(&priv->state_lock);
 
-    /* Don't allow changing the number of channels if there is an active
-     * XSK, because the numeration of the XSK and regular RQs will change.
-     */
-    if (priv->xsk.refcnt) {
-        err = -EINVAL;
-        netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
-                   __func__);
-        goto out;
-    }
-
     /* Don't allow changing the number of channels if HTB offload is active,
      * because the numeration of the QoS SQs will change, while per-queue
      * qdiscs are attached.
......
@@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
 static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
 {
     u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+    u32 sz;
 
-    WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+    sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
 
-    return entries * umr_entry_size / MLX5_OCTWORD;
+    return sz / MLX5_OCTWORD;
 }
 
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
......
@@ -1634,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,
                 struct mlx5e_tc_flow *flow,
                 struct mlx5_flow_attr *attr,
                 struct netlink_ext_ack *extack,
-                bool *encap_valid,
                 bool *vf_tun)
 {
     struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -1651,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,
     parse_attr = attr->parse_attr;
     esw_attr = attr->esw_attr;
     *vf_tun = false;
-    *encap_valid = true;
 
     for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
         struct net_device *out_dev;
@@ -1668,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,
             goto out;
         }
         err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
-                                 extack, &encap_dev, encap_valid);
+                                 extack, &encap_dev);
         dev_put(out_dev);
         if (err)
             goto out;
@@ -1732,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
     struct mlx5e_tc_flow_parse_attr *parse_attr;
     struct mlx5_flow_attr *attr = flow->attr;
     struct mlx5_esw_flow_attr *esw_attr;
-    bool vf_tun, encap_valid;
     u32 max_prio, max_chain;
+    bool vf_tun;
     int err = 0;
 
     parse_attr = attr->parse_attr;
@@ -1823,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         esw_attr->int_port = int_port;
     }
 
-    err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+    err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
     if (err)
         goto err_out;
@@ -1853,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
      * (1) there's no error
      * (2) there's an encap action and we don't have valid neigh
      */
-    if (!encap_valid || flow_flag_test(flow, SLOW))
+    if (flow_flag_test(flow, SLOW))
         flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
     else
         flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -3759,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
     struct mlx5e_post_act *post_act = get_post_action(flow->priv);
     struct mlx5_flow_attr *attr, *next_attr = NULL;
     struct mlx5e_post_act_handle *handle;
-    bool vf_tun, encap_valid = true;
+    bool vf_tun;
     int err;
 
     /* This is going in reverse order as needed.
@@ -3781,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
         if (list_is_last(&attr->list, &flow->attrs))
             break;
 
-        err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+        err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
         if (err)
             goto out_free;
 
-        if (!encap_valid)
-            flow_flag_set(flow, SLOW);
-
         err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
         if (err)
             goto out_free;
......
@@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
         mlx5_lag_mpesw_is_activated(esw->dev))
         dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
     }
-    if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+    if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
         if (pkt_reformat) {
             flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
             flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
......
@@ -9,7 +9,8 @@ enum {
     MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
     MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
     MLX5_FW_RESET_FLAGS_PENDING_COMP,
-    MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+    MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+    MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
 };
 
 struct mlx5_fw_reset {
@@ -406,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
     err = mlx5_pci_link_toggle(dev);
     if (err) {
         mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
-        goto done;
+        set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
     }
 
     mlx5_enter_error_state(dev, true);
@@ -482,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
         goto out;
     }
     err = fw_reset->ret;
+    if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+        mlx5_unload_one_devl_locked(dev);
+        mlx5_load_one_devl_locked(dev, false);
+    }
 out:
     clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
     return err;
......
@@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)
     if (ldev->nb.notifier_call)
         unregister_netdevice_notifier_net(&init_net, &ldev->nb);
     mlx5_lag_mp_cleanup(ldev);
-    mlx5_lag_mpesw_cleanup(ldev);
-    cancel_work_sync(&ldev->mpesw_work);
     destroy_workqueue(ldev->wq);
+    mlx5_lag_mpesw_cleanup(ldev);
     mutex_destroy(&ldev->lock);
     kfree(ldev);
 }
......
@@ -50,6 +50,19 @@ struct lag_tracker {
     enum netdev_lag_hash hash_type;
 };
 
+enum mpesw_op {
+    MLX5_MPESW_OP_ENABLE,
+    MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+    struct work_struct work;
+    struct mlx5_lag *lag;
+    enum mpesw_op op;
+    struct completion comp;
+    int result;
+};
+
 /* LAG data of a ConnectX card.
  * It serves both its phys functions.
  */
@@ -66,7 +79,6 @@ struct mlx5_lag {
     struct lag_tracker tracker;
     struct workqueue_struct *wq;
     struct delayed_work bond_work;
-    struct work_struct mpesw_work;
     struct notifier_block nb;
     struct lag_mp lag_mp;
     struct mlx5_lag_port_sel port_sel;
......
@@ -7,63 +7,95 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
-void mlx5_mpesw_work(struct work_struct *work)
-{
-    struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
-
-    mutex_lock(&ldev->lock);
-    mlx5_disable_lag(ldev);
-    mutex_unlock(&ldev->lock);
-}
-
-static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
-{
-    struct mlx5_lag *ldev = dev->priv.lag;
-
-    if (!queue_work(ldev->wq, &ldev->mpesw_work))
-        mlx5_core_warn(dev, "failed to queue work\n");
-}
-
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
-{
-    struct mlx5_lag *ldev = dev->priv.lag;
-
-    if (!ldev)
-        return;
-
-    mutex_lock(&ldev->lock);
-    if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
-        ldev->mode == MLX5_LAG_MODE_MPESW)
-        mlx5_lag_disable_mpesw(dev);
-    mutex_unlock(&ldev->lock);
-}
-
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
-{
-    struct mlx5_lag *ldev = dev->priv.lag;
-    int err = 0;
-
-    if (!ldev)
-        return 0;
-
-    mutex_lock(&ldev->lock);
-    if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
-        goto out;
-
-    if (ldev->mode != MLX5_LAG_MODE_NONE) {
-        err = -EINVAL;
-        goto out;
-    }
-
-    err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
-    if (err)
-        mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
-
-out:
-    mutex_unlock(&ldev->lock);
-    return err;
-}
+static int add_mpesw_rule(struct mlx5_lag *ldev)
+{
+    struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+    int err;
+
+    if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+        return 0;
+
+    if (ldev->mode != MLX5_LAG_MODE_NONE) {
+        err = -EINVAL;
+        goto out_err;
+    }
+
+    err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+    if (err) {
+        mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+        goto out_err;
+    }
+
+    return 0;
+
+out_err:
+    atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+    return err;
+}
+
+static void del_mpesw_rule(struct mlx5_lag *ldev)
+{
+    if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+        ldev->mode == MLX5_LAG_MODE_MPESW)
+        mlx5_disable_lag(ldev);
+}
+
+static void mlx5_mpesw_work(struct work_struct *work)
+{
+    struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+    struct mlx5_lag *ldev = mpesww->lag;
+
+    mutex_lock(&ldev->lock);
+    if (mpesww->op == MLX5_MPESW_OP_ENABLE)
+        mpesww->result = add_mpesw_rule(ldev);
+    else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
+        del_mpesw_rule(ldev);
+    mutex_unlock(&ldev->lock);
+
+    complete(&mpesww->comp);
+}
+
+static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
+                                     enum mpesw_op op)
+{
+    struct mlx5_lag *ldev = dev->priv.lag;
+    struct mlx5_mpesw_work_st *work;
+    int err = 0;
+
+    if (!ldev)
+        return 0;
+
+    work = kzalloc(sizeof(*work), GFP_KERNEL);
+    if (!work)
+        return -ENOMEM;
+
+    INIT_WORK(&work->work, mlx5_mpesw_work);
+    init_completion(&work->comp);
+    work->op = op;
+    work->lag = ldev;
+
+    if (!queue_work(ldev->wq, &work->work)) {
+        mlx5_core_warn(dev, "failed to queue mpesw work\n");
+        err = -EINVAL;
+        goto out;
+    }
+    wait_for_completion(&work->comp);
+    err = work->result;
+out:
+    kfree(work);
+    return err;
+}
+
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+    mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+    return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
+}
 
 int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
 {
     struct mlx5_lag *ldev = mdev->priv.lag;
@@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
     if (!netif_is_bond_master(out_dev) || !ldev)
         return 0;
 
-    mutex_lock(&ldev->lock);
-    if (ldev->mode == MLX5_LAG_MODE_MPESW) {
-        mutex_unlock(&ldev->lock);
+    if (ldev->mode == MLX5_LAG_MODE_MPESW)
         return -EOPNOTSUPP;
-    }
-    mutex_unlock(&ldev->lock);
 
     return 0;
 }
@@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
 void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
 {
-    INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
     atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
 }
 
 void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
 {
-    cancel_delayed_work_sync(&ldev->bond_work);
+    WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
 }
@@ -12,7 +12,6 @@ struct lag_mpesw {
     atomic_t mpesw_rule_count;
 };
 
-void mlx5_mpesw_work(struct work_struct *work);
 int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
 bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
......
@@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
     res = state == pci_channel_io_perm_failure ?
         PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 
-    mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+    mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
+                   __func__, dev->state, dev->pci_status, res, result2str(res));
     return res;
 }
@@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
     struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
     int err;
 
-    mlx5_pci_trace(dev, "Enter\n");
+    mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
+                   __func__, dev->state, dev->pci_status);
 
     err = mlx5_pci_enable_device(dev);
     if (err) {
@@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
     res = PCI_ERS_RESULT_RECOVERED;
 out:
-    mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+    mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
+                   __func__, dev->state, dev->pci_status, err, res, result2str(res));
     return res;
 }
......
@@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {
     phys_addr_t base_address;
     u64 sf_bar_length;
     struct notifier_block nb;
+    struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
+    struct workqueue_struct *active_wq;
+    struct work_struct work;
+    u8 stop_active_wq:1;
     struct mlx5_core_dev *dev;
 };
@@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
         return 0;
 
     sf_index = event->function_id - base_id;
+    mutex_lock(&table->table_lock);
     sf_dev = xa_load(&table->devices, sf_index);
     switch (event->new_vhca_state) {
     case MLX5_VHCA_STATE_INVALID:
@@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
     default:
         break;
     }
+    mutex_unlock(&table->table_lock);
     return 0;
 }
@@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
     return 0;
 }
 
+static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+{
+    struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+    u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+    struct mlx5_core_dev *dev = table->dev;
+    u16 max_functions;
+    u16 function_id;
+    u16 sw_func_id;
+    int err = 0;
+    u8 state;
+    int i;
+
+    max_functions = mlx5_sf_max_functions(dev);
+    function_id = MLX5_CAP_GEN(dev, sf_base_id);
+    for (i = 0; i < max_functions; i++, function_id++) {
+        if (table->stop_active_wq)
+            return;
+        err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out));
+        if (err)
+            /* A failure of specific vhca doesn't mean others will
+             * fail as well.
+             */
+            continue;
+        state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+        if (state != MLX5_VHCA_STATE_ACTIVE)
+            continue;
+
+        sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
+        mutex_lock(&table->table_lock);
+        /* Don't probe device which is already probe */
+        if (!xa_load(&table->devices, i))
+            mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
+        /* There is a race where SF got inactive after the query
+         * above. e.g.: the query returns that the state of the
+         * SF is active, and after that the eswitch manager set it to
+         * inactive.
+         * This case cannot be managed in SW, since the probing of the
+         * SF is on one system, and the inactivation is on a different
+         * system.
+         * If the inactive is done after the SF perform init_hca(),
+         * the SF will fully probe and then removed. If it was
+         * done before init_hca(), the SF probe will fail.
+         */
+        mutex_unlock(&table->table_lock);
+    }
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+{
+    if (MLX5_CAP_GEN(table->dev, eswitch_manager))
+        return 0; /* the table is local */
+
+    /* Use a workqueue to probe active SFs, which are in large
+     * quantity and may take up to minutes to probe.
+     */
+    table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
+    if (!table->active_wq)
+        return -ENOMEM;
+    INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+    queue_work(table->active_wq, &table->work);
+    return 0;
+}
+
+static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+{
+    if (table->active_wq) {
+        table->stop_active_wq = true;
+        destroy_workqueue(table->active_wq);
+    }
+}
+
 void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
 {
     struct mlx5_sf_dev_table *table;
@@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
     table->base_address = pci_resource_start(dev->pdev, 2);
     table->max_sfs = max_sfs;
     xa_init(&table->devices);
+    mutex_init(&table->table_lock);
     dev->priv.sf_dev_table = table;
 
     err = mlx5_vhca_event_notifier_register(dev, &table->nb);
     if (err)
         goto vhca_err;
+
+    err = mlx5_sf_dev_queue_active_work(table);
+    if (err)
+        goto add_active_err;
+
     err = mlx5_sf_dev_vhca_arm_all(table);
     if (err)
         goto arm_err;
@@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
     return;
 
 arm_err:
+    mlx5_sf_dev_destroy_active_work(table);
+add_active_err:
     mlx5_vhca_event_notifier_unregister(dev, &table->nb);
 vhca_err:
     table->max_sfs = 0;
@@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
     if (!table)
         return;
 
+    mlx5_sf_dev_destroy_active_work(table);
     mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+    mutex_destroy(&table->table_lock);
 
     /* Now that event handler is not running, it is safe to destroy
      * the sf device without race.
......
@@ -981,6 +981,7 @@ struct mlx5_async_work {
     struct mlx5_async_ctx *ctx;
     mlx5_async_cbk_t user_callback;
     u16 opcode; /* cmd opcode */
+    u16 op_mod; /* cmd op_mod */
     void *out; /* pointer to the cmd output buffer */
 };
......