Commit ad0724b9 authored by David S. Miller

Merge tag 'mlx5-fixes-2022-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-05-03

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0a806ecc a042d7f5
@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
 struct mlx5_rsc_dump {
        u32 pdn;
        u32 mkey;
+       u32 number_of_menu_items;
        u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
 };
@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
        return -EINVAL;
 }
 
-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
+                                        MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
+                                        MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
+
+static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
+                                        int read_size, int start_idx)
 {
        void *data = page_address(page);
        enum mlx5_sgmt_type sgmt_idx;
        int num_of_items;
        char *sgmt_name;
        void *member;
+       int size = 0;
        void *menu;
        int i;
 
-       menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
-       num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+       if (!start_idx) {
+               menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+               rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
+                                                         num_of_records);
+               size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
+               data += size;
+       }
+       num_of_items = rsc_dump->number_of_menu_items;
 
-       for (i = 0; i < num_of_items; i++) {
-               member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+       for (i = 0; start_idx + i < num_of_items; i++) {
+               size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
+               if (size >= read_size)
+                       return start_idx + i;
+
+               member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
                sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
                sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
                if (sgmt_idx == -EINVAL)
@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
                rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
                                                               member, segment_type);
        }
+       return 0;
 }
 
 static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
        struct mlx5_rsc_dump_cmd *cmd = NULL;
        struct mlx5_rsc_key key = {};
        struct page *page;
+       int start_idx = 0;
        int size;
        int err;
@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
                if (err < 0)
                        goto destroy_cmd;
 
-               mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+               start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
 
        } while (err > 0);
...
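The rewritten menu parser above is resumable: each call accounts for the bytes it has consumed and returns the index of the first record that did not fit, so a menu spanning several pages no longer reads past the end of the current page. A minimal standalone sketch of that resume-index pattern (hypothetical names and sizes such as parse_page and RECORD_SIZE; not the driver's code):

    #include <stdio.h>

    #define PAGE_BYTES  16 /* bytes readable per call (hypothetical) */
    #define RECORD_SIZE 8  /* fixed record size (hypothetical) */

    /* Parse whole records starting at start_idx; return the index to
     * resume from on the next page, or 0 once all records are consumed.
     */
    static int parse_page(const int *records, int num_records, int start_idx)
    {
        int size = 0;
        int i;

        for (i = 0; start_idx + i < num_records; i++) {
            size += RECORD_SIZE;
            if (size > PAGE_BYTES)    /* next record would overflow the page */
                return start_idx + i; /* resume here on the next call */
            printf("record %d = %d\n", start_idx + i, records[start_idx + i]);
        }
        return 0;
    }

    int main(void)
    {
        int records[] = { 10, 11, 12, 13, 14 };
        int idx = 0;

        do {
            idx = parse_page(records, 5, idx);
        } while (idx > 0);
        return 0;
    }

The driver's version additionally subtracts the menu header (MLX5_RSC_DUMP_MENU_HEADER_SIZE) on the first page only and caches the record count in the new number_of_menu_items field, since later pages carry no header.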
@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
-                                         xoff, &port_buffer, &update_buffer);
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+                                         port_buff_cell_sz, &port_buffer, &update_buffer);
                if (err)
                        return err;
        }
...
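The fix above only reorders two arguments: the call site passed port_buff_cell_sz where update_buffer_lossy() expects xoff, and vice versa. Because both parameters are plain integer types, the swapped call compiled without a diagnostic. A small illustration of that failure mode (hypothetical helper that merely mirrors the parameter types):

    #include <stdio.h>

    /* Hypothetical helper: both parameters are integers, so swapped
     * arguments are implicitly converted and compile silently.
     */
    static unsigned int cells_needed(unsigned int xoff, unsigned short cell_sz)
    {
        return xoff / cell_sz;
    }

    int main(void)
    {
        unsigned int xoff = 4096;
        unsigned short cell_sz = 128;

        printf("correct: %u\n", cells_needed(xoff, cell_sz)); /* 32 */
        printf("swapped: %u\n", cells_needed(cell_sz, xoff)); /* 0, no warning */
        return 0;
    }

Building with -Wconversion can flag the narrowing in the swapped call, but the default warning level accepts it, which is how this class of bug slips through review.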
@@ -145,8 +145,7 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
        flow_action_for_each(i, act, flow_action) {
                tc_act = mlx5e_tc_act_get(act->id, ns_type);
-               if (!tc_act || !tc_act->post_parse ||
-                   !tc_act->can_offload(parse_state, act, i, attr))
+               if (!tc_act || !tc_act->post_parse)
                        continue;
 
                err = tc_act->post_parse(parse_state, priv, attr);
...
@@ -45,12 +45,41 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
        if (mlx5e_is_eswitch_flow(parse_state->flow))
                attr->esw_attr->split_count = attr->esw_attr->out_count;
 
-       if (!clear_action) {
+       if (clear_action) {
+               parse_state->ct_clear = true;
+       } else {
                attr->flags |= MLX5_ATTR_FLAG_CT;
                flow_flag_set(parse_state->flow, CT);
                parse_state->ct = true;
        }
-       parse_state->ct_clear = clear_action;
 
        return 0;
 }
 
+static int
+tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+                     struct mlx5e_priv *priv,
+                     struct mlx5_flow_attr *attr)
+{
+       struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts;
+       int err;
+
+       /* If ct action exist, we can ignore previous ct_clear actions */
+       if (parse_state->ct)
+               return 0;
+
+       if (parse_state->ct_clear) {
+               err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts);
+               if (err) {
+                       NL_SET_ERR_MSG_MOD(parse_state->extack,
+                                          "Failed to set registers for ct clear");
+                       return err;
+               }
+               attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+               /* Prevent handling of additional, redundant clear actions */
+               parse_state->ct_clear = false;
+       }
+
+       return 0;
+}
+
@@ -70,5 +99,6 @@ struct mlx5e_tc_act mlx5e_tc_act_ct = {
        .can_offload = tc_act_can_offload_ct,
        .parse_action = tc_act_parse_ct,
        .is_multi_table_act = tc_act_is_multi_table_act_ct,
+       .post_parse = tc_act_post_parse_ct,
 };
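With the new .post_parse callback, the ct_clear register writes are deferred until the whole action list has been parsed: a flow that contains both ct and ct_clear keeps its ct metadata, and repeated clear actions are applied only once. A standalone sketch of this parse/post-parse split (hypothetical struct and function names; the driver's state lives in mlx5e_tc_act_parse_state):

    #include <stdbool.h>
    #include <stdio.h>

    struct parse_state {
        bool ct;       /* a ct action was seen */
        bool ct_clear; /* a ct clear action was seen */
    };

    /* Parsing only records what was seen; side effects are deferred. */
    static void parse_ct(struct parse_state *st, bool clear_action)
    {
        if (clear_action)
            st->ct_clear = true;
        else
            st->ct = true;
    }

    /* Post-parse applies the clear registers once, and only when no ct
     * action exists that would overwrite them.
     */
    static void post_parse_ct(struct parse_state *st)
    {
        if (st->ct)
            return;               /* ct takes precedence over ct_clear */
        if (st->ct_clear) {
            printf("set ct-clear registers\n");
            st->ct_clear = false; /* ignore further redundant clears */
        }
    }

    int main(void)
    {
        struct parse_state st = { 0 };

        parse_ct(&st, true);  /* ct_clear */
        parse_ct(&st, false); /* ct */
        post_parse_ct(&st);   /* prints nothing: ct metadata is kept */
        return 0;
    }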
@@ -582,6 +582,12 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
        return 0;
 }
 
+int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+                                 struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+       return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+}
+
 static int
 mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
                                    char *modact)
@@ -1410,9 +1416,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
                        const struct flow_action_entry *act,
                        struct netlink_ext_ack *extack)
 {
-       bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
-       int err;
-
        if (!priv) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "offload of ct action isn't available");
@@ -1423,17 +1426,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
        attr->ct_attr.ct_action = act->ct.action;
        attr->ct_attr.nf_ft = act->ct.flow_table;
 
-       if (!clear_action)
-               goto out;
-
-       err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
-       if (err) {
-               NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
-               return err;
-       }
-       attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
-out:
        return 0;
 }
@@ -1749,6 +1741,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 static void
 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 {
+       struct mlx5e_priv *priv;
+
        if (!refcount_dec_and_test(&ft->refcount))
                return;
@@ -1758,6 +1752,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
        rhashtable_free_and_destroy(&ft->ct_entries_ht,
                                    mlx5_tc_ct_flush_ft_entry,
                                    ct_priv);
+       priv = netdev_priv(ct_priv->netdev);
+       flush_workqueue(priv->wq);
        mlx5_tc_ct_free_pre_ct_tables(ft);
        mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
        kfree(ft);
...
@@ -129,6 +129,10 @@ bool
 mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
                         struct sk_buff *skb, u8 zone_restore_id);
 
+int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+                             struct mlx5e_tc_mod_hdr_acts *mod_acts);
+
 #else /* CONFIG_MLX5_TC_CT */
 
 static inline struct mlx5_tc_ct_priv *
@@ -170,6 +174,13 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
        return 0;
 }
 
+static inline int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+                             struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
                        struct mlx5_flow_attr *attr,
...
@@ -713,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
                              struct net_device *filter_dev)
 {
        struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_int_port *int_port;
        TC_TUN_ROUTE_ATTR_INIT(attr);
        u16 vport_num;
@@ -747,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
                esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
                                                      misc_parameters.vxlan_vni);
                esw_attr->rx_tun_attr->decap_vport = vport_num;
-       } else if (netif_is_ovs_master(attr.route_dev)) {
+       } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
                int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
                                                 attr.route_dev->ifindex,
                                                 MLX5E_TC_INT_PORT_INGRESS);
...
@@ -1200,6 +1200,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
                return err;
 
        WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
 
+       if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
+               /*
+                * Align the driver state with the register state.
+                * Temporary state change is required to enable the app list reset.
+                */
+               priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
+               mlx5e_dcbnl_delete_app(priv);
+               priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+       }
+
        mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
                                                   priv->dcbx_dp.trust_state);
...
@@ -2459,6 +2459,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                 match.key->vlan_priority);
 
                        *match_level = MLX5_MATCH_L2;
+
+                       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
+                           match.mask->vlan_eth_type &&
+                           MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
+                                                   ft_field_support.outer_second_vid,
+                                                   fs_type)) {
+                               MLX5_SET(fte_match_set_misc, misc_c,
+                                        outer_second_cvlan_tag, 1);
+                               spec->match_criteria_enable |=
+                                       MLX5_MATCH_MISC_PARAMETERS;
+                       }
                }
        } else if (*match_level != MLX5_MATCH_NONE) {
                /* cvlan_tag enabled in match criteria and
...
@@ -139,7 +139,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
        if (mlx5_esw_indir_table_decap_vport(attr))
                vport = mlx5_esw_indir_table_decap_vport(attr);
 
-       if (esw_attr->int_port)
+       if (attr && !attr->chain && esw_attr->int_port)
                metadata =
                        mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
        else
...
@@ -155,6 +155,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
        }
 }
 
+static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+       struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+       del_timer_sync(&fw_reset->timer);
+}
+
+static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+{
+       struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+       if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+               mlx5_core_warn(dev, "Reset request was already cleared\n");
+               return -EALREADY;
+       }
+
+       mlx5_stop_sync_reset_poll(dev);
+       if (poll_health)
+               mlx5_start_health_poll(dev);
+       return 0;
+}
+
 static void mlx5_sync_reset_reload_work(struct work_struct *work)
 {
        struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -162,6 +184,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
        struct mlx5_core_dev *dev = fw_reset->dev;
        int err;
 
+       mlx5_sync_reset_clear_reset_requested(dev, false);
        mlx5_enter_error_state(dev, true);
        mlx5_unload_one(dev);
        err = mlx5_health_wait_pci_up(dev);
@@ -171,23 +194,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
        mlx5_fw_reset_complete_reload(dev);
 }
 
-static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
-{
-       struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
-
-       del_timer_sync(&fw_reset->timer);
-}
-
-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
-{
-       struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
-
-       mlx5_stop_sync_reset_poll(dev);
-       clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
-       if (poll_health)
-               mlx5_start_health_poll(dev);
-}
-
 #define MLX5_RESET_POLL_INTERVAL (HZ / 10)
 static void poll_sync_reset(struct timer_list *t)
 {
@@ -202,7 +208,6 @@ static void poll_sync_reset(struct timer_list *t)
        if (fatal_error) {
                mlx5_core_warn(dev, "Got Device Reset\n");
-               mlx5_sync_reset_clear_reset_requested(dev, false);
                queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
                return;
        }
@@ -229,13 +234,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
        return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
 }
 
-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
 {
        struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+       if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+               mlx5_core_warn(dev, "Reset request was already set\n");
+               return -EALREADY;
+       }
        mlx5_stop_health_poll(dev, true);
-       set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
        mlx5_start_sync_reset_poll(dev);
+       return 0;
 }
 
 static void mlx5_fw_live_patch_event(struct work_struct *work)
@@ -264,7 +273,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
                               err ? "Failed" : "Sent");
                return;
        }
-       mlx5_sync_reset_set_reset_requested(dev);
+       if (mlx5_sync_reset_set_reset_requested(dev))
+               return;
+
        err = mlx5_fw_reset_set_reset_sync_ack(dev);
        if (err)
                mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
@@ -362,7 +373,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
        struct mlx5_core_dev *dev = fw_reset->dev;
        int err;
 
-       mlx5_sync_reset_clear_reset_requested(dev, false);
+       if (mlx5_sync_reset_clear_reset_requested(dev, false))
+               return;
        mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
@@ -391,10 +403,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
                                                      reset_abort_work);
        struct mlx5_core_dev *dev = fw_reset->dev;
 
-       if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+       if (mlx5_sync_reset_clear_reset_requested(dev, true))
                return;
-
-       mlx5_sync_reset_clear_reset_requested(dev, true);
        mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
 }
...
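Both state changers above now flip MLX5_FW_RESET_FLAGS_RESET_REQUESTED with the kernel's atomic test_and_set_bit()/test_and_clear_bit() helpers and bail out with -EALREADY when they lose the race, so only one of the concurrent events (request, now, abort) performs the poll start/stop transition. A userspace sketch of the same guard, using C11 atomics in place of the kernel bitops (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool reset_requested;

    /* Returns false when the flag was already set: the loser backs off. */
    static bool set_reset_requested(void)
    {
        if (atomic_exchange(&reset_requested, true)) {
            fprintf(stderr, "reset request was already set\n");
            return false;
        }
        /* ... stop health poll, start sync-reset poll ... */
        return true;
    }

    static bool clear_reset_requested(void)
    {
        if (!atomic_exchange(&reset_requested, false)) {
            fprintf(stderr, "reset request was already cleared\n");
            return false;
        }
        /* ... stop sync-reset poll, optionally restart health poll ... */
        return true;
    }

    int main(void)
    {
        if (set_reset_requested())
            puts("transition performed exactly once");
        set_reset_requested();   /* second attempt backs off */
        clear_reset_requested(); /* performed once */
        return 0;
    }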
@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
        flush_workqueue(mp->wq);
 }
 
+static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len)
+{
+       mp->fib.mfi = fi;
+       mp->fib.priority = fi->fib_priority;
+       mp->fib.dst = dst;
+       mp->fib.dst_len = dst_len;
+}
+
 struct mlx5_fib_event_work {
        struct work_struct work;
        struct mlx5_lag *ldev;
@@ -110,10 +118,10 @@ struct mlx5_fib_event_work {
        };
 };
 
-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
-                                     unsigned long event,
-                                     struct fib_info *fi)
+static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
+                                     struct fib_entry_notifier_info *fen_info)
 {
+       struct fib_info *fi = fen_info->fi;
        struct lag_mp *mp = &ldev->lag_mp;
        struct fib_nh *fib_nh0, *fib_nh1;
        unsigned int nhs;
@@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
        /* Handle delete event */
        if (event == FIB_EVENT_ENTRY_DEL) {
                /* stop track */
-               if (mp->mfi == fi)
-                       mp->mfi = NULL;
+               if (mp->fib.mfi == fi)
+                       mp->fib.mfi = NULL;
                return;
        }
 
        /* Handle multipath entry with lower priority value */
-       if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+       if (mp->fib.mfi && mp->fib.mfi != fi &&
+           (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
+           fi->fib_priority >= mp->fib.priority)
                return;
 
        /* Handle add/replace event */
@@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
                        i++;
 
                mlx5_lag_set_port_affinity(ldev, i);
+               mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
        }
 
-       mp->mfi = fi;
        return;
 }
@@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
        }
 
        /* First time we see multipath route */
-       if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
+       if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
                struct lag_tracker tracker;
 
                tracker = ldev->tracker;
}
mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
mp->mfi = fi;
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
@@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
        struct lag_mp *mp = &ldev->lag_mp;
 
        /* Check the nh event is related to the route */
-       if (!mp->mfi || mp->mfi != fi)
+       if (!mp->fib.mfi || mp->fib.mfi != fi)
                return;
 
        /* nh added/removed */
@@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work)
        case FIB_EVENT_ENTRY_REPLACE:
        case FIB_EVENT_ENTRY_DEL:
                mlx5_lag_fib_route_event(ldev, fib_work->event,
-                                        fib_work->fen_info.fi);
+                                        &fib_work->fen_info);
                fib_info_put(fib_work->fen_info.fi);
                break;
        case FIB_EVENT_NH_ADD:
@@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
        /* Clear mfi, as it might become stale when a route delete event
         * has been missed, see mlx5_lag_fib_route_event().
         */
-       ldev->lag_mp.mfi = NULL;
+       ldev->lag_mp.fib.mfi = NULL;
 }
int mlx5_lag_mp_init(struct mlx5_lag *ldev)
@@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
        /* always clear mfi, as it might become stale when a route delete event
         * has been missed
         */
-       mp->mfi = NULL;
+       mp->fib.mfi = NULL;
 
        if (mp->fib_nb.notifier_call)
                return 0;
@@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
        unregister_fib_notifier(&init_net, &mp->fib_nb);
        destroy_workqueue(mp->wq);
        mp->fib_nb.notifier_call = NULL;
-       mp->mfi = NULL;
+       mp->fib.mfi = NULL;
 }
@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity {
 struct lag_mp {
        struct notifier_block fib_nb;
-       struct fib_info *mfi; /* used in tracking fib events */
+       struct {
+               const void *mfi; /* used in tracking fib events */
+               u32 priority;
+               u32 dst;
+               int dst_len;
+       } fib;
        struct workqueue_struct *wq;
 };
...
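The struct change above replaces the bare fib_info pointer with a cache of the fields the LAG code compares later (priority, dst, dst_len); the pointer itself is demoted to const void * so it serves only as an identity cookie and can never be dereferenced after the route is freed. A sketch of that caching pattern (hypothetical types; route_info stands in for fib_info):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct route_info { /* hypothetical stand-in for fib_info */
        uint32_t dst;
        int dst_len;
        uint32_t priority;
    };

    struct tracker {
        const void *cookie; /* identity only, never dereferenced */
        uint32_t priority;
        uint32_t dst;
        int dst_len;
    };

    static void track(struct tracker *t, const struct route_info *ri)
    {
        t->cookie = ri;
        t->priority = ri->priority;
        t->dst = ri->dst;
        t->dst_len = ri->dst_len;
    }

    /* Ignore an update only when it is a different prefix with a worse
     * (higher) priority; comparisons use the cached copies, not *cookie.
     */
    static bool should_ignore(const struct tracker *t, const struct route_info *ri)
    {
        return t->cookie && t->cookie != ri &&
               (t->dst != ri->dst || t->dst_len != ri->dst_len) &&
               ri->priority >= t->priority;
    }

    int main(void)
    {
        struct route_info a = { 0x0a000000, 24, 100 };
        struct route_info b = { 0x0b000000, 24, 200 };
        struct tracker t = { 0 };

        track(&t, &a);
        printf("ignore b? %s\n", should_ignore(&t, &b) ? "yes" : "no");
        return 0;
    }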
@@ -505,7 +505,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
        struct ttc_params ttc_params = {};
 
        mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
-       port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+       port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
        if (IS_ERR(port_sel->inner.ttc))
                return PTR_ERR(port_sel->inner.ttc);
...
@@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
        for (tt = 0; tt < MLX5_NUM_TT; tt++) {
                struct mlx5_ttc_rule *rule = &rules[tt];
 
+               if (test_bit(tt, params->ignore_dests))
+                       continue;
                rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
                                                          &params->dests[tt],
                                                          ttc_rules[tt].etype,
...