Commit 55c4bf4d authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2022-03-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-03-09

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-03-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: SHAMPO, reduce TIR indication
  net/mlx5e: Lag, Only handle events from highest priority multipath entry
  net/mlx5: Fix offloading with ESWITCH_IPV4_TTL_MODIFY_ENABLE
  net/mlx5: Fix a race on command flush flow
  net/mlx5: Fix size field in bufferx_reg struct
====================

Link: https://lore.kernel.org/r/20220309201517.589132-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 37c9d66c 99a2b9be
@@ -131,11 +131,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
 
 static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->alloc_lock, flags);
+	lockdep_assert_held(&cmd->alloc_lock);
 	set_bit(idx, &cmd->bitmask);
-	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }
 
 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +142,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
 
 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
 {
+	struct mlx5_cmd *cmd = ent->cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->alloc_lock, flags);
 	if (!refcount_dec_and_test(&ent->refcnt))
-		return;
+		goto out;
 
 	if (ent->idx >= 0) {
-		struct mlx5_cmd *cmd = ent->cmd;
-
 		cmd_free_index(cmd, ent->idx);
 		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
 	}
 
 	cmd_free_ent(ent);
+out:
+	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }
 
 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
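The race fix above makes the final reference drop and the command-index free a single critical section under alloc_lock, so the flush flow can no longer observe an entry whose index has already been returned to the bitmask. Below is a minimal stand-alone model of that pattern (plain C with a pthread mutex standing in for the kernel spinlock; the struct layout, the plain-int refcount and the values are simplified stand-ins, not the driver's types):

/* Stand-alone sketch of the locking pattern, not kernel code.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct cmd {
	pthread_mutex_t alloc_lock;
	unsigned long bitmask;		/* bitmap of free command indexes */
};

struct cmd_work_ent {
	struct cmd *cmd;
	int refcnt;			/* stands in for refcount_t */
	int idx;
};

/* Caller must hold alloc_lock, mirroring the lockdep assertion above. */
static void cmd_free_index(struct cmd *cmd, int idx)
{
	cmd->bitmask |= 1UL << idx;
}

static void cmd_ent_put(struct cmd_work_ent *ent)
{
	struct cmd *cmd = ent->cmd;

	/* Drop the reference and free the index under one lock, so a
	 * concurrent flush cannot reuse the index while the entry is
	 * still being torn down. */
	pthread_mutex_lock(&cmd->alloc_lock);
	if (--ent->refcnt != 0)
		goto out;

	if (ent->idx >= 0)
		cmd_free_index(cmd, ent->idx);
	/* the entry itself would be freed here */
out:
	pthread_mutex_unlock(&cmd->alloc_lock);
}

int main(void)
{
	struct cmd cmd = { .alloc_lock = PTHREAD_MUTEX_INITIALIZER };
	struct cmd_work_ent ent = { .cmd = &cmd, .refcnt = 1, .idx = 3 };

	cmd_ent_put(&ent);
	printf("free-index bitmask after put: 0x%lx\n", cmd.bitmask);
	return 0;
}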
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
 			 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
 		MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
 		break;
-	case MLX5E_PACKET_MERGE_SHAMPO:
-		MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
-		break;
 	default:
 		break;
 	}
@@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
 		goto out;
 	}
 
-	err = mlx5e_safe_switch_params(priv, &new_params,
-				       mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
 out:
 	mutex_unlock(&priv->state_lock);
 	return err;
@@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 		return;
 	}
 
+	/* Handle multipath entry with lower priority value */
+	if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+		return;
+
 	/* Handle add/replace event */
 	nhs = fib_info_num_path(fi);
 	if (nhs == 1) {
@@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 			int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
 
 			if (i < 0)
-				i = MLX5_LAG_NORMAL_AFFINITY;
-			else
-				++i;
+				return;
+
+			i++;
 			mlx5_lag_set_port_affinity(ldev, i);
 		}
 
+		mp->mfi = fi;
 		return;
 	}
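For context on the new check: fib_priority is the route metric, so a numerically lower value means a more preferred entry, and events for entries whose metric is greater than or equal to the tracked entry's metric are ignored. A tiny stand-alone illustration of that comparison (hypothetical struct and values, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct fib_entry {
	unsigned int fib_priority;	/* route metric: lower value wins */
};

/* Mirrors the shape of the check added above, on a made-up struct. */
static bool ignore_event(const struct fib_entry *tracked, const struct fib_entry *fi)
{
	return tracked && tracked != fi && fi->fib_priority >= tracked->fib_priority;
}

int main(void)
{
	struct fib_entry tracked = { .fib_priority = 10 };
	struct fib_entry worse   = { .fib_priority = 20 };	/* lower priority */
	struct fib_entry better  = { .fib_priority = 5 };	/* higher priority */

	printf("ignore metric-20 event: %d\n", ignore_event(&tracked, &worse));		/* 1 */
	printf("ignore metric-5 event:  %d\n", ignore_event(&tracked, &better));	/* 0 */
	return 0;
}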
@@ -121,9 +121,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
-	if (!mlx5_chains_prios_supported(chains))
-		return 1;
-
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;
 
@@ -3434,7 +3434,6 @@ enum {
 enum {
 	MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
 	MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
-	MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2),
 };
 
 enum {
@@ -9900,8 +9899,8 @@ struct mlx5_ifc_bufferx_reg_bits {
 	u8         reserved_at_0[0x6];
 	u8         lossy[0x1];
 	u8         epsb[0x1];
-	u8         reserved_at_8[0xc];
-	u8         size[0xc];
+	u8         reserved_at_8[0x8];
+	u8         size[0x10];
 
 	u8         xoff_threshold[0x10];
 	u8         xon_threshold[0x10];
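The bufferx_reg change only widens the size field: the first 32-bit word of the register goes from 6+1+1+12+12 to 6+1+1+8+16 bits, so size is read as 16 bits instead of 12 and values above 4095 are no longer cut off. A small stand-alone sketch of extracting the field under both layouts (the get_bits() helper and the sample value are made up for illustration; this is not the mlx5 MLX5_GET macro):

#include <stdint.h>
#include <stdio.h>

/* Return "width" bits located "offset" bits from the MSB of a 32-bit
 * word, the way mlx5_ifc *_bits structs describe register layouts. */
static uint32_t get_bits(uint32_t dword, unsigned int offset, unsigned int width)
{
	return (dword >> (32 - offset - width)) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t first_dword = 0x00012345;	/* sample register word */

	/* Old layout: reserved_at_0[0x6] lossy[0x1] epsb[0x1]
	 * reserved_at_8[0xc] size[0xc] -> size starts at bit 20, 12 bits wide. */
	printf("size as a 12-bit field: %u\n", (unsigned)get_bits(first_dword, 20, 12));

	/* New layout: reserved_at_8[0x8] size[0x10] -> size starts at bit 16,
	 * 16 bits wide, so the upper 4 bits are no longer lost. */
	printf("size as a 16-bit field: %u\n", (unsigned)get_bits(first_dword, 16, 16));
	return 0;
}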