Commit e9b797dc authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2023-12-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-12-13

This series provides bug fixes for the mlx5 driver.

* tag 'mlx5-fixes-2023-12-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Correct snprintf truncation handling for fw_version buffer used by representors
  net/mlx5e: Correct snprintf truncation handling for fw_version buffer
  net/mlx5e: Fix error codes in alloc_branch_attr()
  net/mlx5e: Fix error code in mlx5e_tc_action_miss_mapping_get()
  net/mlx5: Refactor mlx5_flow_destination->rep pointer to vport num
  net/mlx5: Fix fw tracer first block check
  net/mlx5e: XDP, Drop fragmented packets larger than MTU size
  net/mlx5e: Decrease num_block_tc when unblock tc offload
  net/mlx5e: Fix overrun reported by coverity
  net/mlx5e: fix a potential double-free in fs_udp_create_groups
  net/mlx5e: Fix a race in command alloc flow
  net/mlx5e: Fix slab-out-of-bounds in mlx5_query_nic_vport_mac_list()
  net/mlx5e: fix double free of encap_header
  Revert "net/mlx5e: fix double free of encap_header"
  Revert "net/mlx5e: fix double free of encap_header in update funcs"
====================

Link: https://lore.kernel.org/r/20231214012505.42666-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2c1a4185 b13559b7
@@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
 	return token;
 }
 
-static int cmd_alloc_index(struct mlx5_cmd *cmd)
+static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&cmd->alloc_lock, flags);
 	ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
-	if (ret < cmd->vars.max_reg_cmds)
+	if (ret < cmd->vars.max_reg_cmds) {
 		clear_bit(ret, &cmd->vars.bitmask);
+		ent->idx = ret;
+		cmd->ent_arr[ent->idx] = ent;
+	}
 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 
 	return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
@@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
 	sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
 	down(sem);
 	if (!ent->page_queue) {
-		alloc_ret = cmd_alloc_index(cmd);
+		alloc_ret = cmd_alloc_index(cmd, ent);
 		if (alloc_ret < 0) {
 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
 			if (ent->callback) {
@@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work)
 			up(sem);
 			return;
 		}
-		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->vars.max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
 		clear_bit(ent->idx, &cmd->vars.bitmask);
+		cmd->ent_arr[ent->idx] = ent;
 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 	}
 
-	cmd->ent_arr[ent->idx] = ent;
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
 	memset(lay, 0, sizeof(*lay));
......
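Note on the alloc-flow race fix above: cmd_alloc_index() previously reserved only the bitmap slot, and ent->idx / cmd->ent_arr[] were filled in afterwards, so a completion arriving in between could look up an entry that was not yet published. The fix does both under the one alloc_lock critical section. Below is a minimal userspace sketch of the idiom, with a pthread mutex standing in for the spinlock and all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_CMDS 8

    struct work_ent { int idx; };

    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long bitmask = (1UL << MAX_CMDS) - 1; /* bit set = slot free */
    static struct work_ent *ent_arr[MAX_CMDS];

    /* Reserve a slot and publish the entry in ONE critical section, so a
     * concurrent consumer that sees the bit cleared also sees
     * ent_arr[idx] populated. */
    static int cmd_alloc_index(struct work_ent *ent)
    {
        int ret = -1;

        pthread_mutex_lock(&alloc_lock);
        for (int i = 0; i < MAX_CMDS; i++) {
            if (bitmask & (1UL << i)) {
                bitmask &= ~(1UL << i);
                ent->idx = i;
                ent_arr[i] = ent; /* published before the lock drops */
                ret = i;
                break;
            }
        }
        pthread_mutex_unlock(&alloc_lock);
        return ret;
    }

    int main(void)
    {
        struct work_ent e;

        printf("allocated index %d\n", cmd_alloc_index(&e));
        return 0;
    }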
@@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 	while (block_timestamp > tracer->last_timestamp) {
 		/* Check block override if it's not the first block */
-		if (!tracer->last_timestamp) {
+		if (tracer->last_timestamp) {
 			u64 *ts_event;
 			/* To avoid block override be the HW in case of buffer
 			 * wraparound, the time stamp of the previous block
......
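The tracer fix above is a plain inverted condition: the override check compares against the previous block's timestamp, which only exists once a previous block has been seen, i.e. when last_timestamp is nonzero. A tiny sketch of the intended predicate, names are stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* The wraparound check compares a block against the timestamp of the
     * PREVIOUS block, so it is meaningful only when one exists;
     * last_timestamp == 0 means "first block". The old code had this
     * test inverted. */
    static int should_check_block_override(uint64_t last_timestamp)
    {
        return last_timestamp != 0;
    }

    int main(void)
    {
        printf("first block: %d, later blocks: %d\n",
               should_check_block_override(0),
               should_check_block_override(123456));
        return 0;
    }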
@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in || !ft->g) {
 		kfree(ft->g);
+		ft->g = NULL;
 		kvfree(in);
 		return -ENOMEM;
 	}
......
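The fs_udp fix is the classic free-and-NULL guard: the caller's error path can free ft->g again, and freeing NULL is a harmless no-op. A minimal userspace sketch of the pattern, with libc calloc/free standing in for kvzalloc/kfree and the sizes made up:

    #include <stdlib.h>

    struct flow_table { void **g; };

    /* If either allocation failed, free ft->g and NULL it before
     * returning, so a later cleanup path that frees ft->g again hits
     * free(NULL) instead of a double free. */
    static int create_groups(struct flow_table *ft)
    {
        void *in;

        ft->g = calloc(4, sizeof(*ft->g));
        in = calloc(1, 256);
        if (!in || !ft->g) {
            free(ft->g);
            ft->g = NULL; /* the guard added by the fix */
            free(in);
            return -1;
        }
        free(in);
        return 0;
    }

    int main(void)
    {
        struct flow_table ft;
        int err = create_groups(&ft);

        if (!err)
            free(ft.g);
        return err ? 1 : 0;
    }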
@@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
 	}
 	esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
 	esw_attr->out_count++;
-	/* attr->dests[].rep is resolved when we handle encap */
+	/* attr->dests[].vport is resolved when we handle encap */
 
 	return 0;
 }
@@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
 	out_priv = netdev_priv(out_dev);
 	rpriv = out_priv->ppriv;
-	esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+	esw_attr->dests[esw_attr->out_count].vport_valid = true;
+	esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport;
 	esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
 	esw_attr->out_count++;
......
@@ -300,6 +300,10 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	if (err)
 		goto destroy_neigh_entry;
 
+	e->encap_size = ipv4_encap_size;
+	e->encap_header = encap_header;
+	encap_header = NULL;
+
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
@@ -310,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	memset(&reformat_params, 0, sizeof(reformat_params));
 	reformat_params.type = e->reformat_type;
-	reformat_params.size = ipv4_encap_size;
-	reformat_params.data = encap_header;
+	reformat_params.size = e->encap_size;
+	reformat_params.data = e->encap_header;
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
 						     MLX5_FLOW_NAMESPACE_FDB);
 	if (IS_ERR(e->pkt_reformat)) {
@@ -319,8 +323,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		goto destroy_neigh_entry;
 	}
 
-	e->encap_size = ipv4_encap_size;
-	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv4_put(&attr);
@@ -403,18 +405,23 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
 	if (err)
 		goto free_encap;
 
+	e->encap_size = ipv4_encap_size;
+	kfree(e->encap_header);
+	e->encap_header = encap_header;
+	encap_header = NULL;
+
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto free_encap;
+		goto release_neigh;
 	}
 
 	memset(&reformat_params, 0, sizeof(reformat_params));
 	reformat_params.type = e->reformat_type;
-	reformat_params.size = ipv4_encap_size;
-	reformat_params.data = encap_header;
+	reformat_params.size = e->encap_size;
+	reformat_params.data = e->encap_header;
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
 						     MLX5_FLOW_NAMESPACE_FDB);
 	if (IS_ERR(e->pkt_reformat)) {
@@ -422,10 +429,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
 		goto free_encap;
 	}
 
-	e->encap_size = ipv4_encap_size;
-	kfree(e->encap_header);
-	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv4_put(&attr);
@@ -567,6 +570,10 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	if (err)
 		goto destroy_neigh_entry;
 
+	e->encap_size = ipv6_encap_size;
+	e->encap_header = encap_header;
+	encap_header = NULL;
+
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
@@ -577,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	memset(&reformat_params, 0, sizeof(reformat_params));
 	reformat_params.type = e->reformat_type;
-	reformat_params.size = ipv6_encap_size;
-	reformat_params.data = encap_header;
+	reformat_params.size = e->encap_size;
+	reformat_params.data = e->encap_header;
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
 						     MLX5_FLOW_NAMESPACE_FDB);
 	if (IS_ERR(e->pkt_reformat)) {
@@ -586,8 +593,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		goto destroy_neigh_entry;
 	}
 
-	e->encap_size = ipv6_encap_size;
-	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv6_put(&attr);
@@ -669,18 +674,23 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 	if (err)
 		goto free_encap;
 
+	e->encap_size = ipv6_encap_size;
+	kfree(e->encap_header);
+	e->encap_header = encap_header;
+	encap_header = NULL;
+
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto free_encap;
+		goto release_neigh;
 	}
 
 	memset(&reformat_params, 0, sizeof(reformat_params));
 	reformat_params.type = e->reformat_type;
-	reformat_params.size = ipv6_encap_size;
-	reformat_params.data = encap_header;
+	reformat_params.size = e->encap_size;
+	reformat_params.data = e->encap_header;
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
 						     MLX5_FLOW_NAMESPACE_FDB);
 	if (IS_ERR(e->pkt_reformat)) {
@@ -688,10 +698,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 		goto free_encap;
 	}
 
-	e->encap_size = ipv6_encap_size;
-	kfree(e->encap_header);
-	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv6_put(&attr);
......
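All four encap fixes above apply the same ownership-transfer pattern: assign the freshly built header to the entry early, NULL the local pointer, and, in the update paths, jump to release_neigh instead of free_encap, so neither the local error path nor the entry teardown can free the same buffer twice. A minimal userspace sketch, with libc malloc/free standing in for kzalloc/kfree and the failure injected via a flag:

    #include <stdlib.h>
    #include <string.h>

    struct encap_entry { unsigned char *encap_header; size_t encap_size; };

    /* Hand the buffer to the entry as soon as it is built and NULL the
     * local pointer. The error path then frees only what it still owns
     * (nothing), while entry teardown frees e->encap_header exactly once. */
    static int attach_encap(struct encap_entry *e, const unsigned char *hdr,
                            size_t len, int simulate_err)
    {
        unsigned char *encap_header = malloc(len);

        if (!encap_header)
            return -1;
        memcpy(encap_header, hdr, len);

        e->encap_size = len;
        e->encap_header = encap_header;
        encap_header = NULL; /* ownership moved to the entry */

        if (simulate_err) {
            free(encap_header); /* no-op: we no longer own it */
            return -1;          /* e->encap_header freed at teardown */
        }
        return 0;
    }

    int main(void)
    {
        struct encap_entry e = { 0 };
        const unsigned char hdr[14] = { 0 };

        attach_encap(&e, hdr, sizeof(hdr), 0);
        free(e.encap_header);
        return 0;
    }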
@@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
 		out_priv = netdev_priv(encap_dev);
 		rpriv = out_priv->ppriv;
-		esw_attr->dests[out_index].rep = rpriv->rep;
+		esw_attr->dests[out_index].vport_valid = true;
+		esw_attr->dests[out_index].vport = rpriv->rep->vport;
 		esw_attr->dests[out_index].mdev = out_priv->mdev;
 	}
......
@@ -493,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	dma_addr_t dma_addr = xdptxd->dma_addr;
 	u32 dma_len = xdptxd->len;
 	u16 ds_cnt, inline_hdr_sz;
+	unsigned int frags_size;
 	u8 num_wqebbs = 1;
 	int num_frags = 0;
 	bool inline_ok;
@@ -503,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
 		dma_len >= MLX5E_XDP_MIN_INLINE;
+	frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
 
-	if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
+	if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
 		stats->err++;
 		return false;
 	}
......
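The XDP fix widens the MTU check from the linear part to the whole frame. A small sketch of the corrected predicate, with plain integers standing in for the sq/xdptxd fields:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The MTU check must cover the whole frame: the linear part plus
     * the total length of all fragments, not the linear part alone. */
    static bool xdp_frame_fits(uint32_t hw_mtu, uint32_t linear_len,
                               bool has_frags, uint32_t xdp_frags_size)
    {
        uint32_t frags_size = has_frags ? xdp_frags_size : 0;

        return hw_mtu >= linear_len + frags_size;
    }

    int main(void)
    {
        /* 1500-byte MTU, 1000-byte linear part, 800 bytes in frags:
         * accepted by the old check, correctly rejected by the new one. */
        printf("fits: %d\n", xdp_frame_fits(1500, 1000, true, 800));
        return 0;
    }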
@@ -2142,7 +2142,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
 
 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
 {
-	mdev->num_block_tc++;
+	mdev->num_block_tc--;
 }
 
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
......
@@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-	if (count == sizeof(drvinfo->fw_version))
+	if (count >= sizeof(drvinfo->fw_version))
 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev));
......
@@ -78,7 +78,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-	if (count == sizeof(drvinfo->fw_version))
+	if (count >= sizeof(drvinfo->fw_version))
 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev));
......
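Both drvinfo hunks fix the same snprintf misuse: the return value is the length the formatted string would have had, so truncation is signalled by count >= sizeof(buf), and comparing with == misses any result that is longer still. A runnable sketch with made-up version numbers and board id:

    #include <stdio.h>

    int main(void)
    {
        char fw_version[16];
        int count = snprintf(fw_version, sizeof(fw_version),
                             "%d.%d.%04d (%.16s)", 22, 39, 1002,
                             "MT_0000000543");

        /* snprintf returns the length the untruncated string would
         * have had, so truncation means count >= sizeof(buf); testing
         * == misses every case where the string is longer than that. */
        if (count >= (int)sizeof(fw_version))
            snprintf(fw_version, sizeof(fw_version),
                     "%d.%d.%04d", 22, 39, 1002);

        puts(fw_version);
        return 0;
    }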
@@ -3778,7 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
 		break;
 	case FLOW_ACTION_ACCEPT:
 	case FLOW_ACTION_PIPE:
-		if (set_branch_dest_ft(flow->priv, attr))
+		err = set_branch_dest_ft(flow->priv, attr);
+		if (err)
 			goto out_err;
 		break;
 	case FLOW_ACTION_JUMP:
@@ -3788,7 +3789,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
 			goto out_err;
 		}
 		*jump_count = cond->extval;
-		if (set_branch_dest_ft(flow->priv, attr))
+		err = set_branch_dest_ft(flow->priv, attr);
+		if (err)
 			goto out_err;
 		break;
 	default:
@@ -5736,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
 	esw = priv->mdev->priv.eswitch;
 	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
-	if (IS_ERR(attr->act_id_restore_rule))
+	if (IS_ERR(attr->act_id_restore_rule)) {
+		err = PTR_ERR(attr->act_id_restore_rule);
 		goto err_rule;
+	}
 
 	return 0;
......
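Both TC hunks above fix the same bug class: the callee's status was tested but never stored, so the function could reach its error label with err still holding 0 or a stale value (and, for the IS_ERR() case, without converting the error pointer via PTR_ERR()). A minimal sketch of the corrected pattern, with a stubbed callee:

    #include <errno.h>
    #include <stdio.h>

    /* Simulated callee that fails. */
    static int set_branch_dest_ft(void)
    {
        return -ENOMEM;
    }

    /* Store the callee's result in err BEFORE jumping to the error
     * label; the old code tested the call but returned whatever
     * happened to be in err, so callers could see 0 (success) for a
     * failed setup. */
    static int alloc_branch_attr(void)
    {
        int err;

        err = set_branch_dest_ft();
        if (err)
            goto out_err;
        return 0;

    out_err:
        return err;
    }

    int main(void)
    {
        printf("alloc_branch_attr() = %d\n", alloc_branch_attr());
        return 0;
    }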
@@ -526,7 +526,8 @@ struct mlx5_esw_flow_attr {
 	u8	total_vlan;
 	struct {
 		u32 flags;
-		struct mlx5_eswitch_rep *rep;
+		bool vport_valid;
+		u16 vport;
 		struct mlx5_pkt_reformat *pkt_reformat;
 		struct mlx5_core_dev *mdev;
 		struct mlx5_termtbl_handle *termtbl;
......
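This struct change is the core of the rep-to-vport refactor: rather than keeping a struct mlx5_eswitch_rep pointer and dereferencing rep->vport later, when the rep may already be gone, each destination stores the vport number by value plus an explicit validity flag; the remaining hunks mechanically convert the users. A small sketch of the resulting shape (MLX5_VPORT_UPLINK's value taken from the kernel headers, the rest hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Instead of caching a rep pointer and dereferencing rep->vport
     * later, the dest stores the vport number by value plus an
     * explicit validity flag. */
    struct esw_dest {
        bool vport_valid;
        uint16_t vport;
    };

    #define VPORT_UPLINK 0xffff /* stand-in for MLX5_VPORT_UPLINK */

    static bool dest_is_uplink(const struct esw_dest *dest)
    {
        return dest->vport_valid && dest->vport == VPORT_UPLINK;
    }

    int main(void)
    {
        struct esw_dest dest = { .vport_valid = true, .vport = VPORT_UPLINK };

        printf("uplink: %d\n", dest_is_uplink(&dest));
        return 0;
    }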
@@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
 	for (i = from; i < to; i++)
 		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
 			mlx5_chains_put_table(chains, 0, 1, 0);
-		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
 						     esw_attr->dests[i].mdev))
-			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
-						 false);
+			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
 }
 
 static bool
@@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
 	 * this criteria.
 	 */
 	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
-		if (esw_attr->dests[i].rep &&
-		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+		if (esw_attr->dests[i].vport_valid &&
+		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
 						esw_attr->dests[i].mdev)) {
 			result = true;
 		} else {
@@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
-						       esw_attr->dests[j].rep->vport, false);
+						       esw_attr->dests[j].vport, false);
 		if (IS_ERR(dest[*i].ft)) {
 			err = PTR_ERR(dest[*i].ft);
 			goto err_indir_tbl_get;
@@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
 					      int attr_idx)
 {
 	if (esw->offloads.ft_ipsec_tx_pol &&
-	    esw_attr->dests[attr_idx].rep &&
-	    esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+	    esw_attr->dests[attr_idx].vport_valid &&
+	    esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
 	    /* To be aligned with software, encryption is needed only for tunnel device */
 	    (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
-	    esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+	    esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
 	    esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
 		return true;
@@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
 			 int attr_idx, int dest_idx, bool pkt_reformat)
 {
 	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
 		dest[dest_idx].vport.vhca_id =
 			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
@@ -1177,9 +1176,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	struct mlx5_flow_handle *flow;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_vport *vport;
+	int err, pfindex;
 	unsigned long i;
 	void *misc;
-	int err;
 
 	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
 		return 0;
@@ -1255,7 +1254,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 			flows[vport->index] = flow;
 		}
 	}
-	esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
+
+	pfindex = mlx5_get_dev_index(peer_dev);
+	if (pfindex >= MLX5_MAX_PORTS) {
+		esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
+			 pfindex, MLX5_MAX_PORTS);
+		err = -EINVAL;
+		goto add_ec_vf_flow_err;
+	}
+	esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
 
 	kvfree(spec);
 	return 0;
......
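The peer-miss-rules hunks fix the overrun Coverity reported: peer_miss_rules[] has MLX5_MAX_PORTS entries and the device index was stored through unchecked. A minimal sketch of the bound-check-before-store pattern, with made-up sizes:

    #include <stdio.h>

    #define MAX_PORTS 4 /* stand-in for MLX5_MAX_PORTS */

    static void *peer_miss_rules[MAX_PORTS];

    /* Validate the device index against the array bound before the
     * store; an out-of-range index now fails with -EINVAL instead of
     * writing past the end of peer_miss_rules[]. */
    static int store_peer_miss_rules(int pfindex, void *flows)
    {
        if (pfindex >= MAX_PORTS) {
            fprintf(stderr, "peer dev index %d over max %d\n",
                    pfindex, MAX_PORTS);
            return -22; /* -EINVAL */
        }
        peer_miss_rules[pfindex] = flows;
        return 0;
    }

    int main(void)
    {
        int flows;

        return store_peer_miss_rules(MAX_PORTS + 1, &flows) ? 0 : 1;
    }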
@@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
 	/* hairpin */
 	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
-		if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
-		    esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+		if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid &&
+		    esw_attr->dests[i].vport == MLX5_VPORT_UPLINK)
 			return true;
 
 	return false;
......
@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 		req_list_size = max_list_size;
 	}
 
-	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
+	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
 			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
 
 	out = kvzalloc(out_sz, GFP_KERNEL);
......
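The last fix sizes the response buffer from the output layout: using the smaller query_nic_vport_context_in size let the firmware's reply run past the allocation, producing the reported slab-out-of-bounds. A sketch with invented stand-in sizes for the MLX5_ST_SZ_BYTES() values:

    #include <stdlib.h>

    #define OUT_CTX_SZ    256 /* stand-in for MLX5_ST_SZ_BYTES(query_nic_vport_context_out) */
    #define MAC_LAYOUT_SZ 8   /* stand-in for MLX5_ST_SZ_BYTES(mac_address_layout) */

    /* Size the response buffer from the OUTPUT layout; sizing it from
     * the smaller input layout made the reply overrun the allocation. */
    static void *alloc_mac_list_out(size_t req_list_size)
    {
        size_t out_sz = OUT_CTX_SZ + req_list_size * MAC_LAYOUT_SZ;

        return calloc(1, out_sz);
    }

    int main(void)
    {
        void *out = alloc_mac_list_out(32);

        free(out);
        return 0;
    }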