Commit 5da45971 authored by Paolo Abeni

Merge tag 'mlx5-fixes-2024-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2024-01-24

This series provides bug fixes to mlx5 driver.
Please pull and let me know if there is any problem.

* tag 'mlx5-fixes-2024-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: fix a potential double-free in fs_any_create_groups
  net/mlx5e: fix a double-free in arfs_create_groups
  net/mlx5e: Ignore IPsec replay window values on sender side
  net/mlx5e: Allow software parsing when IPsec crypto is enabled
  net/mlx5: Use mlx5 device constant for selecting CQ period mode for ASO
  net/mlx5: DR, Can't go to uplink vport on RX rule
  net/mlx5: DR, Use the right GVMI number for drop action
  net/mlx5: Bridge, fix multicast packets sent to uplink
  net/mlx5: Fix a WARN upon a callback command failure
  net/mlx5e: Fix peer flow lists handling
  net/mlx5e: Fix inconsistent hairpin RQT sizes
  net/mlx5e: Fix operation precedence bug in port timestamping napi_poll context
  net/mlx5: Fix query of sd_group field
  net/mlx5e: Use the correct lag ports number when creating TISes
====================

Link: https://lore.kernel.org/r/20240124081855.115410-1-saeed@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents fdf8e6d1 aef855df
...@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, ...@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{ {
const char *namep = mlx5_command_str(opcode); const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats; struct mlx5_cmd_stats *stats;
unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode"))) if (!err || !(strcmp(namep, "unknown command opcode")))
return; return;
...@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, ...@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode); stats = xa_load(&dev->cmd.stats, opcode);
if (!stats) if (!stats)
return; return;
spin_lock_irq(&stats->lock); spin_lock_irqsave(&stats->lock, flags);
stats->failed++; stats->failed++;
if (err < 0) if (err < 0)
stats->last_failed_errno = -err; stats->last_failed_errno = -err;
...@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, ...@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status; stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome; stats->last_failed_syndrome = syndrome;
} }
spin_unlock_irq(&stats->lock); spin_unlock_irqrestore(&stats->lock, flags);
} }
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
......
...@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) ...@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops; extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb); bool enable_mc_lb);
......
...@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft) ...@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL); in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) { if (!in || !ft->g) {
kfree(ft->g); kfree(ft->g);
ft->g = NULL;
kvfree(in); kvfree(in);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, ...@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq); void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp; bool allow_swp;
allow_swp = allow_swp = mlx5_geneve_tx_allowed(mdev) ||
mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param); mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp); MLX5_SET(sqc, sqc, allow_swp, allow_swp);
......
...@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, ...@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out: out:
napi_consume_skb(skb, budget); napi_consume_skb(skb, budget);
md_buff[*md_buff_sz++] = metadata_id; md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) && if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work); queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
......
...@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, ...@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */ /* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len; aes_gcm->icv_len = x->aead->alg_icv_len;
attrs->dir = x->xso.dir;
/* esn */ /* esn */
if (x->props.flags & XFRM_STATE_ESN) { if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true; attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn; attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap; attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
goto skip_replay_window;
switch (x->replay_esn->replay_window) { switch (x->replay_esn->replay_window) {
case 32: case 32:
attrs->replay_esn.replay_window = attrs->replay_esn.replay_window =
...@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, ...@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
} }
} }
attrs->dir = x->xso.dir; skip_replay_window:
/* spi */ /* spi */
attrs->spi = be32_to_cpu(x->id.spi); attrs->spi = be32_to_cpu(x->id.spi);
...@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, ...@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL; return -EINVAL;
} }
if (x->replay_esn && x->replay_esn->replay_window != 32 && if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 && x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 && x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) { x->replay_esn->replay_window != 256) {
......
...@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL); sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL); if (!ft->g)
if (!in || !ft->g) {
kfree(ft->g);
kvfree(in);
return -ENOMEM; return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free_g;
} }
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
...@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break; break;
default: default:
err = -EINVAL; err = -EINVAL;
goto out; goto err_free_in;
} }
switch (type) { switch (type) {
...@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break; break;
default: default:
err = -EINVAL; err = -EINVAL;
goto out; goto err_free_in;
} }
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
...@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err; goto err_clean_group;
ft->num_groups++; ft->num_groups++;
memset(in, 0, inlen); memset(in, 0, inlen);
...@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ...@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1); MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) if (IS_ERR(ft->g[ft->num_groups]))
goto err; goto err_clean_group;
ft->num_groups++; ft->num_groups++;
kvfree(in); kvfree(in);
return 0; return 0;
err: err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]); err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL; ft->g[ft->num_groups] = NULL;
out: err_free_in:
kvfree(in); kvfree(in);
err_free_g:
kfree(ft->g);
ft->g = NULL;
return err; return err;
} }
......
...@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO ...@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
{ {
int tc, i; int tc, i;
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(mdev, tisn[i][tc]); mlx5e_destroy_tis(mdev, tisn[i][tc]);
} }
...@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT ...@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
int tc, i; int tc, i;
int err; int err;
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) { for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc; void *tisc;
...@@ -140,7 +140,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT ...@@ -140,7 +140,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
return err; return err;
} }
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{ {
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err; int err;
...@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) ...@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey; goto err_destroy_mkey;
} }
err = mlx5e_create_tises(mdev, res->tisn); if (create_tises) {
if (err) { err = mlx5e_create_tises(mdev, res->tisn);
mlx5_core_err(mdev, "alloc tises failed, %d\n", err); if (err) {
goto err_destroy_bfreg; mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
goto err_destroy_bfreg;
}
res->tisn_valid = true;
} }
INIT_LIST_HEAD(&res->td.tirs_list); INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock); mutex_init(&res->td.list_lock);
...@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) ...@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv); mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL; mdev->mlx5e_res.dek_priv = NULL;
mlx5e_destroy_tises(mdev, res->tisn); if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg); mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey); mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
......
...@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev) ...@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev)) if (netif_device_present(netdev))
return 0; return 0;
err = mlx5e_create_mdev_resources(mdev); err = mlx5e_create_mdev_resources(mdev, true);
if (err) if (err)
return err; return err;
......
...@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) ...@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev, err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels), mlx5e_rqt_size(mdev, hp->num_channels),
mlx5e_rqt_size(mdev, priv->max_nch)); mlx5e_rqt_size(mdev, hp->num_channels));
if (err) if (err)
return err; return err;
...@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow, ...@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) { list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev)) if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue; continue;
list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) { if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow); mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
list_del(&peer_flow->peer_flows);
kfree(peer_flow); kfree(peer_flow);
} }
} }
......
...@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md ...@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++; i++;
} }
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16); dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr); ether_addr_copy(dmac_v, entry->key.addr);
...@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po ...@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec) if (!rule_spec)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
...@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port) ...@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id; dest.vport.vhca_id = port->esw_owner_vhca_id;
} }
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1); handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec); kvfree(rule_spec);
......
...@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag); fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source, MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source); fte->flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
!!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination, MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest); extended_dest);
......
...@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num, ...@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
} }
/* This should only be called once per mdev */ /* This should only be called once per mdev */
err = mlx5e_create_mdev_resources(mdev); err = mlx5e_create_mdev_resources(mdev, false);
if (err) if (err)
goto destroy_ht; goto destroy_ht;
} }
......
...@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data) ...@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE); MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
......
...@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, ...@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) { switch (action_type) {
case DR_ACTION_TYP_DROP: case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr; attr.final_icm_addr = nic_dmn->drop_icm_addr;
attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break; break;
case DR_ACTION_TYP_FT: case DR_ACTION_TYP_FT:
dest_action = action; dest_action = action;
...@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, ...@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr; action->sampler->tx_icm_addr;
break; break;
case DR_ACTION_TYP_VPORT: case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi; if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
dest_action = action; /* can't go to uplink on RX rule - dropping instead */
attr.final_icm_addr = rx_rule ? attr.final_icm_addr = nic_dmn->drop_icm_addr;
action->vport->caps->icm_address_rx : attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
action->vport->caps->icm_address_tx; } else {
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
attr.final_icm_addr = rx_rule ?
action->vport->caps->icm_address_rx :
action->vport->caps->icm_address_tx;
}
break; break;
case DR_ACTION_TYP_POP_VLAN: case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps & if (!rx_rule && !(dmn->ste_ctx->actions_caps &
......
...@@ -440,6 +440,27 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, ...@@ -440,6 +440,27 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
} }
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
/* Query the sd_group field of this function's NIC vport context.
 *
 * @mdev:     mlx5 core device to query.
 * @sd_group: out parameter; receives the sd_group value read from the
 *            nic_vport_context (a 3-bit field per the ifc layout added
 *            in this commit).
 *
 * Returns 0 on success, -ENOMEM if the output mailbox cannot be
 * allocated, or the error returned by mlx5_query_nic_vport_context().
 * On error *sd_group is left unmodified.
 */
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
{
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 *out;
int err;
/* Output mailbox can be large; kvzalloc may fall back to vmalloc. */
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
/* vport 0 == query the device's own NIC vport context. */
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
*sd_group = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.sd_group);
out:
kvfree(out);
return err;
}
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{ {
u32 *out; u32 *out;
......
...@@ -681,6 +681,7 @@ struct mlx5e_resources { ...@@ -681,6 +681,7 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg; struct mlx5_sq_bfreg bfreg;
#define MLX5_MAX_NUM_TC 8 #define MLX5_MAX_NUM_TC 8
u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
bool tisn_valid;
} hw_objs; } hw_objs;
struct net_device *uplink_netdev; struct net_device *uplink_netdev;
struct mutex uplink_netdev_lock; struct mutex uplink_netdev_lock;
......
...@@ -132,6 +132,7 @@ struct mlx5_flow_handle; ...@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
enum { enum {
FLOW_CONTEXT_HAS_TAG = BIT(0), FLOW_CONTEXT_HAS_TAG = BIT(0),
FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
}; };
struct mlx5_flow_context { struct mlx5_flow_context {
......
...@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits { ...@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10]; u8 action[0x10];
u8 extended_destination[0x1]; u8 extended_destination[0x1];
u8 reserved_at_81[0x1]; u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2]; u8 flow_source[0x2];
u8 encrypt_decrypt_type[0x4]; u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18]; u8 destination_list_size[0x18];
...@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits { ...@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 affiliation_criteria[0x4]; u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10]; u8 affiliated_vhca_id[0x10];
u8 reserved_at_60[0xd0]; u8 reserved_at_60[0xa0];
u8 reserved_at_100[0x1];
u8 sd_group[0x3];
u8 reserved_at_104[0x1c];
u8 reserved_at_120[0x10];
u8 mtu[0x10]; u8 mtu[0x10];
u8 system_image_guid[0x40]; u8 system_image_guid[0x40];
...@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits { ...@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
u8 reserved_at_20[0x20]; u8 reserved_at_20[0x20];
u8 local_port[0x8]; u8 local_port[0x8];
u8 reserved_at_28[0x15]; u8 reserved_at_28[0x18];
u8 sd_group[0x3];
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
}; };
......
...@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); ...@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid); u64 *system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid); u16 vport, u64 node_guid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment