Commit 62752c0b authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: DR, Fix peer domain namespace setting

The offending patch is based on the assumption that for PFs,
mlx5_get_dev_index() is the same as the vhca_id. However, this
assumption is wrong in the case of a DPU (ECPF).
Fix it by using the vhca_id directly, and switch the fixed array of
peer domains to an xarray keyed by vhca_id.

Fixes: 6d5b7321 ("net/mlx5: DR, handle more than one peer domain")
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 61eab651
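The core of the change is the lookup key: peer domains are now tracked in an xarray keyed by the 16-bit vhca_id, rather than in a MLX5_MAX_PORTS-sized array indexed by mlx5_get_dev_index(). Below is a minimal, self-contained sketch of that pattern, assuming a simplified demo_domain struct; the demo_* names are illustrative and not part of the patch.

#include <linux/xarray.h>
#include <linux/refcount.h>

/* Simplified stand-in for struct mlx5dr_domain (illustrative only). */
struct demo_domain {
	refcount_t refcount;
	struct xarray peer_xa;	/* peers keyed by vhca_id, a sparse u16 space */
};

/* Register (or clear, when peer is NULL) the peer stored under
 * peer_vhca_id, mirroring the refcounting in mlx5dr_domain_set_peer().
 * An xarray works where peer_dmn[MLX5_MAX_PORTS] did not: on an ECPF
 * (DPU) the dev index is not the vhca_id, so the vhca_id itself must be
 * the key, and its values are not bounded by MLX5_MAX_PORTS.
 */
static void demo_set_peer(struct demo_domain *dmn,
			  struct demo_domain *peer, u16 peer_vhca_id)
{
	struct demo_domain *old = xa_load(&dmn->peer_xa, peer_vhca_id);

	if (old)
		refcount_dec(&old->refcount);

	/* Storing NULL erases the entry; xa_err() catches an allocation
	 * failure returned by xa_store().
	 */
	WARN_ON(xa_err(xa_store(&dmn->peer_xa, peer_vhca_id, peer,
				GFP_KERNEL)));

	if (peer)
		refcount_inc(&peer->refcount);
}

In the patch itself this sequence runs under mlx5dr_domain_lock(), and the STE build helpers then resolve a peer with a single xa_load() instead of a bounds-checked array dereference.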
@@ -2778,9 +2778,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
 					 struct mlx5_eswitch *peer_esw,
 					 bool pair)
 {
-	u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
+	u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+	u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
 	struct mlx5_flow_root_namespace *peer_ns;
-	u8 idx = mlx5_get_dev_index(esw->dev);
 	struct mlx5_flow_root_namespace *ns;
 	int err;
@@ -2788,18 +2788,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
 	ns = esw->dev->priv.steering->fdb_root_ns;
 	if (pair) {
-		err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
+		err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
 		if (err)
 			return err;
-		err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
+		err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
 		if (err) {
-			mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
+			mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
 			return err;
 		}
 	} else {
-		mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
-		mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
+		mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
+		mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
 	}
 	return 0;
@@ -140,7 +140,7 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
 static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
 				  struct mlx5_flow_root_namespace *peer_ns,
-				  u8 peer_idx)
+				  u16 peer_vhca_id)
 {
 	return 0;
 }
@@ -94,7 +94,7 @@ struct mlx5_flow_cmds {
 	int (*set_peer)(struct mlx5_flow_root_namespace *ns,
 			struct mlx5_flow_root_namespace *peer_ns,
-			u8 peer_idx);
+			u16 peer_vhca_id);
 	int (*create_ns)(struct mlx5_flow_root_namespace *ns);
 	int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
@@ -3621,7 +3621,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_root_namespace *peer_ns,
-				 u8 peer_idx)
+				 u16 peer_vhca_id)
 {
 	if (peer_ns && ns->mode != peer_ns->mode) {
 		mlx5_core_err(ns->dev,
@@ -3629,7 +3629,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 		return -EINVAL;
 	}
-	return ns->cmds->set_peer(ns, peer_ns, peer_idx);
+	return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id);
 }
 /* This function should be called only at init stage of the namespace.
@@ -303,7 +303,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_root_namespace *peer_ns,
-				 u8 peer_idx);
+				 u16 peer_vhca_id);
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
 				 enum mlx5_flow_steering_mode mode);
@@ -2079,7 +2079,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
 	peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
 		     (vhca_id != dmn->info.caps.gvmi);
-	vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
+	vport_dmn = peer_vport ? xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn;
 	if (!vport_dmn) {
 		mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
 		return NULL;
@@ -475,6 +475,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 	mutex_init(&dmn->info.rx.mutex);
 	mutex_init(&dmn->info.tx.mutex);
 	xa_init(&dmn->definers_xa);
+	xa_init(&dmn->peer_dmn_xa);
 	if (dr_domain_caps_init(mdev, dmn)) {
 		mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -507,6 +508,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 uninit_caps:
 	dr_domain_caps_uninit(dmn);
 def_xa_destroy:
+	xa_destroy(&dmn->peer_dmn_xa);
 	xa_destroy(&dmn->definers_xa);
 	kfree(dmn);
 	return NULL;
@@ -547,6 +549,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 	dr_domain_uninit_csum_recalc_fts(dmn);
 	dr_domain_uninit_resources(dmn);
 	dr_domain_caps_uninit(dmn);
+	xa_destroy(&dmn->peer_dmn_xa);
 	xa_destroy(&dmn->definers_xa);
 	mutex_destroy(&dmn->info.tx.mutex);
 	mutex_destroy(&dmn->info.rx.mutex);
@@ -556,17 +559,21 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_domain *peer_dmn,
-			    u8 peer_idx)
+			    u16 peer_vhca_id)
 {
+	struct mlx5dr_domain *peer;
+
 	mlx5dr_domain_lock(dmn);
-	if (dmn->peer_dmn[peer_idx])
-		refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);
+	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+	if (peer)
+		refcount_dec(&peer->refcount);
-	dmn->peer_dmn[peer_idx] = peer_dmn;
+	WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
-	if (dmn->peer_dmn[peer_idx])
-		refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);
+	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+	if (peer)
+		refcount_inc(&peer->refcount);
 	mlx5dr_domain_unlock(dmn);
 }
@@ -1652,17 +1652,18 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_domain *dmn = sb->dmn;
 	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;
+	struct mlx5dr_domain *peer;
 	bool source_gvmi_set;
 	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
 	if (sb->vhca_id_valid) {
+		peer = xa_load(&dmn->peer_dmn_xa, id);
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (id == dmn->info.caps.gvmi)
 			vport_dmn = dmn;
-		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn[id];
+		else if (peer && (id == peer->info.caps.gvmi))
+			vport_dmn = peer;
 		else
 			return -EINVAL;
@@ -1984,16 +1984,17 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_domain *dmn = sb->dmn;
 	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;
+	struct mlx5dr_domain *peer;
 	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
 	if (sb->vhca_id_valid) {
+		peer = xa_load(&dmn->peer_dmn_xa, id);
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (id == dmn->info.caps.gvmi)
 			vport_dmn = dmn;
-		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn[id];
+		else if (peer && (id == peer->info.caps.gvmi))
+			vport_dmn = peer;
 		else
 			return -EINVAL;
@@ -935,7 +935,6 @@ struct mlx5dr_domain_info {
 };
 struct mlx5dr_domain {
-	struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
 	struct mlx5_core_dev *mdev;
 	u32 pdn;
 	struct mlx5_uars_page *uar;
@@ -956,6 +955,7 @@ struct mlx5dr_domain {
 	struct list_head dbg_tbl_list;
 	struct mlx5dr_dbg_dump_info dump_info;
 	struct xarray definers_xa;
+	struct xarray peer_dmn_xa;
 	/* memory management statistics */
 	u32 num_buddies[DR_ICM_TYPE_MAX];
 };
@@ -781,14 +781,14 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
 				struct mlx5_flow_root_namespace *peer_ns,
-				u8 peer_idx)
+				u16 peer_vhca_id)
 {
 	struct mlx5dr_domain *peer_domain = NULL;
 	if (peer_ns)
 		peer_domain = peer_ns->fs_dr_domain.dr_domain;
 	mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
-			       peer_domain, peer_idx);
+			       peer_domain, peer_vhca_id);
 	return 0;
 }
@@ -49,7 +49,7 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_domain *peer_dmn,
-			    u8 peer_idx);
+			    u16 peer_vhca_id);
 struct mlx5dr_table *
 mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,