Commit 6a4d00be authored by Mark Bloch, committed by Jason Gunthorpe

RDMA/mlx5: Move rep into port struct

In preparation for moving to a model of a single IB device with multiple ports,
move the rep to be part of the port structure. A representor device is marked
by setting is_rep; there is no functional change in this patch.
Signed-off-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 5d8f6a0e
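
For orientation, a condensed before/after view of the two structures this patch touches (taken from the mlx5_ib.h hunks below; unrelated fields elided):

/* Before: the rep pointer hangs off the device itself. */
struct mlx5_ib_dev {
	/* ... */
	struct mlx5_eswitch_rep *rep;	/* non-NULL only for representors */
};

/* After: the device carries only a flag; each port owns its rep. */
struct mlx5_ib_port {
	/* ... */
	struct mlx5_eswitch_rep *rep;
};

struct mlx5_ib_dev {
	/* ... */
	bool is_rep;	/* marks a representor device */
};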
@@ -1904,7 +1904,7 @@ static bool devx_is_supported(struct ib_device *device)
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
 
-	return !dev->rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
+	return !dev->is_rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
 }
 
 const struct uapi_definition mlx5_ib_devx_defs[] = {
...
@@ -621,7 +621,7 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
 
 static bool flow_is_supported(struct ib_device *device)
 {
-	return !to_mdev(device)->rep;
+	return !to_mdev(device)->is_rep;
 }
 
 const struct uapi_definition mlx5_ib_flow_defs[] = {
...
@@ -69,7 +69,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 		return -ENOMEM;
 	}
 
-	ibdev->rep = rep;
+	ibdev->is_rep = true;
+	ibdev->port[0].rep = rep;
 	ibdev->mdev = dev;
 	ibdev->num_ports = num_ports;
@@ -151,12 +152,12 @@ int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
 
-	if (!dev->rep)
+	if (!dev->is_rep)
 		return 0;
 
 	flow_rule =
 		mlx5_eswitch_add_send_to_vport_rule(esw,
-						    dev->rep->vport,
+						    dev->port[0].rep->vport,
 						    sq->base.mqp.qpn);
 	if (IS_ERR(flow_rule))
 		return PTR_ERR(flow_rule);
...
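
A representor device currently exposes a single IB port, which is why both the load path and create_flow_rule_vport_sq() above address the rep through index 0; presumably the multiport model this series prepares for will populate one rep per port entry.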
@@ -173,12 +173,12 @@ static int mlx5_netdev_event(struct notifier_block *this,
 	switch (event) {
 	case NETDEV_REGISTER:
 		write_lock(&roce->netdev_lock);
-		if (ibdev->rep) {
+		if (ibdev->is_rep) {
 			struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
+			struct mlx5_eswitch_rep *rep = ibdev->port[0].rep;
 			struct net_device *rep_ndev;
 
-			rep_ndev = mlx5_ib_get_rep_netdev(esw,
-							  ibdev->rep->vport);
+			rep_ndev = mlx5_ib_get_rep_netdev(esw, rep->vport);
 			if (rep_ndev == ndev)
 				roce->netdev = ndev;
 		} else if (ndev->dev.parent == &mdev->pdev->dev) {
@@ -3153,10 +3153,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 	if (ft_type == MLX5_IB_FT_RX) {
 		fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
 		prio = &dev->flow_db->prios[priority];
-		if (!dev->rep &&
+		if (!dev->is_rep &&
 		    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
-		if (!dev->rep &&
+		if (!dev->is_rep &&
 		    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
 					      reformat_l3_tunnel_to_l2))
 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
@@ -3166,7 +3166,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 				       log_max_ft_size));
 		fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
 		prio = &dev->flow_db->egress_prios[priority];
-		if (!dev->rep &&
+		if (!dev->is_rep &&
 		    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	}
@@ -3372,7 +3372,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	if (!is_valid_attr(dev->mdev, flow_attr))
 		return ERR_PTR(-EINVAL);
 
-	if (dev->rep && is_egress)
+	if (dev->is_rep && is_egress)
 		return ERR_PTR(-EINVAL);
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -3403,13 +3403,17 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	if (!flow_is_multicast_only(flow_attr))
 		set_underlay_qp(dev, spec, underlay_qpn);
 
-	if (dev->rep) {
+	if (dev->is_rep) {
 		void *misc;
 
+		if (!dev->port[flow_attr->port - 1].rep) {
+			err = -EINVAL;
+			goto free;
+		}
 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    misc_parameters);
 		MLX5_SET(fte_match_set_misc, misc, source_port,
-			 dev->rep->vport);
+			 dev->port[flow_attr->port - 1].rep->vport);
 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				    misc_parameters);
 		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
...
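
This _create_flow_rule() hunk is the one spot where the rep is now looked up per port: IB port numbers are 1-based, hence the flow_attr->port - 1 indexing, and the added NULL check rejects rules on a port with no rep bound. A minimal sketch of that lookup as a hypothetical helper (not part of this patch; the names mlx5_ib_rep_of_port and port_num are illustrative):

/* Hypothetical helper, not in this patch: return the eswitch rep
 * backing a 1-based IB port number, or NULL if the device is not a
 * representor or the port has no rep bound.
 */
static struct mlx5_eswitch_rep *
mlx5_ib_rep_of_port(struct mlx5_ib_dev *dev, u8 port_num)
{
	if (!dev->is_rep)
		return NULL;
	return dev->port[port_num - 1].rep;
}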
@@ -720,6 +720,7 @@ struct mlx5_ib_port {
 	struct mlx5_ib_multiport mp;
 	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
 	struct mlx5_roce roce;
+	struct mlx5_eswitch_rep *rep;
 };
 
 struct mlx5_ib_dbg_param {
@@ -940,7 +941,7 @@ struct mlx5_ib_dev {
 	struct mlx5_sq_bfreg fp_bfreg;
 	struct mlx5_ib_delay_drop delay_drop;
 	const struct mlx5_ib_profile *profile;
-	struct mlx5_eswitch_rep *rep;
+	bool is_rep;
 	int lag_active;
 
 	struct mlx5_ib_lb_state lb;
...
@@ -600,7 +600,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 
 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 {
-	if (!mlx5_debugfs_root || dev->rep)
+	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
 
 	debugfs_remove_recursive(dev->cache.root);
@@ -614,7 +614,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 	struct dentry *dir;
 	int i;
 
-	if (!mlx5_debugfs_root || dev->rep)
+	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
 
 	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
@@ -677,7 +677,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 			MLX5_IB_UMR_OCTOWORD;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
-		    !dev->rep &&
+		    !dev->is_rep &&
 		    mlx5_core_is_pf(dev->mdev))
 			ent->limit = dev->mdev->profile->mr_cache[i].limit;
 		else
...
@@ -1436,7 +1436,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
 
-	if (dev->rep) {
+	if (dev->is_rep) {
 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
 		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
 	}
@@ -1648,7 +1648,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
 	}
...