Commit 9a4ca38d authored by Mark Bloch, committed by Saeed Mahameed

IB/mlx5: Allocate flow DB only on PF IB device

The flow DB is a resource shared between the PF and its representors,
so it needs to be allocated only when the PF IB device is created.
Once IB representors are added, they will use the flow DB that was
created by the PF.
Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent fc385b7a
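
The motivation is easiest to see from the representor side. Below is a
minimal sketch of how a representor's init stage could reuse the PF's
flow DB once the field is a pointer; the function name and the way the
PF device is obtained are hypothetical, not part of this commit:

/* Hypothetical representor stage: borrow the PF's flow DB instead of
 * allocating a private one. How pf_dev is looked up is out of scope.
 */
static int mlx5_ib_rep_flow_db_init(struct mlx5_ib_dev *rep_dev,
				    struct mlx5_ib_dev *pf_dev)
{
	/* Share, don't allocate: representors steer through the PF's
	 * flow tables, so they must see the same bookkeeping and lock.
	 */
	rep_dev->flow_db = pf_dev->flow_db;
	return 0;
}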
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2632,7 +2632,7 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
 							    ibflow);
 	struct mlx5_ib_flow_handler *iter, *tmp;
 
-	mutex_lock(&dev->flow_db.lock);
+	mutex_lock(&dev->flow_db->lock);
 
 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
 		mlx5_del_flow_rules(iter->rule);
@@ -2643,7 +2643,7 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
 
 	mlx5_del_flow_rules(handler->rule);
 	put_flow_table(dev, handler->prio, true);
-	mutex_unlock(&dev->flow_db.lock);
+	mutex_unlock(&dev->flow_db->lock);
 
 	kfree(handler);
 
@@ -2692,7 +2692,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 					       MLX5_FLOW_NAMESPACE_BYPASS);
 		num_entries = MLX5_FS_MAX_ENTRIES;
 		num_groups = MLX5_FS_MAX_TYPES;
-		prio = &dev->flow_db.prios[priority];
+		prio = &dev->flow_db->prios[priority];
 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
 		ns = mlx5_get_flow_namespace(dev->mdev,
@@ -2700,7 +2700,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 		build_leftovers_ft_param(&priority,
 					 &num_entries,
 					 &num_groups);
-		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
 					allow_sniffer_and_nic_rx_shared_tir))
@@ -2710,7 +2710,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
 
-		prio = &dev->flow_db.sniffer[ft_type];
+		prio = &dev->flow_db->sniffer[ft_type];
 		priority = 0;
 		num_entries = 1;
 		num_groups = 1;
@@ -3000,7 +3000,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	if (!dst)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&dev->flow_db.lock);
+	mutex_lock(&dev->flow_db->lock);
 
 	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
 	if (IS_ERR(ft_prio)) {
@@ -3049,7 +3049,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		goto destroy_ft;
 	}
 
-	mutex_unlock(&dev->flow_db.lock);
+	mutex_unlock(&dev->flow_db->lock);
 	kfree(dst);
 
 	return &handler->ibflow;
@@ -3059,7 +3059,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	if (ft_prio_tx)
 		put_flow_table(dev, ft_prio_tx, false);
 unlock:
-	mutex_unlock(&dev->flow_db.lock);
+	mutex_unlock(&dev->flow_db->lock);
 	kfree(dst);
 	kfree(handler);
 	return ERR_PTR(err);
@@ -3803,7 +3803,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 		goto err_destroy_vport_lag;
 	}
 
-	dev->flow_db.lag_demux_ft = ft;
+	dev->flow_db->lag_demux_ft = ft;
 	return 0;
 
 err_destroy_vport_lag:
@@ -3815,9 +3815,9 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 
-	if (dev->flow_db.lag_demux_ft) {
-		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
-		dev->flow_db.lag_demux_ft = NULL;
+	if (dev->flow_db->lag_demux_ft) {
+		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
+		dev->flow_db->lag_demux_ft = NULL;
 
 		mlx5_cmd_destroy_vport_lag(mdev);
 	}
@@ -4565,7 +4565,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 		dev->mdev->priv.eq_table.num_comp_vectors;
 	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
 
-	mutex_init(&dev->flow_db.lock);
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
@@ -4586,6 +4585,23 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	return -ENOMEM;
 }
 
+static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
+{
+	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
+
+	if (!dev->flow_db)
+		return -ENOMEM;
+
+	mutex_init(&dev->flow_db->lock);
+
+	return 0;
+}
+
+static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
+{
+	kfree(dev->flow_db);
+}
+
 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
@@ -4974,6 +4990,9 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
 		     mlx5_ib_stage_init_init,
 		     mlx5_ib_stage_init_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
+		     mlx5_ib_stage_flow_db_init,
+		     mlx5_ib_stage_flow_db_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 		     mlx5_ib_stage_caps_init,
 		     NULL),
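
For readers unfamiliar with the profile mechanism: the driver brings a
device up by walking the profile's stage array in order and tears it
down in reverse. The sketch below paraphrases that behavior (it is not
the driver's code verbatim, and it assumes the driver's struct
mlx5_ib_stage init/cleanup callbacks and the MLX5_IB_STAGE_MAX enum
terminator) to show why the new FLOW_DB stage sits right after INIT:
the flow DB is allocated before any later stage can touch it, and
freed only after every later stage has been cleaned up.

/* Paraphrased sketch of staged init/teardown. */
static int run_profile(struct mlx5_ib_dev *dev,
		       const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_unwind;
		}
	}
	return 0;

err_unwind:
	/* Clean up only the stages that completed, in reverse order. */
	while (i-- > 0)
		if (profile->stage[i].cleanup)
			profile->stage[i].cleanup(dev);
	return err;
}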
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -731,6 +731,7 @@ struct mlx5_ib_delay_drop {
 
 enum mlx5_ib_stages {
 	MLX5_IB_STAGE_INIT,
+	MLX5_IB_STAGE_FLOW_DB,
 	MLX5_IB_STAGE_CAPS,
 	MLX5_IB_STAGE_ROCE,
 	MLX5_IB_STAGE_DEVICE_RESOURCES,
@@ -798,7 +799,7 @@ struct mlx5_ib_dev {
 	struct srcu_struct	mr_srcu;
 	u32			null_mkey;
 #endif
-	struct mlx5_ib_flow_db	flow_db;
+	struct mlx5_ib_flow_db	*flow_db;
 	/* protect resources needed as part of reset flow */
 	spinlock_t		reset_flow_resource_lock;
 	struct list_head	qp_list;
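
For reference, the structure being shared is struct mlx5_ib_flow_db
from the same header. The sketch below is abridged: the field names
are taken directly from the accesses in this patch (prios, sniffer,
lag_demux_ft, lock), while the array bounds (MLX5_IB_NUM_FLOW_FTS, the
RX/TX pair) are indicative rather than authoritative:

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FTS];
	struct mlx5_ib_flow_prio	sniffer[2];	/* RX and TX */
	struct mlx5_flow_table		*lag_demux_ft;
	/* Serializes add/removal of flow steering rules on the bypass
	 * flow tables; shared by the PF and, later, its representors.
	 */
	struct mutex			lock;
};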