Commit f1430536 authored by Matthew Wilcox, committed by Jason Gunthorpe

mlx4: Convert pv_id_table to XArray

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent b02a29eb
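
The diff below replaces the driver's IDR (pv_id_table) with an allocating XArray. For reference, a minimal sketch of the XArray calls the patch relies on; the "foo" struct and helpers are made-up names for illustration and are not part of the commit:

#include <linux/xarray.h>

/* Hypothetical table keyed by a cyclically allocated 32-bit id. */
struct foo_table {
	struct xarray ids;	/* must be initialized with XA_FLAGS_ALLOC */
	u32 next;		/* caller-owned cursor for cyclic allocation */
};

static void foo_table_init(struct foo_table *t)
{
	xa_init_flags(&t->ids, XA_FLAGS_ALLOC);
	t->next = 0;
}

/* Stores @entry at a free index and writes the chosen index to @id. */
static int foo_add(struct foo_table *t, void *entry, u32 *id)
{
	return xa_alloc_cyclic(&t->ids, id, entry, xa_limit_32b,
			       &t->next, GFP_KERNEL);
}

static void *foo_lookup(struct foo_table *t, u32 id)
{
	return xa_load(&t->ids, id);	/* NULL if nothing is stored there */
}

static void *foo_del(struct foo_table *t, u32 id)
{
	return xa_erase(&t->ids, id);	/* returns the removed entry, if any */
}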
@@ -168,20 +168,17 @@ static void id_map_ent_timeout(struct work_struct *work)
 {
 	struct delayed_work *delay = to_delayed_work(work);
 	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
-	struct id_map_entry *db_ent, *found_ent;
+	struct id_map_entry *found_ent;
 	struct mlx4_ib_dev *dev = ent->dev;
 	struct mlx4_ib_sriov *sriov = &dev->sriov;
 	struct rb_root *sl_id_map = &sriov->sl_id_map;
-	int pv_id = (int) ent->pv_cm_id;
 
 	spin_lock(&sriov->id_map_lock);
-	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
-	if (!db_ent)
+	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
 		goto out;
 	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_id);
 
 out:
 	list_del(&ent->list);
@@ -196,13 +193,12 @@ static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
 	struct id_map_entry *ent, *found_ent;
 
 	spin_lock(&sriov->id_map_lock);
-	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+	ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
 	if (!ent)
 		goto out;
 	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_cm_id);
 out:
 	spin_unlock(&sriov->id_map_lock);
 }
@@ -256,25 +252,19 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	ent->dev = to_mdev(ibdev);
 	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
-	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
 	if (ret >= 0) {
-		ent->pv_cm_id = (u32)ret;
+		spin_lock(&sriov->id_map_lock);
 		sl_id_map_add(ibdev, ent);
 		list_add_tail(&ent->list, &sriov->cm_list);
-	}
-
-	spin_unlock(&sriov->id_map_lock);
-	idr_preload_end();
-
-	if (ret >= 0)
+		spin_unlock(&sriov->id_map_lock);
 		return ent;
+	}
 
 	/*error flow*/
 	kfree(ent);
-	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
 	return ERR_PTR(-ENOMEM);
 }
 
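
The shape of id_map_alloc() changes here because xa_alloc_cyclic() serializes on the XArray's own internal lock and, called with GFP_KERNEL, may sleep; the idr_preload()/GFP_NOWAIT pattern is therefore gone and sriov->id_map_lock only has to cover the rb-tree and cm_list updates. A sketch of the return convention, using generic names rather than the driver's (an illustration, not code from the commit): 0 means the entry was stored at a fresh id, 1 means it was stored after the cyclic counter wrapped, and a negative errno means failure, which is why ret >= 0 is still the success test above.

#include <linux/xarray.h>

static int store_with_cyclic_id(struct xarray *xa, u32 *id, u32 *next,
				void *entry)
{
	int ret = xa_alloc_cyclic(xa, id, entry, xa_limit_32b, next,
				  GFP_KERNEL);

	if (ret < 0)		/* -ENOMEM or -EBUSY: nothing was stored */
		return ret;
	if (ret == 1)		/* stored, but the id counter wrapped to 0 */
		pr_debug("id space wrapped\n");
	return 0;		/* stored at a fresh id */
}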
@@ -290,7 +280,7 @@ id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
 		if (ent)
 			*pv_cm_id = (int) ent->pv_cm_id;
 	} else
-		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
 	spin_unlock(&sriov->id_map_lock);
 
 	return ent;
@@ -407,7 +397,7 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 	spin_lock_init(&dev->sriov.id_map_lock);
 	INIT_LIST_HEAD(&dev->sriov.cm_list);
 	dev->sriov.sl_id_map = RB_ROOT;
-	idr_init(&dev->sriov.pv_id_table);
+	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
 }
 
 /* slave = -1 ==> all slaves */
@@ -444,7 +434,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 					 struct id_map_entry, node);
 
 			rb_erase(&ent->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
 		}
 		list_splice_init(&dev->sriov.cm_list, &lh);
 	} else {
@@ -460,7 +450,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 		/* remove those nodes from databases */
 		list_for_each_entry_safe(map, tmp_map, &lh, list) {
 			rb_erase(&map->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
 		}
 
 		/* add remaining nodes from cm_list */
@@ -492,10 +492,11 @@ struct mlx4_ib_sriov {
 	struct mlx4_sriov_alias_guid alias_guid;
 
 	/* CM paravirtualization fields */
-	struct list_head cm_list;
+	struct xarray pv_id_table;
+	u32 pv_id_next;
 	spinlock_t id_map_lock;
 	struct rb_root sl_id_map;
-	struct idr pv_id_table;
+	struct list_head cm_list;
 };
 
 struct gid_cache_context {
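
One reason the header gains u32 pv_id_next: idr_alloc_cyclic() keeps its next-id hint inside struct idr, while xa_alloc_cyclic() expects the caller to own that cursor, so the XArray and its cursor now sit side by side. Annotated for illustration (the field names are the patch's own, the comments are not):

	struct xarray pv_id_table;	/* pv_cm_id -> struct id_map_entry */
	u32 pv_id_next;			/* cursor handed to xa_alloc_cyclic() */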