Commit 62111654 authored by Dragos Tatulea, committed by Michael S. Tsirkin

vdpa/mlx5: Postpone MR deletion

Currently, when a new MR is set up, the old MR is deleted. MR deletion
takes about 30-40% of the time of MR creation. As deleting the old MR is
not important for the process of setting up the new MR, this operation
can be postponed.

This series adds a workqueue that does MR garbage collection at a later
point. If the MR lock is taken, the handler backs off and reschedules
itself. The exception is during shutdown, when the handler must not
postpone the work.

Note that this is only a speculative optimization: if a mapping operation
is triggered while the garbage collection handler holds the lock, that
operation will have to wait for the handler to finish.
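
As a sketch, the deferred-deletion flow condensed from the hunks below
(simplified; the surrounding locking context and error handling are as in
the actual diff):

        /* Put path: dropping the last reference no longer destroys the MR
         * inline; the MR is parked on a GC list and the GC work is scheduled.
         */
        if (refcount_dec_and_test(&mr->refcount)) {
                list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
                queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
                                   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
        }

        /* GC handler: back off and reschedule if a mapping operation holds
         * the lock, except at shutdown, when the work must not be postponed.
         */
        if (atomic_read(&mres->shutdown)) {
                mutex_lock(&mres->lock);
        } else if (!mutex_trylock(&mres->lock)) {
                queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
                                   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
                return;
        }
        list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list)
                _mlx5_vdpa_destroy_mr(mvdev, mr);
        mutex_unlock(&mres->lock);

        /* Teardown: mark shutdown and flush the pending GC work before the
         * workqueue is destroyed.
         */
        atomic_set(&mres->shutdown, 1);
        flush_delayed_work(&mres->gc_dwork_ent);
        destroy_workqueue(mres->wq_gc);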
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Message-Id: <20240830105838.2666587-9-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent f30a1232
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -86,8 +86,18 @@ enum {
 struct mlx5_vdpa_mr_resources {
         struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
         unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+
+        /* Pre-deletion mr list */
         struct list_head mr_list_head;
+
+        /* Deferred mr list */
+        struct list_head mr_gc_list_head;
+        struct workqueue_struct *wq_gc;
+        struct delayed_work gc_dwork_ent;
+
         struct mutex lock;
+
+        atomic_t shutdown;
 };
 
 struct mlx5_vdpa_dev {
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -653,14 +653,50 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
         kfree(mr);
 }
 
+/* There can be multiple .set_map() operations in quick succession.
+ * This large delay is a simple way to prevent the MR cleanup from blocking
+ * .set_map() MR creation in this scenario.
+ */
+#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
+
+static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
+{
+        struct mlx5_vdpa_mr_resources *mres;
+        struct mlx5_vdpa_mr *mr, *tmp;
+        struct mlx5_vdpa_dev *mvdev;
+
+        mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
+
+        if (atomic_read(&mres->shutdown)) {
+                mutex_lock(&mres->lock);
+        } else if (!mutex_trylock(&mres->lock)) {
+                queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+                                   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+                return;
+        }
+
+        mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
+
+        list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
+                _mlx5_vdpa_destroy_mr(mvdev, mr);
+        }
+
+        mutex_unlock(&mres->lock);
+}
+
 static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
                               struct mlx5_vdpa_mr *mr)
 {
+        struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
         if (!mr)
                 return;
 
-        if (refcount_dec_and_test(&mr->refcount))
-                _mlx5_vdpa_destroy_mr(mvdev, mr);
+        if (refcount_dec_and_test(&mr->refcount)) {
+                list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
+                queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+                                   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+        }
 }
 
 void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
@@ -851,9 +887,17 @@ int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
 
-        INIT_LIST_HEAD(&mres->mr_list_head);
+        mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
+        if (!mres->wq_gc)
+                return -ENOMEM;
+
+        INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
+
         mutex_init(&mres->lock);
 
+        INIT_LIST_HEAD(&mres->mr_list_head);
+        INIT_LIST_HEAD(&mres->mr_gc_list_head);
+
         return 0;
 }
 
@@ -861,5 +905,10 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
 
+        atomic_set(&mres->shutdown, 1);
+        flush_delayed_work(&mres->gc_dwork_ent);
+        destroy_workqueue(mres->wq_gc);
+        mres->wq_gc = NULL;
+
         mutex_destroy(&mres->lock);
 }
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3435,6 +3435,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
         free_fixed_resources(ndev);
         mlx5_vdpa_clean_mrs(mvdev);
         mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+        mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
         if (!is_zero_ether_addr(ndev->config.mac)) {
                 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
                 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -4042,8 +4044,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
         mvdev->wq = NULL;
         destroy_workqueue(wq);
         mgtdev->ndev = NULL;
-
-        mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
 }
 
 static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,