Commit 657360d6 authored by Jason Gunthorpe

RDMA/ucma: Remove closing and the close_wq

Use cancel_work_sync() to ensure that the wq is not running and simply
assign NULL to ctx->cm_id to indicate if the work ran or not. Delete the
close_wq since flush_workqueue() is no longer needed.

Link: https://lore.kernel.org/r/20200818120526.702120-15-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent a1d33b70
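
Before the diff itself, a minimal, hypothetical sketch of the pattern the commit message describes: queue the close work on the shared system_unbound_wq, have the work function NULL the pointer it consumed, and let the destroy path rely on cancel_work_sync() plus a NULL check instead of flushing a private ordered workqueue. The demo_* names and struct demo_res are invented for illustration and are not part of ucma; only work_struct, INIT_WORK(), queue_work(), cancel_work_sync() and system_unbound_wq are real kernel APIs here.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_res;			/* stand-in for struct rdma_cm_id */

struct demo_ctx {
	struct work_struct close_work;
	struct demo_res *res;		/* set to NULL once close_work has run */
};

static void demo_destroy_res(struct demo_res *res)
{
	kfree(res);			/* stand-in for rdma_destroy_id() */
}

/* Runs at most once; records that it consumed the resource. */
static void demo_close_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, close_work);

	demo_destroy_res(ctx->res);
	ctx->res = NULL;
}

static struct demo_ctx *demo_alloc(struct demo_res *res)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	INIT_WORK(&ctx->close_work, demo_close_work);
	ctx->res = res;
	return ctx;
}

/* Event path: no private workqueue needed, the shared unbound wq suffices. */
static void demo_device_removal(struct demo_ctx *ctx)
{
	queue_work(system_unbound_wq, &ctx->close_work);
}

/* Destroy path: after cancel_work_sync() the work is neither queued nor running. */
static void demo_destroy(struct demo_ctx *ctx)
{
	cancel_work_sync(&ctx->close_work);
	if (ctx->res)			/* work never ran, release here */
		demo_destroy_res(ctx->res);
	kfree(ctx);
}

Because cancel_work_sync() only returns once the work is neither queued nor running, the NULL check in demo_destroy() cannot race with the store in demo_close_work(); this is the same reasoning as the comment the patch adds to ucma_close_id() below.
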
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -80,7 +80,6 @@ struct ucma_file {
 	struct list_head	ctx_list;
 	struct list_head	event_list;
 	wait_queue_head_t	poll_wait;
-	struct workqueue_struct	*close_wq;
 };
 
 struct ucma_context {
@@ -96,10 +95,6 @@ struct ucma_context {
 	u64			uid;
 
 	struct list_head	list;
-	/* mark that device is in process of destroying the internal HW
-	 * resources, protected by the ctx_table lock
-	 */
-	int			closing;
 	/* sync between removal event and id destroy, protected by file mut */
 	int			destroying;
 	struct work_struct	close_work;
@@ -148,12 +143,9 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 
 	xa_lock(&ctx_table);
 	ctx = _ucma_find_context(id, file);
-	if (!IS_ERR(ctx)) {
-		if (ctx->closing)
-			ctx = ERR_PTR(-EIO);
-		else if (!refcount_inc_not_zero(&ctx->ref))
+	if (!IS_ERR(ctx))
+		if (!refcount_inc_not_zero(&ctx->ref))
 			ctx = ERR_PTR(-ENXIO);
-	}
 	xa_unlock(&ctx_table);
 	return ctx;
 }
@@ -193,6 +185,14 @@ static void ucma_close_id(struct work_struct *work)
 	wait_for_completion(&ctx->comp);
 	/* No new events will be generated after destroying the id. */
 	rdma_destroy_id(ctx->cm_id);
+
+	/*
+	 * At this point ctx->ref is zero so the only place the ctx can be is in
+	 * a uevent or in __destroy_id(). Since the former doesn't touch
+	 * ctx->cm_id and the latter sync cancels this, there is no races with
+	 * this store.
+	 */
+	ctx->cm_id = NULL;
 }
 
 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
@@ -356,12 +356,8 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 		wake_up_interruptible(&ctx->file->poll_wait);
 	}
 
-	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying) {
-		xa_lock(&ctx_table);
-		ctx->closing = 1;
-		xa_unlock(&ctx_table);
-		queue_work(ctx->file->close_wq, &ctx->close_work);
-	}
+	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
+		queue_work(system_unbound_wq, &ctx->close_work);
 
 	return 0;
 }
@@ -577,17 +573,10 @@ static int __destroy_id(struct ucma_context *ctx)
 		ucma_put_ctx(ctx);
 	}
 
-	flush_workqueue(ctx->file->close_wq);
+	cancel_work_sync(&ctx->close_work);
 	/* At this point it's guaranteed that there is no inflight closing task */
-	xa_lock(&ctx_table);
-	if (!ctx->closing) {
-		xa_unlock(&ctx_table);
-		ucma_put_ctx(ctx);
-		wait_for_completion(&ctx->comp);
-		rdma_destroy_id(ctx->cm_id);
-	} else {
-		xa_unlock(&ctx_table);
-	}
+	if (ctx->cm_id)
+		ucma_close_id(&ctx->close_work);
 	return ucma_free_ctx(ctx);
 }
 
@@ -1788,13 +1777,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
 
-	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
-						 WQ_MEM_RECLAIM);
-	if (!file->close_wq) {
-		kfree(file);
-		return -ENOMEM;
-	}
-
 	INIT_LIST_HEAD(&file->event_list);
 	INIT_LIST_HEAD(&file->ctx_list);
 	init_waitqueue_head(&file->poll_wait);
@@ -1825,7 +1807,6 @@ static int ucma_close(struct inode *inode, struct file *filp)
 		xa_erase(&ctx_table, ctx->id);
 		__destroy_id(ctx);
 	}
-	destroy_workqueue(file->close_wq);
 	kfree(file);
 	return 0;
 }