Commit 5b259c0d authored by Matthew Auld

drm/xe/vm: drop vm->destroy_work

Now that we no longer grab the usm.lock mutex (which might sleep), it looks
like it should be safe to perform xe_vm_free directly when the vm refcount
reaches zero, instead of punting that off to a worker.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240412113144.259426-5-matthew.auld@intel.com
parent 83967c57
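For context (not part of the patch itself): the pattern this commit removes is the common kernel idiom of deferring a sleeping destructor to a workqueue, because the final reference can be dropped from atomic context (e.g. dma-fence signalling from an irq, as the removed @destroy_work kerneldoc notes). Below is a minimal, hypothetical sketch of that idiom; the names (struct obj, obj_release, obj_put, etc.) are illustrative only and do not appear in the xe driver.

/*
 * Hypothetical sketch of the "defer destroy to a worker" idiom.
 * All identifiers here are made up for illustration.
 */
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct kref refcount;
	struct mutex lock;		/* taking this may sleep */
	struct work_struct destroy_work;	/* set up with INIT_WORK() at creation */
};

static void obj_destroy_work_func(struct work_struct *w)
{
	struct obj *o = container_of(w, struct obj, destroy_work);

	/* Worker runs in process context, so sleeping teardown is fine here. */
	mutex_lock(&o->lock);
	/* ... teardown that may sleep ... */
	mutex_unlock(&o->lock);
	kfree(o);
}

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	/*
	 * The last kref_put() may happen from atomic context (e.g. a
	 * dma-fence callback in irq context), so punt the sleeping
	 * teardown to a workqueue instead of doing it inline.
	 */
	queue_work(system_unbound_wq, &o->destroy_work);
}

static void obj_put(struct obj *o)
{
	kref_put(&o->refcount, obj_release);
}

Once nothing in the release path needs to sleep (here, the usm.lock mutex grab is gone), the worker indirection becomes unnecessary and teardown can run directly from the final put. That is what the diff below does by folding vm_destroy_work_func() into xe_vm_free() and dropping the destroy_work member.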
@@ -1179,8 +1179,6 @@ static const struct xe_pt_ops xelp_pt_ops = {
 	.pde_encode_bo = xelp_pde_encode_bo,
 };
 
-static void vm_destroy_work_func(struct work_struct *w);
-
 /**
  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
  * given tile and vm.
@@ -1260,8 +1258,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	init_rwsem(&vm->userptr.notifier_lock);
 	spin_lock_init(&vm->userptr.invalidated_lock);
 
-	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
-
 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
 
@@ -1499,10 +1495,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	xe_vm_put(vm);
 }
 
-static void vm_destroy_work_func(struct work_struct *w)
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
 {
-	struct xe_vm *vm =
-		container_of(w, struct xe_vm, destroy_work);
+	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
 	struct xe_device *xe = vm->xe;
 	struct xe_tile *tile;
 	u8 id;
@@ -1522,14 +1517,6 @@ static void vm_destroy_work_func(struct work_struct *w)
 	kfree(vm);
 }
 
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
-{
-	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
-
-	/* To destroy the VM we need to be able to sleep */
-	queue_work(system_unbound_wq, &vm->destroy_work);
-}
-
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
 {
 	struct xe_vm *vm;
...
@@ -177,13 +177,6 @@ struct xe_vm {
 	 */
 	struct list_head rebind_list;
 
-	/**
-	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
-	 * from an irq context can be last put and the destroy needs to be able
-	 * to sleep.
-	 */
-	struct work_struct destroy_work;
-
 	/**
 	 * @rftree: range fence tree to track updates to page table structure.
 	 * Used to implement conflict tracking between independent bind engines.
...