Commit 6e78e071 authored by Matthew Auld

Revert "drm/xe/vm: drop vm->destroy_work"

This reverts commit 5b259c0d.

The cleanup here is good, however we need to be able to flush a worker
during vm destruction, which might involve sleeping, so bring back the
worker.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240423074721.119633-3-matthew.auld@intel.com
parent 3cd1585e
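
For context, a minimal standalone sketch of the deferred-destroy pattern this revert restores. The struct foo object and the foo_* function names are hypothetical stand-ins for illustration, not the xe driver's actual code; system_unbound_wq, kref, and the workqueue calls are the real kernel APIs:

/*
 * Sketch (hypothetical "foo" object): the final reference put can come
 * from dma_fence signalling, i.e. irq context, where sleeping is
 * forbidden. The release callback therefore only queues a worker; the
 * worker runs in process context on system_unbound_wq and may sleep
 * (flush other workers, take mutexes, etc.) before freeing the object.
 */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct kref ref;
	struct work_struct destroy_work;
};

static void foo_destroy_work_func(struct work_struct *w)
{
	struct foo *f = container_of(w, struct foo, destroy_work);

	/* Process context: sleeping is allowed here. */
	kfree(f);
}

static void foo_release(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	/* Possibly irq context: defer the sleeping teardown. */
	queue_work(system_unbound_wq, &f->destroy_work);
}

struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kref_init(&f->ref);
	INIT_WORK(&f->destroy_work, foo_destroy_work_func);
	return f;
}

void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);
}

system_unbound_wq is used because the work is not tied to any particular CPU and may run for a while; that matches what the diff below does for the xe VM.
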
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1173,6 +1173,8 @@ static const struct xe_pt_ops xelp_pt_ops = {
 	.pde_encode_bo = xelp_pde_encode_bo,
 };
 
+static void vm_destroy_work_func(struct work_struct *w);
+
 /**
  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
  * given tile and vm.
@@ -1252,6 +1254,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	init_rwsem(&vm->userptr.notifier_lock);
 	spin_lock_init(&vm->userptr.invalidated_lock);
 
+	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
 
@@ -1489,9 +1493,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	xe_vm_put(vm);
 }
 
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
+static void vm_destroy_work_func(struct work_struct *w)
 {
-	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+	struct xe_vm *vm =
+		container_of(w, struct xe_vm, destroy_work);
 	struct xe_device *xe = vm->xe;
 	struct xe_tile *tile;
 	u8 id;
@@ -1511,6 +1516,14 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
 	kfree(vm);
 }
 
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
+{
+	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+
+	/* To destroy the VM we need to be able to sleep */
+	queue_work(system_unbound_wq, &vm->destroy_work);
+}
+
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
 {
 	struct xe_vm *vm;
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -177,6 +177,13 @@ struct xe_vm {
 	 */
 	struct list_head rebind_list;
 
+	/**
+	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
+	 * from an irq context can be last put and the destroy needs to be able
+	 * to sleep.
+	 */
+	struct work_struct destroy_work;
+
 	/**
 	 * @rftree: range fence tree to track updates to page table structure.
 	 * Used to implement conflict tracking between independent bind engines.