Commit 342206b7 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Always use xe_vm_queue_rebind_worker helper

Do not queue the rebind worker directly; rather, use the helper
xe_vm_queue_rebind_worker. This ensures the correct work queue is used.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent c8dc1546
@@ -1472,8 +1472,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
 		}
 		if (!rebind && last_munmap_rebind &&
 		    xe_vm_in_compute_mode(vm))
-			queue_work(vm->xe->ordered_wq,
-				   &vm->preempt.rebind_work);
+			xe_vm_queue_rebind_worker(vm);
 	} else {
 		kfree(rfence);
 		kfree(ifence);
@@ -182,6 +182,12 @@ extern struct ttm_device_funcs xe_ttm_funcs;
 
 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
 
+static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
+{
+	XE_WARN_ON(!xe_vm_in_compute_mode(vm));
+	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
+}
+
 /**
  * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
  * vms.
@@ -195,7 +201,7 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 {
 	if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
 		vm->preempt.rebind_deactivated = false;
-		queue_work(system_unbound_wq, &vm->preempt.rebind_work);
+		xe_vm_queue_rebind_worker(vm);
 	}
 }
@@ -203,12 +209,6 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma);
 
 int xe_vma_userptr_check_repin(struct xe_vma *vma);
 
-static inline void xe_vm_queue_rebind_worker(struct xe_vm *vma)
-{
-	XE_WARN_ON(!xe_vm_in_compute_mode(vm));
-	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
-}
-
 /*
  * XE_ONSTACK_TV is used to size the tv_onstack array that is input
  * to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().
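Context: before this change the two call sites disagreed on the workqueue — xe_vm_reactivate_rebind() queued the rebind work on system_unbound_wq, while __xe_pt_bind_vma() used vm->xe->ordered_wq. Centralizing the queue_work() call in the helper pins the work to the ordered queue and adds the XE_WARN_ON sanity check in one place. Below is a minimal sketch of why the queue choice matters, assuming the ordered workqueue is created with alloc_ordered_workqueue() (not shown in this diff); all example_* names are hypothetical and are not xe driver code.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_ordered_wq;

static void example_rebind_fn(struct work_struct *work)
{
	/*
	 * On an ordered workqueue, at most one work item executes at a
	 * time, in queueing order. system_unbound_wq, by contrast, may
	 * run items concurrently on any CPU, so work queued there is
	 * not serialized against other items.
	 */
}
static DECLARE_WORK(example_rebind_work, example_rebind_fn);

static int __init example_init(void)
{
	/* An ordered workqueue serializes everything queued on it. */
	example_ordered_wq = alloc_ordered_workqueue("example-ordered", 0);
	if (!example_ordered_wq)
		return -ENOMEM;

	queue_work(example_ordered_wq, &example_rebind_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_ordered_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With a single helper, any future caller gets the same serialization guarantee instead of choosing a workqueue ad hoc.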