Commit d490ecf5 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper

Replace the calls to ttm_eu_reserve_buffers() with the drm_exec
helper. Also make sure the locking loop covers any calls to
xe_bo_validate() / ttm_bo_validate(), so that those calls are made
from within an unsealed locking transaction and may thus perform
blocking dma_resv locks in the future.

For the unlock path, drop the assert that the vm->rebind_list is empty
when locks are released: if the error path is hit with a partly locked
list, that assert may no longer hold true.
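
As a rough illustration, the locking pattern that both xe_exec_ioctl() and the
rebind worker move to looks like the sketch below. lock_and_validate() is a
placeholder standing in for driver code such as xe_exec_begin(); it is not a
function added by this patch.

```c
/*
 * Minimal sketch of the drm_exec locking loop, assuming a hypothetical
 * lock_and_validate() helper that prepares and validates the BOs.
 */
struct drm_exec exec;
int err;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
	/* Lock the dma_resv objects and (re)validate buffers inside the loop. */
	err = lock_and_validate(&exec, vm);
	/* On ww-mutex contention, drop all locks and restart the loop. */
	drm_exec_retry_on_contention(&exec);
	if (err)
		break;
}

/* ... create and submit the job while the reservations stay locked ... */

drm_exec_fini(&exec);
```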

v3:
- Don't accept duplicate bo locks in the rebind worker.
v5:
- Loop over drm_exec objects in reverse when unlocking.
v6:
- We can't keep the WW ticket when retrying validation on OOM. Fix.
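
The v6 note refers to the retry flow visible in the hunks below:
xe_vm_validate_should_retry() cannot keep the WW ticket across the retry, so
it finalizes the drm_exec state itself, and the caller restarts the whole
transaction from drm_exec_init() (the rebind worker loops internally, while
the exec ioctl returns -EAGAIN to userspace). A condensed sketch of the
rebind-worker variant, with error labels and surrounding context elided:

```c
retry:
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = xe_preempt_work_begin(&exec, vm, &done);
		drm_exec_retry_on_contention(&exec);
		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
			/* The helper has already called drm_exec_fini() for us. */
			err = -EAGAIN;
			goto retry;
		}
		if (err || done)
			break;
	}
```
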
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-5-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent b7ab8c4f
@@ -8,6 +8,7 @@ config DRM_XE
 	select SHMEM
 	select TMPFS
 	select DRM_BUDDY
+	select DRM_EXEC
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select DRM_SUBALLOC_HELPER
@@ -21,6 +22,7 @@ config DRM_XE
 	select VMAP_PFN
 	select DRM_TTM
 	select DRM_TTM_HELPER
+	select DRM_EXEC
 	select DRM_GPUVM
 	select DRM_SCHED
 	select MMU_NOTIFIER
...
@@ -6,6 +6,7 @@
 #include "xe_exec.h"
 
 #include <drm/drm_device.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_file.h>
 #include <drm/xe_drm.h>
 #include <linux/delay.h>
@@ -93,25 +94,16 @@
  * Unlock all
  */
-#define XE_EXEC_BIND_RETRY_TIMEOUT_MS	1000
-
-static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
-			 struct ttm_validate_buffer tv_onstack[],
-			 struct ttm_validate_buffer **tv,
-			 struct list_head *objs)
+static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
 {
-	struct xe_vm *vm = q->vm;
 	struct xe_vma *vma;
 	LIST_HEAD(dups);
-	ktime_t end = 0;
 	int err = 0;
 
-	*tv = NULL;
-	if (xe_vm_no_dma_fences(q->vm))
+	if (xe_vm_no_dma_fences(vm))
 		return 0;
 
-retry:
-	err = xe_vm_lock_dma_resv(vm, ww, tv_onstack, tv, objs, true, 1);
+	err = xe_vm_lock_dma_resv(vm, exec, 1, true);
 	if (err)
 		return err;
@@ -127,42 +119,13 @@ static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
 			continue;
 
 		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
-		if (err) {
-			xe_vm_unlock_dma_resv(vm, tv_onstack, *tv, ww, objs);
-			*tv = NULL;
+		if (err)
 			break;
-		}
-	}
-
-	/*
-	 * With multiple active VMs, under memory pressure, it is possible that
-	 * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
-	 * Until ttm properly handles locking in such scenarios, best thing the
-	 * driver can do is retry with a timeout.
-	 */
-	if (err == -ENOMEM) {
-		ktime_t cur = ktime_get();
-
-		end = end ? : ktime_add_ms(cur, XE_EXEC_BIND_RETRY_TIMEOUT_MS);
-		if (ktime_before(cur, end)) {
-			msleep(20);
-			goto retry;
-		}
 	}
 
 	return err;
 }
 
-static void xe_exec_end(struct xe_exec_queue *q,
-			struct ttm_validate_buffer *tv_onstack,
-			struct ttm_validate_buffer *tv,
-			struct ww_acquire_ctx *ww,
-			struct list_head *objs)
-{
-	if (!xe_vm_no_dma_fences(q->vm))
-		xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
-}
-
 int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
@@ -173,15 +136,13 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct xe_exec_queue *q;
 	struct xe_sync_entry *syncs = NULL;
 	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
-	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
-	struct ttm_validate_buffer *tv = NULL;
+	struct drm_exec exec;
 	u32 i, num_syncs = 0;
 	struct xe_sched_job *job;
 	struct dma_fence *rebind_fence;
 	struct xe_vm *vm;
-	struct ww_acquire_ctx ww;
-	struct list_head objs;
 	bool write_locked;
+	ktime_t end = 0;
 	int err = 0;
 
 	if (XE_IOCTL_DBG(xe, args->extensions) ||
@@ -294,26 +255,34 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto err_unlock_list;
 	}
 
-	err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
-	if (err)
-		goto err_unlock_list;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		err = xe_exec_begin(&exec, vm);
+		drm_exec_retry_on_contention(&exec);
+		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
+			err = -EAGAIN;
+			goto err_unlock_list;
+		}
+		if (err)
+			goto err_exec;
+	}
 
 	if (xe_vm_is_closed_or_banned(q->vm)) {
 		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
 		err = -ECANCELED;
-		goto err_exec_queue_end;
+		goto err_exec;
 	}
 
 	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
 		err = -EWOULDBLOCK;
-		goto err_exec_queue_end;
+		goto err_exec;
 	}
 
 	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
 				  addresses : &args->address);
 	if (IS_ERR(job)) {
 		err = PTR_ERR(job);
-		goto err_exec_queue_end;
+		goto err_exec;
 	}
 
 	/*
@@ -412,8 +381,8 @@
 err_put_job:
 	if (err)
 		xe_sched_job_put(job);
-err_exec_queue_end:
-	xe_exec_end(q, tv_onstack, tv, &ww, &objs);
+err_exec:
+	drm_exec_fini(&exec);
 err_unlock_list:
 	if (write_locked)
 		up_write(&vm->lock);
...
@@ -7,6 +7,7 @@
 
 #include <linux/dma-fence-array.h>
 
+#include <drm/drm_exec.h>
 #include <drm/drm_print.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_tt.h>
@@ -327,10 +328,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
 
 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 {
-	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
-	struct ttm_validate_buffer *tv;
-	struct ww_acquire_ctx ww;
-	struct list_head objs;
+	struct drm_exec exec;
 	struct dma_fence *pfence;
 	int err;
 	bool wait;
@@ -338,10 +336,13 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 	XE_WARN_ON(!xe_vm_in_compute_mode(vm));
 
 	down_write(&vm->lock);
-	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
-	if (err)
-		goto out_unlock_outer;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
+		drm_exec_retry_on_contention(&exec);
+		if (err)
+			goto out_unlock;
+	}
 
 	pfence = xe_preempt_fence_create(q, q->compute.context,
 					 ++q->compute.seqno);
@@ -373,8 +374,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 	up_read(&vm->userptr.notifier_lock);
 
 out_unlock:
-	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
-out_unlock_outer:
+	drm_exec_fini(&exec);
 	up_write(&vm->lock);
 
 	return err;
@@ -403,68 +403,35 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
  * objects of the vm's external buffer objects.
  * @vm: The vm.
- * @ww: Pointer to a struct ww_acquire_ctx locking context.
- * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
- * ttm_validate_buffers used for locking.
- * @tv: Pointer to a pointer that on output contains the actual storage used.
- * @objs: List head for the buffer objects locked.
- * @intr: Whether to lock interruptible.
+ * @exec: Pointer to a struct drm_exec locking context.
  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
+ * @lock_vm: Lock also the vm's dma_resv.
  *
  * Locks the vm dma-resv objects and all the dma-resv objects of the
- * buffer objects on the vm external object list. The TTM utilities require
- * a list of struct ttm_validate_buffers pointing to the actual buffer
- * objects to lock. Storage for those struct ttm_validate_buffers should
- * be provided in @tv_onstack, and is typically reserved on the stack
- * of the caller. If the size of @tv_onstack isn't sufficient, then
- * storage will be allocated internally using kvmalloc().
- *
- * The function performs deadlock handling internally, and after a
- * successful return the ww locking transaction should be considered
- * sealed.
+ * buffer objects on the vm external object list.
 *
 * Return: 0 on success, Negative error code on error. In particular if
- * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
- * of error, any locking performed has been reverted.
+ * @intr is set to true, -EINTR or -ERESTARTSYS may be returned.
 */
-int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
-			struct ttm_validate_buffer *tv_onstack,
-			struct ttm_validate_buffer **tv,
-			struct list_head *objs,
-			bool intr,
-			unsigned int num_shared)
+int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
+			unsigned int num_shared, bool lock_vm)
 {
-	struct ttm_validate_buffer *tv_vm, *tv_bo;
 	struct xe_vma *vma, *next;
-	LIST_HEAD(dups);
-	int err;
+	int err = 0;
 
 	lockdep_assert_held(&vm->lock);
 
-	if (vm->extobj.entries < XE_ONSTACK_TV) {
-		tv_vm = tv_onstack;
-	} else {
-		tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
-				       GFP_KERNEL);
-		if (!tv_vm)
-			return -ENOMEM;
+	if (lock_vm) {
+		err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base, num_shared);
+		if (err)
+			return err;
 	}
-	tv_bo = tv_vm + 1;
 
-	INIT_LIST_HEAD(objs);
 	list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
-		tv_bo->num_shared = num_shared;
-		tv_bo->bo = &xe_vma_bo(vma)->ttm;
-
-		list_add_tail(&tv_bo->head, objs);
-		tv_bo++;
+		err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
+		if (err)
+			return err;
 	}
-	tv_vm->num_shared = num_shared;
-	tv_vm->bo = xe_vm_ttm_bo(vm);
-	list_add_tail(&tv_vm->head, objs);
-	err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
-	if (err)
-		goto out_err;
 
 	spin_lock(&vm->notifier.list_lock);
 	list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
@@ -478,45 +445,7 @@ int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
 	}
 	spin_unlock(&vm->notifier.list_lock);
 
-	*tv = tv_vm;
 	return 0;
-
-out_err:
-	if (tv_vm != tv_onstack)
-		kvfree(tv_vm);
-
-	return err;
-}
-
-/**
- * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
- * xe_vm_lock_dma_resv()
- * @vm: The vm.
- * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
- * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
- * @ww: The ww_acquire_context used for locking.
- * @objs: The list returned from xe_vm_lock_dma_resv().
- *
- * Unlocks the reservation objects and frees any memory allocated by
- * xe_vm_lock_dma_resv().
- */
-void xe_vm_unlock_dma_resv(struct xe_vm *vm,
-			   struct ttm_validate_buffer *tv_onstack,
-			   struct ttm_validate_buffer *tv,
-			   struct ww_acquire_ctx *ww,
-			   struct list_head *objs)
-{
-	/*
-	 * Nothing should've been able to enter the list while we were locked,
-	 * since we've held the dma-resvs of all the vm's external objects,
-	 * and holding the dma_resv of an object is required for list
-	 * addition, and we shouldn't add ourselves.
-	 */
-	XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
-
-	ttm_eu_backoff_reservation(ww, objs);
-	if (tv && tv != tv_onstack)
-		kvfree(tv);
 }
 
 #define XE_VM_REBIND_RETRY_TIMEOUT_MS	1000
@@ -538,14 +467,94 @@ static void xe_vm_kill(struct xe_vm *vm)
 	/* TODO: Inform user the VM is banned */
 }
 
+/**
+ * xe_vm_validate_should_retry() - Whether to retry after a validate error.
+ * @exec: The drm_exec object used for locking before validation.
+ * @err: The error returned from ttm_bo_validate().
+ * @end: A ktime_t cookie that should be set to 0 before first use and
+ * that should be reused on subsequent calls.
+ *
+ * With multiple active VMs, under memory pressure, it is possible that
+ * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
+ * Until ttm properly handles locking in such scenarios, best thing the
+ * driver can do is retry with a timeout. Check if that is necessary, and
+ * if so unlock the drm_exec's objects while keeping the ticket to prepare
+ * for a rerun.
+ *
+ * Return: true if a retry after drm_exec_init() is recommended;
+ * false otherwise.
+ */
+bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
+{
+	ktime_t cur;
+
+	if (err != -ENOMEM)
+		return false;
+
+	cur = ktime_get();
+	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
+	if (!ktime_before(cur, *end))
+		return false;
+
+	/*
+	 * We would like to keep the ticket here with
+	 * drm_exec_unlock_all(), but WW mutex asserts currently
+	 * stop us from that. In any case this function could go away
+	 * with proper TTM -EDEADLK handling.
+	 */
+	drm_exec_fini(exec);
+
+	msleep(20);
+	return true;
+}
+
+static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
+				 bool *done)
+{
+	struct xe_vma *vma;
+	int err;
+
+	err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
+				   vm->preempt.num_exec_queues);
+	if (err)
+		return err;
+
+	if (xe_vm_is_idle(vm)) {
+		vm->preempt.rebind_deactivated = true;
+		*done = true;
+		return 0;
+	}
+
+	if (!preempt_fences_waiting(vm)) {
+		*done = true;
+		return 0;
+	}
+
+	err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
+	if (err)
+		return err;
+
+	err = wait_for_existing_preempt_fences(vm);
+	if (err)
+		return err;
+
+	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
+		if (xe_vma_has_no_bo(vma) ||
+		    vma->gpuva.flags & XE_VMA_DESTROYED)
+			continue;
+
+		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static void preempt_rebind_work_func(struct work_struct *w)
 {
 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
-	struct xe_vma *vma;
-	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
-	struct ttm_validate_buffer *tv;
-	struct ww_acquire_ctx ww;
-	struct list_head objs;
+	struct drm_exec exec;
 	struct dma_fence *rebind_fence;
 	unsigned int fence_count = 0;
 	LIST_HEAD(preempt_fences);
@@ -588,42 +597,25 @@ static void preempt_rebind_work_func(struct work_struct *w)
 		goto out_unlock_outer;
 	}
 
-	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
-				  false, vm->preempt.num_exec_queues);
-	if (err)
-		goto out_unlock_outer;
-
-	if (xe_vm_is_idle(vm)) {
-		vm->preempt.rebind_deactivated = true;
-		goto out_unlock;
-	}
-
-	/* Fresh preempt fences already installed. Everyting is running. */
-	if (!preempt_fences_waiting(vm))
-		goto out_unlock;
-
-	/*
-	 * This makes sure vm is completely suspended and also balances
-	 * xe_engine suspend- and resume; we resume *all* vm engines below.
-	 */
-	err = wait_for_existing_preempt_fences(vm);
-	if (err)
-		goto out_unlock;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+
+	drm_exec_until_all_locked(&exec) {
+		bool done = false;
+
+		err = xe_preempt_work_begin(&exec, vm, &done);
+		drm_exec_retry_on_contention(&exec);
+		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
+			err = -EAGAIN;
+			goto out_unlock_outer;
+		}
+		if (err || done)
+			goto out_unlock;
+	}
 
 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
 	if (err)
 		goto out_unlock;
 
-	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
-		if (xe_vma_has_no_bo(vma) ||
-		    vma->gpuva.flags & XE_VMA_DESTROYED)
-			continue;
-
-		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
-		if (err)
-			goto out_unlock;
-	}
-
 	rebind_fence = xe_vm_rebind(vm, true);
 	if (IS_ERR(rebind_fence)) {
 		err = PTR_ERR(rebind_fence);
@@ -668,30 +660,13 @@ static void preempt_rebind_work_func(struct work_struct *w)
 	up_read(&vm->userptr.notifier_lock);
 
 out_unlock:
-	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
+	drm_exec_fini(&exec);
 out_unlock_outer:
 	if (err == -EAGAIN) {
 		trace_xe_vm_rebind_worker_retry(vm);
 		goto retry;
 	}
 
-	/*
-	 * With multiple active VMs, under memory pressure, it is possible that
-	 * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
-	 * Until ttm properly handles locking in such scenarios, best thing the
-	 * driver can do is retry with a timeout. Killing the VM or putting it
-	 * in error state after timeout or other error scenarios is still TBD.
-	 */
-	if (err == -ENOMEM) {
-		ktime_t cur = ktime_get();
-
-		end = end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
-		if (ktime_before(cur, end)) {
-			msleep(20);
-			trace_xe_vm_rebind_worker_retry(vm);
-			goto retry;
-		}
-	}
-
 	if (err) {
 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
 		xe_vm_kill(vm);
...
@@ -21,6 +21,7 @@ struct ttm_validate_buffer;
 struct xe_exec_queue;
 struct xe_file;
 struct xe_sync_entry;
+struct drm_exec;
 
 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
@@ -208,23 +209,10 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma);
 
 int xe_vma_userptr_check_repin(struct xe_vma *vma);
 
-/*
- * XE_ONSTACK_TV is used to size the tv_onstack array that is input
- * to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().
- */
-#define XE_ONSTACK_TV 20
-
-int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
-			struct ttm_validate_buffer *tv_onstack,
-			struct ttm_validate_buffer **tv,
-			struct list_head *objs,
-			bool intr,
-			unsigned int num_shared);
-
-void xe_vm_unlock_dma_resv(struct xe_vm *vm,
-			   struct ttm_validate_buffer *tv_onstack,
-			   struct ttm_validate_buffer *tv,
-			   struct ww_acquire_ctx *ww,
-			   struct list_head *objs);
+bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
+
+int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
+			unsigned int num_shared, bool lock_vm);
 
 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
 			     enum dma_resv_usage usage);
...