Commit 617eebb9 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Fix array of binds

If multiple bind ops in an array of binds touch the same address range,
invalid GPUVA operations are generated, because each GPUVA operation is
generated against the original GPUVA state. To fix this, after each
GPUVA operation is generated, commit it so that the GPUVA state is
updated and subsequent bind ops see the current GPUVA state.
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent bbd52b61
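Editor's note before the diff: the commit message above boils down to "generate an op, commit it, then generate the next one, so later binds in the same array see up-to-date state." Below is a minimal, self-contained C sketch of that idea. It is not code from this commit; struct range, struct bind, gen_op() and commit_op() are hypothetical stand-ins that model the GPUVA view as a single mapped range, purely to show why generating every op from the original (stale) state produces a bogus second op for an overlapping bind, while committing each op first does not.

/*
 * Editor's illustration (not part of the commit): models an address space
 * as one mapped range and "generates" ops for an array of binds. All names
 * here are hypothetical stand-ins for the GPUVA bookkeeping in the driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };   /* currently mapped range */
struct bind  { unsigned long start, end; };   /* requested bind range   */

/* "Generate" an op: report whether the bind overlaps the mapped range. */
static bool gen_op(const struct range *state, const struct bind *b)
{
	return b->start < state->end && b->end > state->start;
}

/* "Commit" the op: shrink the mapped range to reflect the bind. */
static void commit_op(struct range *state, const struct bind *b)
{
	if (b->start <= state->start && b->end >= state->end)
		state->start = state->end = 0;        /* fully unmapped */
	else if (b->start <= state->start)
		state->start = b->end;                /* trim the front */
	else if (b->end >= state->end)
		state->end = b->start;                /* trim the back  */
	/* (a real driver would also handle a split in the middle) */
}

int main(void)
{
	/* two binds in one array, both touching 0x1000-0x2000 */
	const struct bind binds[] = {
		{ 0x1000, 0x2000 },
		{ 0x1000, 0x2000 },
	};
	struct range stale   = { 0x1000, 0x3000 };
	struct range current = { 0x1000, 0x3000 };

	for (unsigned int i = 0; i < 2; i++) {
		/* buggy flow: every op generated from the original state */
		printf("bind %u from stale state:   overlap=%d\n",
		       i, gen_op(&stale, &binds[i]));

		/* fixed flow: generate, then commit before the next bind */
		printf("bind %u from current state: overlap=%d\n",
		       i, gen_op(&current, &binds[i]));
		commit_op(&current, &binds[i]);
	}
	return 0;
}

With the stale view, the second bind still "overlaps" and would generate an operation for a range that was already unmapped; with the committed view it does not. The patch applies the same principle by calling xe_vma_op_commit() from inside vm_bind_ioctl_ops_parse(), as the diff below shows.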
@@ -2430,24 +2430,73 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
 	return SZ_4K;
 }
 
-/*
- * Parse operations list and create any resources needed for the operations
- * prior to fully committing to the operations. This setup can fail.
- */
+static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
+{
+	int err = 0;
+
+	lockdep_assert_held_write(&vm->lock);
+
+	switch (op->base.op) {
+	case DRM_GPUVA_OP_MAP:
+		err |= xe_vm_insert_vma(vm, op->map.vma);
+		if (!err)
+			op->flags |= XE_VMA_OP_COMMITTED;
+		break;
+	case DRM_GPUVA_OP_REMAP:
+		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
+				 true);
+		op->flags |= XE_VMA_OP_COMMITTED;
+
+		if (op->remap.prev) {
+			err |= xe_vm_insert_vma(vm, op->remap.prev);
+			if (!err)
+				op->flags |= XE_VMA_OP_PREV_COMMITTED;
+			if (!err && op->remap.skip_prev)
+				op->remap.prev = NULL;
+		}
+		if (op->remap.next) {
+			err |= xe_vm_insert_vma(vm, op->remap.next);
+			if (!err)
+				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
+			if (!err && op->remap.skip_next)
+				op->remap.next = NULL;
+		}
+
+		/* Adjust for partial unbind after removin VMA from VM */
+		if (!err) {
+			op->base.remap.unmap->va->va.addr = op->remap.start;
+			op->base.remap.unmap->va->va.range = op->remap.range;
+		}
+		break;
+	case DRM_GPUVA_OP_UNMAP:
+		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
+		op->flags |= XE_VMA_OP_COMMITTED;
+		break;
+	case DRM_GPUVA_OP_PREFETCH:
+		op->flags |= XE_VMA_OP_COMMITTED;
+		break;
+	default:
+		XE_WARN_ON("NOT POSSIBLE");
+	}
+
+	return err;
+}
+
 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
-				   struct drm_gpuva_ops **ops, int num_ops_list,
+				   struct drm_gpuva_ops *ops,
 				   struct xe_sync_entry *syncs, u32 num_syncs,
-				   struct list_head *ops_list, bool async)
+				   struct list_head *ops_list, bool last,
+				   bool async)
 {
 	struct xe_vma_op *last_op = NULL;
-	struct list_head *async_list = NULL;
 	struct async_op_fence *fence = NULL;
-	int err, i;
+	struct drm_gpuva_op *__op;
+	int err = 0;
 
 	lockdep_assert_held_write(&vm->lock);
-	XE_WARN_ON(num_ops_list > 1 && !async);
 
-	if (num_syncs && async) {
+	if (last && num_syncs && async) {
 		u64 seqno;
 
 		fence = kmalloc(sizeof(*fence), GFP_KERNEL);
@@ -2466,145 +2515,145 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 		}
 	}
 
-	for (i = 0; i < num_ops_list; ++i) {
-		struct drm_gpuva_ops *__ops = ops[i];
-		struct drm_gpuva_op *__op;
+	drm_gpuva_for_each_op(__op, ops) {
+		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+		bool first = list_empty(ops_list);
 
-		drm_gpuva_for_each_op(__op, __ops) {
-			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-			bool first = !async_list;
+		XE_WARN_ON(!first && !async);
 
-			XE_WARN_ON(!first && !async);
+		INIT_LIST_HEAD(&op->link);
+		list_add_tail(&op->link, ops_list);
 
-			INIT_LIST_HEAD(&op->link);
-			if (first)
-				async_list = ops_list;
-			list_add_tail(&op->link, async_list);
+		if (first) {
+			op->flags |= XE_VMA_OP_FIRST;
+			op->num_syncs = num_syncs;
+			op->syncs = syncs;
+		}
 
-			if (first) {
-				op->flags |= XE_VMA_OP_FIRST;
-				op->num_syncs = num_syncs;
-				op->syncs = syncs;
-			}
+		op->q = q;
 
-			op->q = q;
+		switch (op->base.op) {
+		case DRM_GPUVA_OP_MAP:
+		{
+			struct xe_vma *vma;
 
-			switch (op->base.op) {
-			case DRM_GPUVA_OP_MAP:
-			{
-				struct xe_vma *vma;
+			vma = new_vma(vm, &op->base.map,
+				      op->tile_mask, op->map.read_only,
+				      op->map.is_null);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto free_fence;
+			}
 
-				vma = new_vma(vm, &op->base.map,
-					      op->tile_mask, op->map.read_only,
-					      op->map.is_null);
-				if (IS_ERR(vma)) {
-					err = PTR_ERR(vma);
-					goto free_fence;
-				}
+			op->map.vma = vma;
+			break;
+		}
+		case DRM_GPUVA_OP_REMAP:
+		{
+			struct xe_vma *old =
+				gpuva_to_vma(op->base.remap.unmap->va);
 
-				op->map.vma = vma;
-				break;
-			}
-			case DRM_GPUVA_OP_REMAP:
-			{
-				struct xe_vma *old =
-					gpuva_to_vma(op->base.remap.unmap->va);
+			op->remap.start = xe_vma_start(old);
+			op->remap.range = xe_vma_size(old);
 
-				op->remap.start = xe_vma_start(old);
-				op->remap.range = xe_vma_size(old);
+			if (op->base.remap.prev) {
+				struct xe_vma *vma;
+				bool read_only =
+					op->base.remap.unmap->va->flags &
+					XE_VMA_READ_ONLY;
+				bool is_null =
+					op->base.remap.unmap->va->flags &
+					DRM_GPUVA_SPARSE;
 
-				if (op->base.remap.prev) {
-					struct xe_vma *vma;
-					bool read_only =
-						op->base.remap.unmap->va->flags &
-						XE_VMA_READ_ONLY;
-					bool is_null =
-						op->base.remap.unmap->va->flags &
-						DRM_GPUVA_SPARSE;
+				vma = new_vma(vm, op->base.remap.prev,
+					      op->tile_mask, read_only,
+					      is_null);
+				if (IS_ERR(vma)) {
+					err = PTR_ERR(vma);
+					goto free_fence;
+				}
 
-					vma = new_vma(vm, op->base.remap.prev,
-						      op->tile_mask, read_only,
-						      is_null);
-					if (IS_ERR(vma)) {
-						err = PTR_ERR(vma);
-						goto free_fence;
-					}
+				op->remap.prev = vma;
 
-					op->remap.prev = vma;
+				/*
+				 * Userptr creates a new SG mapping so
+				 * we must also rebind.
+				 */
+				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+					IS_ALIGNED(xe_vma_end(vma),
+						   xe_vma_max_pte_size(old));
+				if (op->remap.skip_prev) {
+					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+					op->remap.range -=
+						xe_vma_end(vma) -
+						xe_vma_start(old);
+					op->remap.start = xe_vma_end(vma);
+				}
+			}
 
-					/*
-					 * Userptr creates a new SG mapping so
-					 * we must also rebind.
-					 */
-					op->remap.skip_prev = !xe_vma_is_userptr(old) &&
-						IS_ALIGNED(xe_vma_end(vma),
-							   xe_vma_max_pte_size(old));
-					if (op->remap.skip_prev) {
-						xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
-						op->remap.range -=
-							xe_vma_end(vma) -
-							xe_vma_start(old);
-						op->remap.start = xe_vma_end(vma);
-					}
-				}
+			if (op->base.remap.next) {
+				struct xe_vma *vma;
+				bool read_only =
+					op->base.remap.unmap->va->flags &
+					XE_VMA_READ_ONLY;
 
-				if (op->base.remap.next) {
-					struct xe_vma *vma;
-					bool read_only =
-						op->base.remap.unmap->va->flags &
-						XE_VMA_READ_ONLY;
+				bool is_null =
+					op->base.remap.unmap->va->flags &
+					DRM_GPUVA_SPARSE;
 
-					bool is_null =
-						op->base.remap.unmap->va->flags &
-						DRM_GPUVA_SPARSE;
+				vma = new_vma(vm, op->base.remap.next,
+					      op->tile_mask, read_only,
+					      is_null);
+				if (IS_ERR(vma)) {
+					err = PTR_ERR(vma);
+					goto free_fence;
+				}
 
-					vma = new_vma(vm, op->base.remap.next,
-						      op->tile_mask, read_only,
-						      is_null);
-					if (IS_ERR(vma)) {
-						err = PTR_ERR(vma);
-						goto free_fence;
-					}
+				op->remap.next = vma;
 
-					op->remap.next = vma;
+				/*
+				 * Userptr creates a new SG mapping so
+				 * we must also rebind.
+				 */
+				op->remap.skip_next = !xe_vma_is_userptr(old) &&
+					IS_ALIGNED(xe_vma_start(vma),
+						   xe_vma_max_pte_size(old));
+				if (op->remap.skip_next) {
+					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+					op->remap.range -=
+						xe_vma_end(old) -
+						xe_vma_start(vma);
+				}
+			}
+			break;
+		}
+		case DRM_GPUVA_OP_UNMAP:
+		case DRM_GPUVA_OP_PREFETCH:
+			/* Nothing to do */
+			break;
+		default:
+			XE_WARN_ON("NOT POSSIBLE");
+		}
 
-					/*
-					 * Userptr creates a new SG mapping so
-					 * we must also rebind.
-					 */
-					op->remap.skip_next = !xe_vma_is_userptr(old) &&
-						IS_ALIGNED(xe_vma_start(vma),
-							   xe_vma_max_pte_size(old));
-					if (op->remap.skip_next) {
-						xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
-						op->remap.range -=
-							xe_vma_end(old) -
-							xe_vma_start(vma);
-					}
-				}
-				break;
-			}
-			case DRM_GPUVA_OP_UNMAP:
-			case DRM_GPUVA_OP_PREFETCH:
-				/* Nothing to do */
-				break;
-			default:
-				XE_WARN_ON("NOT POSSIBLE");
-			}
+		last_op = op;
 
-			last_op = op;
-		}
+		err = xe_vma_op_commit(vm, op);
+		if (err)
+			goto free_fence;
+	}
 
-		last_op->ops = __ops;
-	}
+	/* FIXME: Unhandled corner case */
+	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
 
-	if (!last_op)
-		return -ENODATA;
+	if (!last_op)
+		goto free_fence;
 
-	last_op->flags |= XE_VMA_OP_LAST;
-	last_op->num_syncs = num_syncs;
-	last_op->syncs = syncs;
-	last_op->fence = fence;
+	last_op->ops = ops;
+	if (last) {
+		last_op->flags |= XE_VMA_OP_LAST;
+		last_op->num_syncs = num_syncs;
+		last_op->syncs = syncs;
+		last_op->fence = fence;
+	}
 
 	return 0;
@@ -2613,58 +2662,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 	return err;
 }
 
-static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
-{
-	int err = 0;
-
-	lockdep_assert_held_write(&vm->lock);
-
-	switch (op->base.op) {
-	case DRM_GPUVA_OP_MAP:
-		err |= xe_vm_insert_vma(vm, op->map.vma);
-		if (!err)
-			op->flags |= XE_VMA_OP_COMMITTED;
-		break;
-	case DRM_GPUVA_OP_REMAP:
-		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
-				 true);
-		op->flags |= XE_VMA_OP_COMMITTED;
-
-		if (op->remap.prev) {
-			err |= xe_vm_insert_vma(vm, op->remap.prev);
-			if (!err)
-				op->flags |= XE_VMA_OP_PREV_COMMITTED;
-			if (!err && op->remap.skip_prev)
-				op->remap.prev = NULL;
-		}
-		if (op->remap.next) {
-			err |= xe_vm_insert_vma(vm, op->remap.next);
-			if (!err)
-				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
-			if (!err && op->remap.skip_next)
-				op->remap.next = NULL;
-		}
-
-		/* Adjust for partial unbind after removin VMA from VM */
-		if (!err) {
-			op->base.remap.unmap->va->va.addr = op->remap.start;
-			op->base.remap.unmap->va->va.range = op->remap.range;
-		}
-		break;
-	case DRM_GPUVA_OP_UNMAP:
-		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
-		op->flags |= XE_VMA_OP_COMMITTED;
-		break;
-	case DRM_GPUVA_OP_PREFETCH:
-		op->flags |= XE_VMA_OP_COMMITTED;
-		break;
-	default:
-		XE_WARN_ON("NOT POSSIBLE");
-	}
-
-	return err;
-}
-
 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
 			       struct xe_vma_op *op)
 {
@@ -2882,11 +2879,13 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
 	{
 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
 
-		down_read(&vm->userptr.notifier_lock);
-		vma->gpuva.flags &= ~XE_VMA_DESTROYED;
-		up_read(&vm->userptr.notifier_lock);
-		if (post_commit)
-			xe_vm_insert_vma(vm, vma);
+		if (vma) {
+			down_read(&vm->userptr.notifier_lock);
+			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+			up_read(&vm->userptr.notifier_lock);
+			if (post_commit)
+				xe_vm_insert_vma(vm, vma);
+		}
 		break;
 	}
 	case DRM_GPUVA_OP_REMAP:
@@ -2901,11 +2900,13 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
 			xe_vma_destroy_unlocked(op->remap.next);
 		}
-		down_read(&vm->userptr.notifier_lock);
-		vma->gpuva.flags &= ~XE_VMA_DESTROYED;
-		up_read(&vm->userptr.notifier_lock);
-		if (post_commit)
-			xe_vm_insert_vma(vm, vma);
+		if (vma) {
+			down_read(&vm->userptr.notifier_lock);
+			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+			up_read(&vm->userptr.notifier_lock);
+			if (post_commit)
+				xe_vm_insert_vma(vm, vma);
+		}
 		break;
 	}
 	case DRM_GPUVA_OP_PREFETCH:
@@ -2994,20 +2995,16 @@ static void xe_vma_op_work_func(struct work_struct *w)
 	}
 }
 
-static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
-				    struct list_head *ops_list, bool async)
+static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+				     struct list_head *ops_list, bool async)
 {
 	struct xe_vma_op *op, *last_op, *next;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
 
-	list_for_each_entry(op, ops_list, link) {
+	list_for_each_entry(op, ops_list, link)
 		last_op = op;
-		err = xe_vma_op_commit(vm, op);
-		if (err)
-			goto unwind;
-	}
 
 	if (!async) {
 		err = xe_vma_op_execute(vm, last_op);
@@ -3046,28 +3043,29 @@ static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
 	return err;
 }
 
+/*
+ * Unwind operations list, called after a failure of vm_bind_ioctl_ops_create or
+ * vm_bind_ioctl_ops_parse.
+ */
 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
 				     struct drm_gpuva_ops **ops,
 				     int num_ops_list)
 {
 	int i;
 
-	for (i = 0; i < num_ops_list; ++i) {
+	for (i = num_ops_list - 1; i; ++i) {
 		struct drm_gpuva_ops *__ops = ops[i];
 		struct drm_gpuva_op *__op;
 
 		if (!__ops)
 			continue;
 
-		drm_gpuva_for_each_op(__op, __ops) {
+		drm_gpuva_for_each_op_reverse(__op, __ops) {
 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
-			xe_vma_op_unwind(vm, op, false, false, false);
+			xe_vma_op_unwind(vm, op,
+					 op->flags & XE_VMA_OP_COMMITTED,
+					 op->flags & XE_VMA_OP_PREV_COMMITTED,
+					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
 		}
+
+		drm_gpuva_ops_free(&vm->gpuvm, __ops);
 	}
 }
@@ -3388,14 +3386,22 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			ops[i] = NULL;
 			goto unwind_ops;
 		}
+
+		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
+					      &ops_list,
+					      i == args->num_binds - 1,
+					      async);
+		if (err)
+			goto unwind_ops;
 	}
 
-	err = vm_bind_ioctl_ops_parse(vm, q, ops, args->num_binds,
-				      syncs, num_syncs, &ops_list, async);
-	if (err)
-		goto unwind_ops;
+	/* Nothing to do */
+	if (list_empty(&ops_list)) {
+		err = -ENODATA;
+		goto unwind_ops;
+	}
 
-	err = vm_bind_ioctl_ops_commit(vm, &ops_list, async);
+	err = vm_bind_ioctl_ops_execute(vm, &ops_list, async);
 
 	up_write(&vm->lock);
 
 	for (i = 0; i < args->num_binds; ++i)
...