Commit 61e3270e authored by Matthew Brost

drm/xe: Add vm_bind_ioctl_ops_fini helper

Simplify the VM bind code by signaling out-fences and destroying VMAs in a
single location. This will help with the transition to a single job for many bind ops.

v2:
 - s/vm_bind_ioctl_ops_install_fences/vm_bind_ioctl_ops_fini (Oak)
 - Set last fence in vm_bind_ioctl_ops_fini (Oak)

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-10-matthew.brost@intel.com
parent 22cfdd28
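
The diff below removes the scattered "if (last_op) { ... xe_sync_entry_signal(...) }" loops and the per-path xe_vma_destroy() / xe_exec_queue_last_fence_set() calls, concentrating all of that in the new vm_bind_ioctl_ops_fini(). As a rough, self-contained model of the shape of this refactor (plain userspace C with made-up names, not the kernel API):

	/* Toy model of the refactor: ops no longer signal completion
	 * themselves based on a last_op flag; the caller runs every op
	 * and a single fini helper then signals all sync entries once. */
	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_OPS   3
	#define NUM_SYNCS 2

	struct sync_entry { bool signaled; };

	/* After the refactor: an op only does its own work. */
	static void op_execute(int op)
	{
		printf("execute op %d\n", op);
	}

	/* Single completion point, in the spirit of vm_bind_ioctl_ops_fini(). */
	static void ops_fini(struct sync_entry *syncs, int num_syncs)
	{
		for (int i = 0; i < num_syncs; i++)
			syncs[i].signaled = true;	/* signal out-fences once */
	}

	int main(void)
	{
		struct sync_entry syncs[NUM_SYNCS] = {0};

		for (int op = 0; op < NUM_OPS; op++)
			op_execute(op);
		ops_fini(syncs, NUM_SYNCS);

		for (int i = 0; i < NUM_SYNCS; i++)
			printf("sync %d signaled: %d\n", i, syncs[i].signaled);
		return 0;
	}

The payoff comes when many bind ops are submitted as one job, as the commit message notes: completion handling no longer depends on tagging the last op.
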
@@ -1646,7 +1646,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	struct dma_fence *fence = NULL;
 	struct dma_fence **fences = NULL;
 	struct dma_fence_array *cf = NULL;
-	int cur_fence = 0, i;
+	int cur_fence = 0;
 	int number_tiles = hweight8(vma->tile_present);
 	int err;
 	u8 id;
@@ -1704,10 +1704,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
 	fence = cf ? &cf->base : !fence ?
 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-	if (last_op) {
-		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], fence);
-	}
 
 	return fence;
 
@@ -1731,7 +1727,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	struct dma_fence **fences = NULL;
 	struct dma_fence_array *cf = NULL;
 	struct xe_vm *vm = xe_vma_vm(vma);
-	int cur_fence = 0, i;
+	int cur_fence = 0;
 	int number_tiles = hweight8(tile_mask);
 	int err;
 	u8 id;
@@ -1778,12 +1774,6 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 		}
 	}
 
-	if (last_op) {
-		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i],
-					     cf ? &cf->base : fence);
-	}
-
 	return cf ? &cf->base : fence;
 
 err_fences:
@@ -1835,20 +1825,11 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 		if (IS_ERR(fence))
 			return fence;
 	} else {
-		int i;
-
 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-		if (last_op) {
-			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], fence);
-		}
 	}
 
-	if (last_op)
-		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
 	return fence;
 }
 
@@ -1858,7 +1839,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	     u32 num_syncs, bool first_op, bool last_op)
 {
 	struct dma_fence *fence;
-	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
 	xe_vm_assert_held(vm);
 	xe_bo_assert_held(xe_vma_bo(vma));
@@ -1867,10 +1847,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	if (IS_ERR(fence))
 		return fence;
 
-	xe_vma_destroy(vma, fence);
-	if (last_op)
-		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
 	return fence;
 }
 
@@ -2025,17 +2001,7 @@ xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
 				  vma->tile_mask, true, first_op, last_op);
 	} else {
-		struct dma_fence *fence =
-			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-		int i;
-
-		/* Nothing to do, signal fences now */
-		if (last_op) {
-			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], fence);
-		}
-
-		return fence;
+		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 	}
 }
 
@@ -2838,6 +2804,26 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 	return fence;
 }
 
+static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
+				   struct dma_fence *fence)
+{
+	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+	struct xe_vma_op *op;
+	int i;
+
+	list_for_each_entry(op, &vops->list, link) {
+		if (op->base.op == DRM_GPUVA_OP_UNMAP)
+			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
+		else if (op->base.op == DRM_GPUVA_OP_REMAP)
+			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
+				       fence);
+	}
+	for (i = 0; i < vops->num_syncs; i++)
+		xe_sync_entry_signal(vops->syncs + i, fence);
+	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+	dma_fence_put(fence);
+}
+
 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 				     struct xe_vma_ops *vops)
 {
@@ -2862,7 +2848,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 			xe_vm_kill(vm, false);
 			goto unlock;
 		} else {
-			dma_fence_put(fence);
+			vm_bind_ioctl_ops_fini(vm, vops, fence);
 		}
 	}
 
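
Note the ownership change in the last hunk: the success path used to drop the caller's fence reference directly with dma_fence_put(fence); now vm_bind_ioctl_ops_fini() consumes that reference itself, after destroying unmapped VMAs, signaling the syncs, and recording the last fence. A standalone sketch of that hand-off (plain C with a hypothetical toy_fence type, not the real dma_fence API):

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a refcounted fence. */
	struct toy_fence { int refcount; int signaled; };

	static void toy_fence_put(struct toy_fence *f)
	{
		assert(f->refcount > 0);
		if (--f->refcount == 0)
			printf("fence released\n");
	}

	/* Like vm_bind_ioctl_ops_fini(): signals completion, then
	 * consumes the reference the caller passed in. */
	static void toy_ops_fini(struct toy_fence *f)
	{
		f->signaled = 1;
		toy_fence_put(f);	/* ownership transferred to fini */
	}

	int main(void)
	{
		struct toy_fence f = { .refcount = 1 };

		toy_ops_fini(&f);	/* caller must not put the fence again */
		return 0;
	}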