Commit 5278ca04 authored by Thomas Hellström

drm/xe: Fix unexpected backmerge results

The recent backmerge from drm-next to drm-xe-next brought with it
some silent unexpected results. One code snippet was added twice
and a partial revert had merge errors. Fix that up to
reinstate the affected code as it was before the backmerge.

v2:
- Commit log message rewording (Lucas De Marchi)

Fixes: 79790b68 ("Merge drm/drm-next into drm-xe-next")
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240423121114.39325-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit 06e7139a)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
parent 4a56c0ed
@@ -863,11 +863,6 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 		vma->ufence = NULL;
 	}
 
-	if (vma->ufence) {
-		xe_sync_ufence_put(vma->ufence);
-		vma->ufence = NULL;
-	}
-
 	if (xe_vma_is_userptr(vma)) {
 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 		struct xe_userptr *userptr = &uvma->userptr;
@@ -2100,6 +2095,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
 		if (__op->op == DRM_GPUVA_OP_MAP) {
+			op->map.immediate =
+				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+			op->map.read_only =
+				flags & DRM_XE_VM_BIND_FLAG_READONLY;
 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
 			op->map.pat_index = pat_index;
@@ -2294,6 +2293,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 		switch (op->base.op) {
 		case DRM_GPUVA_OP_MAP:
 		{
+			flags |= op->map.read_only ?
+				VMA_CREATE_FLAG_READ_ONLY : 0;
 			flags |= op->map.is_null ?
 				VMA_CREATE_FLAG_IS_NULL : 0;
 			flags |= op->map.dumpable ?
@@ -2438,7 +2439,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
 	case DRM_GPUVA_OP_MAP:
 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
 				 op->syncs, op->num_syncs,
-				 !xe_vm_in_fault_mode(vm),
+				 op->map.immediate || !xe_vm_in_fault_mode(vm),
 				 op->flags & XE_VMA_OP_FIRST,
 				 op->flags & XE_VMA_OP_LAST);
 		break;
@@ -269,6 +269,10 @@ struct xe_vm {
 struct xe_vma_op_map {
 	/** @vma: VMA to map */
 	struct xe_vma *vma;
+	/** @immediate: Immediate bind */
+	bool immediate;
+	/** @read_only: Read only */
+	bool read_only;
 	/** @is_null: is NULL binding */
 	bool is_null;
 	/** @dumpable: whether BO is dumped on GPU hang */
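For reference, the behaviour the reinstated hunks restore can be summarised in a small stand-alone C sketch. This is not driver code: struct map_op_state, fill_map_op() and bind_immediately() are hypothetical stand-ins, and the two DRM_XE_VM_BIND_FLAG_* values are redefined locally with arbitrary bit positions; only the flag names and the immediate/read_only/fault-mode logic come from the diff above.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the uapi bind flags used in the hunks above;
 * the bit values here are arbitrary, only the names come from the diff. */
#define DRM_XE_VM_BIND_FLAG_READONLY  (1u << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1u << 1)

/* Hypothetical stand-in for the op->map state reinstated in the header hunk. */
struct map_op_state {
	bool immediate;  /* user asked for an immediate bind */
	bool read_only;  /* mapping should be created read-only */
};

/* Mirrors the reinstated hunk in vm_bind_ioctl_ops_create(): latch the
 * user-supplied bind flags into the per-operation map state. */
static void fill_map_op(struct map_op_state *op, unsigned int flags)
{
	op->immediate = flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
	op->read_only = flags & DRM_XE_VM_BIND_FLAG_READONLY;
}

/* Mirrors the op_execute() change: bind right away when the user requested
 * an immediate bind or when the VM is not in fault mode. */
static bool bind_immediately(const struct map_op_state *op, bool vm_in_fault_mode)
{
	return op->immediate || !vm_in_fault_mode;
}

int main(void)
{
	struct map_op_state op;

	fill_map_op(&op, DRM_XE_VM_BIND_FLAG_IMMEDIATE);
	printf("fault-mode VM, IMMEDIATE set -> bind now: %d\n",
	       bind_immediately(&op, true));  /* prints 1 */

	fill_map_op(&op, 0);
	printf("fault-mode VM, no flags      -> bind now: %d\n",
	       bind_immediately(&op, true));  /* prints 0 */
	return 0;
}

In other words, after the fix an explicit DRM_XE_VM_BIND_FLAG_IMMEDIATE again forces the bind at ioctl time even on a fault-mode VM, and DRM_XE_VM_BIND_FLAG_READONLY again reaches VMA creation as VMA_CREATE_FLAG_READ_ONLY.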