Commit e05c6c97 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Deprecate XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE implementation

We are going to remove XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE from the
uAPI, so deprecate the implementation first by making
XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE a NOP. After removal of
XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE the property is simply inherited
from the VM.

v2:
 - Update commit message with explanation of removal (Niranjana)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 5009d554
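
A minimal sketch of the deprecation pattern described above, using simplified
stand-in types (fake_vm and fake_exec_queue are illustrative, not the real
driver structures): the property setter is kept so existing userspace does not
start failing, but it no longer changes any state, and the queue's effective
compute mode simply follows the VM it was created against.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_vm {
	bool compute_mode;		/* decided at VM creation time */
};

struct fake_exec_queue {
	struct fake_vm *vm;
};

/* Deprecated property: still accepted for compatibility, but a NOP. */
static int set_compute_mode(struct fake_exec_queue *q, uint64_t value,
			    bool create)
{
	(void)q;
	(void)value;
	(void)create;
	return 0;			/* nothing left to do */
}

/* The queue's effective mode is now inherited from the VM. */
static bool queue_in_compute_mode(const struct fake_exec_queue *q)
{
	return q->vm->compute_mode;
}

int main(void)
{
	struct fake_vm vm = { .compute_mode = true };
	struct fake_exec_queue q = { .vm = &vm };

	/* Userspace may still pass the property; it changes nothing. */
	set_compute_mode(&q, 0, true);
	printf("compute mode: %d\n", queue_in_compute_mode(&q));
	return 0;
}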
@@ -323,39 +323,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
 static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
 				       u64 value, bool create)
 {
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
-		return -EINVAL;
-
-	if (value) {
-		struct xe_vm *vm = q->vm;
-		int err;
-
-		if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
-			return -EOPNOTSUPP;
-
-		if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
-			return -EOPNOTSUPP;
-
-		if (XE_IOCTL_DBG(xe, q->width != 1))
-			return -EINVAL;
-
-		q->compute.context = dma_fence_context_alloc(1);
-		spin_lock_init(&q->compute.lock);
-
-		err = xe_vm_add_compute_exec_queue(vm, q);
-		if (XE_IOCTL_DBG(xe, err))
-			return err;
-
-		q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
-		q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-	}
-
 	return 0;
 }
@@ -365,7 +332,7 @@ static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue
 	if (XE_IOCTL_DBG(xe, !create))
 		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
+	if (XE_IOCTL_DBG(xe, xe_vm_in_compute_mode(q->vm)))
 		return -EINVAL;
 
 	if (value)
@@ -742,18 +709,21 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		xe_vm_put(vm);
 		if (IS_ERR(q))
 			return PTR_ERR(q);
+
+		if (xe_vm_in_compute_mode(vm)) {
+			q->compute.context = dma_fence_context_alloc(1);
+			spin_lock_init(&q->compute.lock);
+
+			err = xe_vm_add_compute_exec_queue(vm, q);
+			if (XE_IOCTL_DBG(xe, err))
+				goto put_exec_queue;
+		}
 	}
 
 	if (args->extensions) {
 		err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
 		if (XE_IOCTL_DBG(xe, err))
-			goto put_exec_queue;
-	}
-
-	if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
-			  !!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
-		err = -EOPNOTSUPP;
-		goto put_exec_queue;
+			goto kill_exec_queue;
 	}
 
 	q->persistent.xef = xef;
@@ -762,14 +732,15 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
 	mutex_unlock(&xef->exec_queue.lock);
 	if (err)
-		goto put_exec_queue;
+		goto kill_exec_queue;
 
 	args->exec_queue_id = id;
 
 	return 0;
 
-put_exec_queue:
+kill_exec_queue:
 	xe_exec_queue_kill(q);
+put_exec_queue:
 	xe_exec_queue_put(q);
 	return err;
 }
@@ -73,12 +73,10 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_PERMANENT		BIT(2)
 /* queue keeps running pending jobs after destroy ioctl */
 #define EXEC_QUEUE_FLAG_PERSISTENT		BIT(3)
-/* queue for use with compute VMs */
-#define EXEC_QUEUE_FLAG_COMPUTE_MODE		BIT(4)
 /* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
-#define EXEC_QUEUE_FLAG_VM			BIT(5)
+#define EXEC_QUEUE_FLAG_VM			BIT(4)
 /* child of VM queue for multi-tile VM jobs */
-#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(6)
+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
 /**
  * @flags: flags for this exec queue, should statically setup aside from ban