Commit 3ac4a789 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Add _FLAG to uAPI constants usable for flags

Most constants defined in xe_drm.h that can be used as flags are
named DRM_XE_*_FLAG_*, which helps identify them. Make this naming
systematic and add _FLAG where it was missing.
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent d5dc73db
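
Since only the names change and the bit values stay the same, existing userspace needs a purely mechanical rename. A minimal sketch of the new spelling for a signalling binary-syncobj sync entry follows; the struct drm_xe_sync handle field and the <drm/xe_drm.h> install path are assumptions taken from the wider Xe uAPI, not from this diff.

#include <string.h>
#include <drm/xe_drm.h>         /* assumed install path of this uAPI header */

/* Hypothetical helper, not part of this patch: fill a signalling sync entry. */
static void init_signal_sync(struct drm_xe_sync *sync, __u32 syncobj_handle)
{
        memset(sync, 0, sizeof(*sync));
        /* was: DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL */
        sync->flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
        sync->handle = syncobj_handle;  /* assumed field name from xe_drm.h */
}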
@@ -110,14 +110,14 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 return -EFAULT;
         if (XE_IOCTL_DBG(xe, sync_in.flags &
-                         ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
+                         ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_FLAG_SIGNAL)) ||
             XE_IOCTL_DBG(xe, sync_in.pad) ||
             XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
                 return -EINVAL;
-        signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
+        signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
         switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
-        case DRM_XE_SYNC_SYNCOBJ:
+        case DRM_XE_SYNC_FLAG_SYNCOBJ:
                 if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;
@@ -135,7 +135,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 }
                 break;
-        case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
+        case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
                 if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;
@@ -165,12 +165,12 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 }
                 break;
-        case DRM_XE_SYNC_DMA_BUF:
+        case DRM_XE_SYNC_FLAG_DMA_BUF:
                 if (XE_IOCTL_DBG(xe, "TODO"))
                         return -EINVAL;
                 break;
-        case DRM_XE_SYNC_USER_FENCE:
+        case DRM_XE_SYNC_FLAG_USER_FENCE:
                 if (XE_IOCTL_DBG(xe, !signal))
                         return -EOPNOTSUPP;
@@ -225,7 +225,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
 void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
                           struct dma_fence *fence)
 {
-        if (!(sync->flags & DRM_XE_SYNC_SIGNAL))
+        if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
                 return;
         if (sync->chain_fence) {
@@ -253,7 +253,7 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
                         dma_fence_put(fence);
                 }
         } else if ((sync->flags & SYNC_FLAGS_TYPE_MASK) ==
-                   DRM_XE_SYNC_USER_FENCE) {
+                   DRM_XE_SYNC_FLAG_USER_FENCE) {
                 job->user_fence.used = true;
                 job->user_fence.addr = sync->addr;
                 job->user_fence.value = sync->timeline_value;
...
@@ -1920,10 +1920,10 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
         return 0;
 }
-#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
-                                    DRM_XE_VM_CREATE_COMPUTE_MODE | \
-                                    DRM_XE_VM_CREATE_ASYNC_DEFAULT | \
-                                    DRM_XE_VM_CREATE_FAULT_MODE)
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+                                    DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
+                                    DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
+                                    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file)
@@ -1941,9 +1941,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                 return -EINVAL;
         if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
-                args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
+                args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
-        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                          !xe->info.supports_usm))
                 return -EINVAL;
@@ -1953,32 +1953,32 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
         if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
                 return -EINVAL;
-        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
-                         args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+                         args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                 return -EINVAL;
-        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
-                         args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
+                         args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
                 return -EINVAL;
-        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+        if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
                          xe_device_in_non_fault_mode(xe)))
                 return -EINVAL;
-        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
+        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
                          xe_device_in_fault_mode(xe)))
                 return -EINVAL;
         if (XE_IOCTL_DBG(xe, args->extensions))
                 return -EINVAL;
-        if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
+        if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
                 flags |= XE_VM_FLAG_SCRATCH_PAGE;
-        if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
+        if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
                 flags |= XE_VM_FLAG_COMPUTE_MODE;
-        if (args->flags & DRM_XE_VM_CREATE_ASYNC_DEFAULT)
+        if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
                 flags |= XE_VM_FLAG_ASYNC_DEFAULT;
-        if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
+        if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
                 flags |= XE_VM_FLAG_FAULT_MODE;
         vm = xe_vm_create(xe, flags);
...
@@ -18,7 +18,7 @@
  * Scratch page
  * ------------
  *
- * If the VM is created with the flag, DRM_XE_VM_CREATE_SCRATCH_PAGE, set the
+ * If the VM is created with the flag, DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE, set the
  * entire page table structure defaults pointing to blank page allocated by the
  * VM. Invalid memory access rather than fault just read / write to this page.
  *
...
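
As a usage illustration of the renamed VM-create flag described above, here is a hedged userspace sketch; DRM_IOCTL_XE_VM_CREATE, the vm_id output field, and the header install path come from the wider Xe uAPI and are assumptions, not part of this hunk.

#include <sys/ioctl.h>
#include <drm/xe_drm.h>         /* assumed install path of this uAPI header */

/* Hypothetical helper, not part of this patch: create a VM backed by a scratch page. */
static int create_scratch_vm(int xe_fd, __u32 *vm_id)
{
        struct drm_xe_vm_create create = {
                /* was: DRM_XE_VM_CREATE_SCRATCH_PAGE */
                .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
        };

        if (ioctl(xe_fd, DRM_IOCTL_XE_VM_CREATE, &create))
                return -1;

        *vm_id = create.vm_id;  /* assumed output field */
        return 0;
}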
@@ -79,8 +79,8 @@ static int check_hw_engines(struct xe_device *xe,
         return 0;
 }
-#define VALID_FLAGS     (DRM_XE_UFENCE_WAIT_SOFT_OP | \
-                         DRM_XE_UFENCE_WAIT_ABSTIME)
+#define VALID_FLAGS     (DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | \
+                         DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
 #define MAX_OP          DRM_XE_UFENCE_WAIT_LTE
 static long to_jiffies_timeout(struct xe_device *xe,
@@ -107,7 +107,7 @@ static long to_jiffies_timeout(struct xe_device *xe,
          * Save the timeout to an u64 variable because nsecs_to_jiffies
          * might return a value that overflows s32 variable.
          */
-        if (args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)
+        if (args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
                 t = drm_timeout_abs_to_jiffies(args->timeout);
         else
                 t = nsecs_to_jiffies(args->timeout);
@@ -137,7 +137,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
                 u64_to_user_ptr(args->instances);
         u64 addr = args->addr;
         int err;
-        bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
+        bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;
         long timeout;
         ktime_t start;
@@ -206,7 +206,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
         }
         remove_wait_queue(&xe->ufence_wq, &w_wait);
-        if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
+        if (!(args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)) {
                 args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
                 if (args->timeout < 0)
                         args->timeout = 0;
...
@@ -585,10 +585,10 @@ struct drm_xe_vm_create {
         /** @extensions: Pointer to the first extension struct, if any */
         __u64 extensions;
-#define DRM_XE_VM_CREATE_SCRATCH_PAGE           (0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE           (0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT          (0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE             (0x1 << 3)
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE      (0x1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE      (0x1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT     (0x1 << 2)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE        (0x1 << 3)
         /** @flags: Flags */
         __u32 flags;
@@ -831,11 +831,11 @@ struct drm_xe_sync {
         /** @extensions: Pointer to the first extension struct, if any */
         __u64 extensions;
-#define DRM_XE_SYNC_SYNCOBJ                     0x0
-#define DRM_XE_SYNC_TIMELINE_SYNCOBJ            0x1
-#define DRM_XE_SYNC_DMA_BUF                     0x2
-#define DRM_XE_SYNC_USER_FENCE                  0x3
-#define DRM_XE_SYNC_SIGNAL                      0x10
+#define DRM_XE_SYNC_FLAG_SYNCOBJ                0x0
+#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ       0x1
+#define DRM_XE_SYNC_FLAG_DMA_BUF                0x2
+#define DRM_XE_SYNC_FLAG_USER_FENCE             0x3
+#define DRM_XE_SYNC_FLAG_SIGNAL                 0x10
         __u32 flags;
         /** @pad: MBZ */
@@ -921,8 +921,8 @@ struct drm_xe_wait_user_fence {
         /** @op: wait operation (type of comparison) */
         __u16 op;
-#define DRM_XE_UFENCE_WAIT_SOFT_OP      (1 << 0)        /* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_ABSTIME      (1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0)        /* e.g. Wait on VM bind */
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
         /** @flags: wait flags */
         __u16 flags;
@@ -940,10 +940,10 @@ struct drm_xe_wait_user_fence {
         __u64 mask;
         /**
          * @timeout: how long to wait before bailing, value in nanoseconds.
-         * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
+         * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
          * it contains timeout expressed in nanoseconds to wait (fence will
          * expire at now() + timeout).
-         * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait
+         * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait
          * will end at timeout (uses system MONOTONIC_CLOCK).
          * Passing negative timeout leads to neverending wait.
          *
@@ -956,13 +956,13 @@ struct drm_xe_wait_user_fence {
         /**
          * @num_engines: number of engine instances to wait on, must be zero
-         * when DRM_XE_UFENCE_WAIT_SOFT_OP set
+         * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
          */
         __u64 num_engines;
         /**
          * @instances: user pointer to array of drm_xe_engine_class_instance to
-         * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
+         * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
          */
         __u64 instances;
...
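
To show the renamed wait flags in context, here is a hedged sketch of a soft-op, absolute-timeout wait descriptor, consistent with the kernel-doc above (num_engines must be zero and instances NULL when the soft-op flag is set); the op, value, and mask constants and any field not visible in this diff are assumptions.

#include <string.h>
#include <drm/xe_drm.h>         /* assumed install path of this uAPI header */

/* Hypothetical helper, not part of this patch: describe a wait on a VM bind user fence. */
static void init_bind_wait(struct drm_xe_wait_user_fence *wait,
                           __u64 fence_addr, __s64 deadline_ns)
{
        memset(wait, 0, sizeof(*wait));
        wait->addr = fence_addr;                /* assumed field: qword written on completion */
        wait->op = DRM_XE_UFENCE_WAIT_EQ;       /* assumed comparison-op constant */
        wait->flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP |
                      DRM_XE_UFENCE_WAIT_FLAG_ABSTIME;
        wait->value = 1;                        /* assumed field: value to compare against */
        wait->mask = DRM_XE_UFENCE_WAIT_U64;    /* assumed mask constant */
        wait->timeout = deadline_ns;            /* absolute MONOTONIC deadline, per the doc */
        wait->num_engines = 0;                  /* must be zero with the soft-op flag */
        wait->instances = 0;                    /* must be NULL with the soft-op flag */
}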