Commit 3ac4a789 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Add _FLAG to uAPI constants usable for flags

Most constants defined in xe_drm.h that can be used as flags are
named DRM_XE_*_FLAG_*, which helps identify them. Make this
systematic and add _FLAG where it was missing.
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent d5dc73db
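
For reference, a minimal userspace sketch (not part of the commit) showing how one of the renamed VM-creation flags is consumed. It assumes an already-open xe DRM file descriptor, the xe_drm.h from this series, and the DRM_IOCTL_XE_VM_CREATE wrapper; error handling is reduced to a return code.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Create a VM backed by a scratch page, using the renamed flag constant
 * (formerly DRM_XE_VM_CREATE_SCRATCH_PAGE). Returns 0 and fills *vm_id on
 * success, -1 on ioctl failure. */
static int xe_vm_create_with_scratch(int fd, __u32 *vm_id)
{
	struct drm_xe_vm_create create;

	memset(&create, 0, sizeof(create));
	create.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;

	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
		return -1;

	*vm_id = create.vm_id;
	return 0;
}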
@@ -110,14 +110,14 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return -EFAULT;
if (XE_IOCTL_DBG(xe, sync_in.flags &
~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_FLAG_SIGNAL)) ||
XE_IOCTL_DBG(xe, sync_in.pad) ||
XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
return -EINVAL;
signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
case DRM_XE_SYNC_SYNCOBJ:
case DRM_XE_SYNC_FLAG_SYNCOBJ:
if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
return -EOPNOTSUPP;
@@ -135,7 +135,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
}
break;
case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
return -EOPNOTSUPP;
@@ -165,12 +165,12 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
}
break;
case DRM_XE_SYNC_DMA_BUF:
case DRM_XE_SYNC_FLAG_DMA_BUF:
if (XE_IOCTL_DBG(xe, "TODO"))
return -EINVAL;
break;
case DRM_XE_SYNC_USER_FENCE:
case DRM_XE_SYNC_FLAG_USER_FENCE:
if (XE_IOCTL_DBG(xe, !signal))
return -EOPNOTSUPP;
@@ -225,7 +225,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_SIGNAL))
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
return;
if (sync->chain_fence) {
@@ -253,7 +253,7 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
dma_fence_put(fence);
}
} else if ((sync->flags & SYNC_FLAGS_TYPE_MASK) ==
DRM_XE_SYNC_USER_FENCE) {
DRM_XE_SYNC_FLAG_USER_FENCE) {
job->user_fence.used = true;
job->user_fence.addr = sync->addr;
job->user_fence.value = sync->timeline_value;
@@ -1920,10 +1920,10 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_COMPUTE_MODE | \
DRM_XE_VM_CREATE_ASYNC_DEFAULT | \
DRM_XE_VM_CREATE_FAULT_MODE)
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -1941,9 +1941,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
!xe->info.supports_usm))
return -EINVAL;
@@ -1953,32 +1953,32 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
xe_device_in_non_fault_mode(xe)))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
xe_device_in_fault_mode(xe)))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
flags |= XE_VM_FLAG_COMPUTE_MODE;
if (args->flags & DRM_XE_VM_CREATE_ASYNC_DEFAULT)
if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
flags |= XE_VM_FLAG_ASYNC_DEFAULT;
if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
flags |= XE_VM_FLAG_FAULT_MODE;
vm = xe_vm_create(xe, flags);
@@ -18,7 +18,7 @@
* Scratch page
* ------------
*
* If the VM is created with the flag, DRM_XE_VM_CREATE_SCRATCH_PAGE, set the
* If the VM is created with the flag, DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE, set the
* entire page table structure defaults pointing to blank page allocated by the
* VM. Invalid memory access rather than fault just read / write to this page.
*
@@ -79,8 +79,8 @@ static int check_hw_engines(struct xe_device *xe,
return 0;
}
#define VALID_FLAGS (DRM_XE_UFENCE_WAIT_SOFT_OP | \
DRM_XE_UFENCE_WAIT_ABSTIME)
#define VALID_FLAGS (DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | \
DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
#define MAX_OP DRM_XE_UFENCE_WAIT_LTE
static long to_jiffies_timeout(struct xe_device *xe,
@@ -107,7 +107,7 @@ static long to_jiffies_timeout(struct xe_device *xe,
* Save the timeout to an u64 variable because nsecs_to_jiffies
* might return a value that overflows s32 variable.
*/
if (args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)
if (args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
t = drm_timeout_abs_to_jiffies(args->timeout);
else
t = nsecs_to_jiffies(args->timeout);
@@ -137,7 +137,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->instances);
u64 addr = args->addr;
int err;
bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;
long timeout;
ktime_t start;
@@ -206,7 +206,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
}
remove_wait_queue(&xe->ufence_wq, &w_wait);
if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
if (!(args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)) {
args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout < 0)
args->timeout = 0;
@@ -585,10 +585,10 @@ struct drm_xe_vm_create {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (0x1 << 0)
#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (0x1 << 1)
#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (0x1 << 2)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (0x1 << 3)
/** @flags: Flags */
__u32 flags;
@@ -831,11 +831,11 @@ struct drm_xe_sync {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_SYNC_SYNCOBJ 0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_DMA_BUF 0x2
#define DRM_XE_SYNC_USER_FENCE 0x3
#define DRM_XE_SYNC_SIGNAL 0x10
#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
__u32 flags;
/** @pad: MBZ */
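
As a hedged illustration (not from the commit), a helper that fills struct drm_xe_sync using the renamed sync flags; it assumes the struct layout of this series' xe_drm.h, where the payload union exposes a handle member for syncobjs.

#include <string.h>
#include <drm/xe_drm.h>

/* Mark a sync entry as a syncobj to be signalled on completion, using the
 * renamed DRM_XE_SYNC_FLAG_* names (type bits plus the signal bit). */
static void xe_sync_init_signal_syncobj(struct drm_xe_sync *sync, __u32 syncobj_handle)
{
	memset(sync, 0, sizeof(*sync));
	sync->flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
	sync->handle = syncobj_handle;	/* e.g. from DRM_IOCTL_SYNCOBJ_CREATE */
}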
@@ -921,8 +921,8 @@ struct drm_xe_wait_user_fence {
/** @op: wait operation (type of comparison) */
__u16 op;
#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
/** @flags: wait flags */
__u16 flags;
@@ -940,10 +940,10 @@ struct drm_xe_wait_user_fence {
__u64 mask;
/**
* @timeout: how long to wait before bailing, value in nanoseconds.
* Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
* Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
* it contains timeout expressed in nanoseconds to wait (fence will
* expire at now() + timeout).
* When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait
* When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait
* will end at timeout (uses system MONOTONIC_CLOCK).
* Passing negative timeout leads to neverending wait.
*
@@ -956,13 +956,13 @@ struct drm_xe_wait_user_fence {
/**
* @num_engines: number of engine instances to wait on, must be zero
* when DRM_XE_UFENCE_WAIT_SOFT_OP set
* when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 num_engines;
/**
* @instances: user pointer to array of drm_xe_engine_class_instance to
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 instances;
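
Finally, a hedged sketch (again not part of the commit) of a soft-op user-fence wait with the renamed flag names; DRM_XE_UFENCE_WAIT_EQ and DRM_IOCTL_XE_WAIT_USER_FENCE are assumed to exist as in this series' xe_drm.h. With DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set, num_engines and instances stay zero/NULL, and OR-ing in DRM_XE_UFENCE_WAIT_FLAG_ABSTIME would turn the timeout into an absolute CLOCK_MONOTONIC deadline rather than a relative delay.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Wait until the 64-bit value at addr equals 'value', or the relative
 * timeout (in nanoseconds) expires. Returns the ioctl result. */
static int xe_wait_value_eq(int fd, __u64 addr, __u64 value, __s64 timeout_ns)
{
	struct drm_xe_wait_user_fence wait;

	memset(&wait, 0, sizeof(wait));
	wait.addr = addr;
	wait.op = DRM_XE_UFENCE_WAIT_EQ;		/* assumed comparison op */
	wait.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;	/* no engine instances */
	wait.value = value;
	wait.mask = ~0ull;				/* compare all 64 bits */
	wait.timeout = timeout_ns;			/* relative, see @timeout doc above */

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}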