Commit d5dc73db authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Add missing DRM_ prefix in uAPI constants

Most constants defined in xe_drm.h use the DRM_XE_ prefix, which helps
identify the namespace. Make this systematic and add the prefix where it
was missing.

v2:
- fix vertical alignment of define values
- remove double DRM_ in some variables (José Roberto de Souza)

v3: Rebase
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent b646ce9c
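Userspace that still compiles against the old names could, in principle, carry a small compatibility shim until it is converted. The sketch below is hypothetical and is not part of this commit; it only aliases names that appear in the diff onto their new DRM_XE_ definitions from xe_drm.h, so no values are redefined:

/* Hypothetical compatibility aliases for code not yet converted to the
 * DRM_XE_-prefixed names introduced by this commit. Each alias refers to
 * the new definition in xe_drm.h, so the underlying values stay in sync. */
#ifndef XE_GEM_CREATE_FLAG_SCANOUT
#define XE_GEM_CREATE_FLAG_SCANOUT       DRM_XE_GEM_CREATE_FLAG_SCANOUT
#endif
#ifndef XE_GEM_CREATE_FLAG_DEFER_BACKING
#define XE_GEM_CREATE_FLAG_DEFER_BACKING DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
#endif
#ifndef XE_VM_BIND_OP_MAP
#define XE_VM_BIND_OP_MAP                DRM_XE_VM_BIND_OP_MAP
#endif
#ifndef XE_VM_BIND_FLAG_ASYNC
#define XE_VM_BIND_FLAG_ASYNC            DRM_XE_VM_BIND_FLAG_ASYNC
#endif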
@@ -209,7 +209,7 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 /* The order of placements should indicate preferred location */
-if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
+if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
 try_add_system(bo, places, bo_flags, &c);
 try_add_vram(xe, bo, places, bo_flags, &c);
 } else {
@@ -1814,9 +1814,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 return -EINVAL;
 if (XE_IOCTL_DBG(xe, args->flags &
-~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
-XE_GEM_CREATE_FLAG_SCANOUT |
-XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
+~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
+DRM_XE_GEM_CREATE_FLAG_SCANOUT |
+DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
 xe->info.mem_region_mask)))
 return -EINVAL;
@@ -1836,15 +1836,15 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
 return -EINVAL;
-if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
+if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
 bo_flags |= XE_BO_DEFER_BACKING;
-if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
+if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
 bo_flags |= XE_BO_SCANOUT_BIT;
 bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
-if (args->flags & XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
+if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
 return -EINVAL;
@@ -393,7 +393,7 @@ static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_q
 if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
 return -EINVAL;
-if (value > XE_ACC_GRANULARITY_64M)
+if (value > DRM_XE_ACC_GRANULARITY_64M)
 return -EINVAL;
 q->usm.acc_granularity = value;
@@ -406,14 +406,14 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 u64 value, bool create);
 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
-[XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
-[XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
-[XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
-[XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
-[XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
-[XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
-[XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
-[XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
+[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
 };
 static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -445,7 +445,7 @@ typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
 bool create);
 static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
-[XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
+[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
 };
 #define MAX_USER_EXTENSIONS 16
@@ -764,7 +764,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
 return -ENOENT;
 switch (args->property) {
-case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
+case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
 args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
 ret = 0;
 break;
@@ -560,7 +560,7 @@ static void xe_uevent_gt_reset_failure(struct pci_dev *pdev, u8 tile_id, u8 gt_i
 {
 char *reset_event[4];
-reset_event[0] = XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
+reset_event[0] = DRM_XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
 reset_event[1] = kasprintf(GFP_KERNEL, "TILE_ID=%d", tile_id);
 reset_event[2] = kasprintf(GFP_KERNEL, "GT_ID=%d", gt_id);
 reset_event[3] = NULL;
@@ -17,12 +17,12 @@ static unsigned int xe_pmu_target_cpu = -1;
 static unsigned int config_gt_id(const u64 config)
 {
-return config >> __XE_PMU_GT_SHIFT;
+return config >> __DRM_XE_PMU_GT_SHIFT;
 }
 static u64 config_counter(const u64 config)
 {
-return config & ~(~0ULL << __XE_PMU_GT_SHIFT);
+return config & ~(~0ULL << __DRM_XE_PMU_GT_SHIFT);
 }
 static void xe_pmu_event_destroy(struct perf_event *event)
@@ -114,13 +114,13 @@ config_status(struct xe_device *xe, u64 config)
 return -ENOENT;
 switch (config_counter(config)) {
-case XE_PMU_RENDER_GROUP_BUSY(0):
-case XE_PMU_COPY_GROUP_BUSY(0):
-case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
+case DRM_XE_PMU_RENDER_GROUP_BUSY(0):
+case DRM_XE_PMU_COPY_GROUP_BUSY(0):
+case DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
 if (gt->info.type == XE_GT_TYPE_MEDIA)
 return -ENOENT;
 break;
-case XE_PMU_MEDIA_GROUP_BUSY(0):
+case DRM_XE_PMU_MEDIA_GROUP_BUSY(0):
 if (!(gt->info.engine_mask & (BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0))))
 return -ENOENT;
 break;
@@ -180,10 +180,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
 u64 val;
 switch (config_counter(config)) {
-case XE_PMU_RENDER_GROUP_BUSY(0):
-case XE_PMU_COPY_GROUP_BUSY(0):
-case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
-case XE_PMU_MEDIA_GROUP_BUSY(0):
+case DRM_XE_PMU_RENDER_GROUP_BUSY(0):
+case DRM_XE_PMU_COPY_GROUP_BUSY(0):
+case DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
+case DRM_XE_PMU_MEDIA_GROUP_BUSY(0):
 val = engine_group_busyness_read(gt, config);
 break;
 default:
@@ -369,7 +369,7 @@ create_event_attributes(struct xe_pmu *pmu)
 /* Count how many counters we will be exposing. */
 for_each_gt(gt, xe, j) {
 for (i = 0; i < ARRAY_SIZE(events); i++) {
-u64 config = ___XE_PMU_OTHER(j, events[i].counter);
+u64 config = ___DRM_XE_PMU_OTHER(j, events[i].counter);
 if (!config_status(xe, config))
 count++;
@@ -396,7 +396,7 @@ create_event_attributes(struct xe_pmu *pmu)
 for_each_gt(gt, xe, j) {
 for (i = 0; i < ARRAY_SIZE(events); i++) {
-u64 config = ___XE_PMU_OTHER(j, events[i].counter);
+u64 config = ___DRM_XE_PMU_OTHER(j, events[i].counter);
 char *str;
 if (config_status(xe, config))
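The xe_pmu.c hunks above split a perf event config into a GT index (upper bits, shifted down by __DRM_XE_PMU_GT_SHIFT) and a counter id (lower bits). Below is a minimal standalone sketch of that split; the shift value of 56 and the pack_config() helper are assumptions for illustration, standing in for the real __DRM_XE_PMU_GT_SHIFT and ___DRM_XE_PMU_OTHER() definitions in xe_drm.h.

#include <stdint.h>
#include <stdio.h>

#define GT_SHIFT 56 /* assumed for illustration; the real __DRM_XE_PMU_GT_SHIFT may differ */

/* Hypothetical stand-in for ___DRM_XE_PMU_OTHER(gt, counter): GT index in
 * the upper bits, counter id in the lower bits. */
static uint64_t pack_config(uint64_t gt, uint64_t counter)
{
        return (gt << GT_SHIFT) | counter;
}

/* Mirrors config_gt_id() and config_counter() from the hunk above. */
static unsigned int config_gt_id(uint64_t config)
{
        return config >> GT_SHIFT;
}

static uint64_t config_counter(uint64_t config)
{
        return config & ~(~0ULL << GT_SHIFT);
}

int main(void)
{
        uint64_t config = pack_config(1, 0x10);

        printf("gt=%u counter=0x%llx\n", config_gt_id(config),
               (unsigned long long)config_counter(config));
        return 0;
}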
@@ -261,7 +261,7 @@ static int query_memory_usage(struct xe_device *xe,
 return -ENOMEM;
 man = ttm_manager_type(&xe->ttm, XE_PL_TT);
-usage->regions[0].mem_class = XE_MEM_REGION_CLASS_SYSMEM;
+usage->regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
 usage->regions[0].instance = 0;
 usage->regions[0].min_page_size = PAGE_SIZE;
 usage->regions[0].total_size = man->size << PAGE_SHIFT;
@@ -273,7 +273,7 @@ static int query_memory_usage(struct xe_device *xe,
 man = ttm_manager_type(&xe->ttm, i);
 if (man) {
 usage->regions[usage->num_regions].mem_class =
-XE_MEM_REGION_CLASS_VRAM;
+DRM_XE_MEM_REGION_CLASS_VRAM;
 usage->regions[usage->num_regions].instance =
 usage->num_regions;
 usage->regions[usage->num_regions].min_page_size =
@@ -305,7 +305,7 @@ static int query_memory_usage(struct xe_device *xe,
 static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 {
-const u32 num_params = XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
+const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
 size_t size =
 sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
 struct drm_xe_query_config __user *query_ptr =
@@ -324,15 +324,15 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 return -ENOMEM;
 config->num_params = num_params;
-config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
+config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
 xe->info.devid | (xe->info.revid << 16);
 if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
-config->info[XE_QUERY_CONFIG_FLAGS] =
-XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
-config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT] =
+config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
+config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
-config->info[XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
-config->info[XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
+config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
+config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
 xe_exec_queue_device_get_max_priority(xe);
 if (copy_to_user(query_ptr, config, size)) {
@@ -368,9 +368,9 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
 gt_list->num_gt = xe->info.gt_count;
 for_each_gt(gt, xe, id) {
 if (xe_gt_is_media_type(gt))
-gt_list->gt_list[id].type = XE_QUERY_GT_TYPE_MEDIA;
+gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
 else
-gt_list->gt_list[id].type = XE_QUERY_GT_TYPE_MAIN;
+gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
 gt_list->gt_list[id].gt_id = gt->info.id;
 gt_list->gt_list[id].clock_freq = gt->info.clock_freq;
 if (!IS_DGFX(xe))
@@ -468,21 +468,21 @@ static int query_gt_topology(struct xe_device *xe,
 for_each_gt(gt, xe, id) {
 topo.gt_id = id;
-topo.type = XE_TOPO_DSS_GEOMETRY;
+topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
 query_ptr = copy_mask(query_ptr, &topo,
 gt->fuse_topo.g_dss_mask,
 sizeof(gt->fuse_topo.g_dss_mask));
 if (IS_ERR(query_ptr))
 return PTR_ERR(query_ptr);
-topo.type = XE_TOPO_DSS_COMPUTE;
+topo.type = DRM_XE_TOPO_DSS_COMPUTE;
 query_ptr = copy_mask(query_ptr, &topo,
 gt->fuse_topo.c_dss_mask,
 sizeof(gt->fuse_topo.c_dss_mask));
 if (IS_ERR(query_ptr))
 return PTR_ERR(query_ptr);
-topo.type = XE_TOPO_EU_PER_DSS;
+topo.type = DRM_XE_TOPO_EU_PER_DSS;
 query_ptr = copy_mask(query_ptr, &topo,
 gt->fuse_topo.eu_mask_per_dss,
 sizeof(gt->fuse_topo.eu_mask_per_dss));
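The xe_query.c hunks above fill the info[] array returned for a device config query, indexed by the renamed DRM_XE_QUERY_CONFIG_* constants. A hedged userspace sketch of the usual two-call query pattern follows; the header path, ioctl name and struct fields are taken from xe_drm.h and should be treated as assumptions for this exact snapshot of the uAPI.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h> /* header path may differ depending on the install */

/* Sketch only: query the device config and print DRM_XE_QUERY_CONFIG_VA_BITS.
 * 'fd' is an already-open xe render node. */
void print_va_bits(int fd)
{
        struct drm_xe_device_query query = {
                .query = DRM_XE_DEVICE_QUERY_CONFIG,
        };
        struct drm_xe_query_config *config;

        /* First call with size == 0: the kernel reports the buffer size. */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                return;

        config = calloc(1, query.size);
        if (!config)
                return;
        query.data = (uintptr_t)config;

        /* Second call fills the buffer. */
        if (!ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                printf("va_bits: %llu\n",
                       (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);

        free(config);
}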
@@ -2177,8 +2177,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 (ULL)bo_offset_or_userptr);
 switch (operation) {
-case XE_VM_BIND_OP_MAP:
-case XE_VM_BIND_OP_MAP_USERPTR:
+case DRM_XE_VM_BIND_OP_MAP:
+case DRM_XE_VM_BIND_OP_MAP_USERPTR:
 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
 obj, bo_offset_or_userptr);
 if (IS_ERR(ops))
@@ -2189,13 +2189,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 op->tile_mask = tile_mask;
 op->map.immediate =
-flags & XE_VM_BIND_FLAG_IMMEDIATE;
+flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
 op->map.read_only =
-flags & XE_VM_BIND_FLAG_READONLY;
-op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
+flags & DRM_XE_VM_BIND_FLAG_READONLY;
+op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 }
 break;
-case XE_VM_BIND_OP_UNMAP:
+case DRM_XE_VM_BIND_OP_UNMAP:
 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
 if (IS_ERR(ops))
 return ops;
@@ -2206,7 +2206,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 op->tile_mask = tile_mask;
 }
 break;
-case XE_VM_BIND_OP_PREFETCH:
+case DRM_XE_VM_BIND_OP_PREFETCH:
 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
 if (IS_ERR(ops))
 return ops;
@@ -2218,7 +2218,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 op->prefetch.region = region;
 }
 break;
-case XE_VM_BIND_OP_UNMAP_ALL:
+case DRM_XE_VM_BIND_OP_UNMAP_ALL:
 xe_assert(vm->xe, bo);
 err = xe_bo_lock(bo, true);
@@ -2828,13 +2828,13 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS \
-(FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
-XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
-XE_VM_BIND_FLAG_NULL | 0xffff)
+(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
+DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS \
-(XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
-XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \
+(DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
 0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
@@ -2882,45 +2882,45 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 u32 obj = (*bind_ops)[i].obj;
 u64 obj_offset = (*bind_ops)[i].obj_offset;
 u32 region = (*bind_ops)[i].region;
-bool is_null = flags & XE_VM_BIND_FLAG_NULL;
+bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 if (i == 0) {
-*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
+*async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
 if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
 err = -EINVAL;
 goto free_bind_ops;
 }
 } else if (XE_IOCTL_DBG(xe, *async !=
-!!(flags & XE_VM_BIND_FLAG_ASYNC))) {
+!!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
 err = -EINVAL;
 goto free_bind_ops;
 }
-if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
+if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
 XE_IOCTL_DBG(xe, obj && is_null) ||
 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
+XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
 is_null) ||
 XE_IOCTL_DBG(xe, !obj &&
-op == XE_VM_BIND_OP_MAP &&
+op == DRM_XE_VM_BIND_OP_MAP &&
 !is_null) ||
 XE_IOCTL_DBG(xe, !obj &&
-op == XE_VM_BIND_OP_UNMAP_ALL) ||
+op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 XE_IOCTL_DBG(xe, addr &&
-op == XE_VM_BIND_OP_UNMAP_ALL) ||
+op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 XE_IOCTL_DBG(xe, range &&
-op == XE_VM_BIND_OP_UNMAP_ALL) ||
+op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 XE_IOCTL_DBG(xe, obj &&
-op == XE_VM_BIND_OP_MAP_USERPTR) ||
+op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
 XE_IOCTL_DBG(xe, obj &&
-op == XE_VM_BIND_OP_PREFETCH) ||
+op == DRM_XE_VM_BIND_OP_PREFETCH) ||
 XE_IOCTL_DBG(xe, region &&
-op != XE_VM_BIND_OP_PREFETCH) ||
+op != DRM_XE_VM_BIND_OP_PREFETCH) ||
 XE_IOCTL_DBG(xe, !(BIT(region) &
 xe->info.mem_region_mask)) ||
 XE_IOCTL_DBG(xe, obj &&
-op == XE_VM_BIND_OP_UNMAP)) {
+op == DRM_XE_VM_BIND_OP_UNMAP)) {
 err = -EINVAL;
 goto free_bind_ops;
 }
@@ -2929,7 +2929,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
 XE_IOCTL_DBG(xe, !range &&
-op != XE_VM_BIND_OP_UNMAP_ALL)) {
+op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
 err = -EINVAL;
 goto free_bind_ops;
 }
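The vm_bind_ioctl_check_args() hunk above validates each bind operation against the renamed op and flag constants (page-aligned addr/range, object handles only where the op allows them, a region only for PREFETCH, and so on). Below is a hedged sketch of filling one such bind op from userspace; the obj, obj_offset, addr, range and region names are visible in the hunk, while the struct name and the separate op/flags fields are assumptions about xe_drm.h at this snapshot:

#include <stdint.h>
#include <string.h>

#include <drm/xe_drm.h> /* header path may differ */

/* Sketch: describe an async mapping of a whole BO at a fixed GPU address.
 * Submitting it through DRM_IOCTL_XE_VM_BIND is not shown here. */
static struct drm_xe_vm_bind_op make_map_op(uint32_t bo_handle,
                                            uint64_t gpu_addr, uint64_t size)
{
        struct drm_xe_vm_bind_op op;

        memset(&op, 0, sizeof(op));
        op.obj = bo_handle;
        op.obj_offset = 0;
        op.addr = gpu_addr;     /* must be page aligned, per the checks above */
        op.range = size;        /* must be page aligned and non-zero for MAP */
        op.op = DRM_XE_VM_BIND_OP_MAP;
        op.flags = DRM_XE_VM_BIND_FLAG_ASYNC;
        return op;
}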
@@ -32,9 +32,9 @@
 * Operations
 * ----------
 *
-* XE_VM_BIND_OP_MAP - Create mapping for a BO
-* XE_VM_BIND_OP_UNMAP - Destroy mapping for a BO / userptr
-* XE_VM_BIND_OP_MAP_USERPTR - Create mapping for userptr
+* DRM_XE_VM_BIND_OP_MAP - Create mapping for a BO
+* DRM_XE_VM_BIND_OP_UNMAP - Destroy mapping for a BO / userptr
+* DRM_XE_VM_BIND_OP_MAP_USERPTR - Create mapping for userptr
 *
 * Implementation details
 * ~~~~~~~~~~~~~~~~~~~~~~
@@ -113,7 +113,7 @@
 * VM uses to report errors to. The ufence wait interface can be used to wait on
 * a VM going into an error state. Once an error is reported the VM's async
 * worker is paused. While the VM's async worker is paused sync,
-* XE_VM_BIND_OP_UNMAP operations are allowed (this can free memory). Once the
+* DRM_XE_VM_BIND_OP_UNMAP operations are allowed (this can free memory). Once the
 * uses believe the error state is fixed, the async worker can be resumed via
 * XE_VM_BIND_OP_RESTART operation. When VM async bind work is restarted, the
 * first operation processed is the operation that caused the original error.
@@ -193,7 +193,7 @@
 * In a VM is in fault mode (TODO: link to fault mode), new bind operations that
 * create mappings are by default are deferred to the page fault handler (first
 * use). This behavior can be overriden by setting the flag
-* XE_VM_BIND_FLAG_IMMEDIATE which indicates to creating the mapping
+* DRM_XE_VM_BIND_FLAG_IMMEDIATE which indicates to creating the mapping
 * immediately.
 *
 * User pointer
@@ -322,7 +322,7 @@
 *
 * By default, on a faulting VM binds just allocate the VMA and the actual
 * updating of the page tables is defered to the page fault handler. This
-* behavior can be overridden by setting the flag XE_VM_BIND_FLAG_IMMEDIATE in
+* behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
 * the VM bind which will then do the bind immediately.
 *
 * Page fault handler