/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 *   1. IOCTL definition
 *   2. Extension definition and helper structs
 *   3. IOCTL's Query structs in the order of the Query's entries.
 *   4. The rest of IOCTL structs in the order of IOCTL declaration.
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 *   ┌──────────────────────────────────────────────────────────────────┐
 *   │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 *   │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 *   │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 *   │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 *   │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 *   │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 *   │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 *   │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 *   │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 *   │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 *   └─────────────────────────────Device0───────┬──────────────────────┘
 *                                               │
 *                        ───────────────────────┴────────── PCI bus
 */

/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe Xe's IOCTL entries, their structs, and other
 * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
 * entries and usage.
 *
 * List of supported IOCTLs:
 *  - &DRM_IOCTL_XE_DEVICE_QUERY
 *  - &DRM_IOCTL_XE_GEM_CREATE
 *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *  - &DRM_IOCTL_XE_VM_CREATE
 *  - &DRM_IOCTL_XE_VM_DESTROY
 *  - &DRM_IOCTL_XE_VM_BIND
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *  - &DRM_IOCTL_XE_EXEC
 *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
 *  - &DRM_IOCTL_XE_OBSERVATION
 */

/*
 * xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_EXEC_QUEUE_CREATE	0x06
#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
#define DRM_XE_EXEC			0x09
#define DRM_XE_WAIT_USER_FENCE		0x0a
#define DRM_XE_OBSERVATION		0x0b

/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)

/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
*/

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct drm_xe_user_extension, or zero at the
	 * end of the chain.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct drm_xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of Xe's IOCTLs to be extended
 * with a set_property operation.
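 *
 * As one illustration (a sketch, not the only possible use), an exec queue
 * priority could be set at creation time by chaining one of these structs
 * into &drm_xe_exec_queue_create via its @extensions field:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property prio = {
 *         .base.next_extension = 0, // end of chain
 *         .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *         .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *         .value = 0, // assumed to be a valid priority for the caller
 *     };
 *     exec_queue_create.extensions = (uintptr_t)&prio;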
 */
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct drm_xe_user_extension base;
	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input for engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles.
 *
 * The @engine_class can be:
 *  - %DRM_XE_ENGINE_CLASS_RENDER
 *  - %DRM_XE_ENGINE_CLASS_COPY
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 *  - %DRM_XE_ENGINE_CLASS_COMPUTE
 *  - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
 *    hardware engine class). Used for creating ordered queues of VM
 *    bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
#define DRM_XE_ENGINE_CLASS_VM_BIND		5
	/** @engine_class: engine class id */
	__u16 engine_class;
	/** @engine_instance: engine instance id */
	__u16 engine_instance;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
	/** @instance: The @drm_xe_engine_class_instance */
	struct drm_xe_engine_class_instance instance;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
 * struct @drm_xe_query_engines in .data.
 */
struct drm_xe_query_engines {
	/** @num_engines: number of engines returned in @engines */
	__u32 num_engines;
	/** @pad: MBZ */
	__u32 pad;
	/** @engines: The returned engines for this device */
	struct drm_xe_engine engines[];
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The unique ID for this region, which serves as the
	 * index in the placement bitmask used as argument for
	 * &DRM_IOCTL_XE_GEM_CREATE
	 */
	__u16 instance;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 * Buffer objects with an allowable placement in this region must be
	 * created with a size aligned to this value.
	 * GPU virtual address mappings of (parts of) buffer objects that
	 * may be placed in this region must also have their GPU virtual
	 * address and range aligned to this value.
	 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
	 * not met.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting.  Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), the @cpu_visible_size
	 * will always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.  Note this is only currently tracked for
	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: Reserved */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
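 *
 * A minimal sketch of the usual two-call pattern (first call sizes the
 * buffer, second call fills it; error handling omitted) could look like:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_mem_regions *regions;
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     regions = malloc(query.size);
 *     query.data = (uintptr_t)regions;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (int i = 0; i < regions->num_mem_regions; i++)
 *         printf("region: instance %u, class %u\n",
 *                regions->mem_regions[i].instance,
 *                regions->mem_regions[i].mem_class);
 *     free(regions);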
 */
struct drm_xe_query_mem_regions {
	/** @num_mem_regions: number of memory regions returned in @mem_regions */
	__u32 num_mem_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @mem_regions: The returned memory regions for this device */
	struct drm_xe_mem_region mem_regions[];
};

/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *    and the device revision (next 8 bits)
 *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *    configuration, see list below
 *
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *      has usable VRAM
 *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *    required by this device, typically SZ_4K or SZ_64K
 *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *    available exec queue priority
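 *
 * As an illustration, once the two-call pattern shown for struct
 * drm_xe_device_query has filled a struct drm_xe_query_config pointer
 * named config (an assumed name), the fields could be unpacked like this:
 *
 * .. code-block:: C
 *
 *     __u16 dev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 *     __u16 rev = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff;
 *     int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
 *                       DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);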
 */
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
#define DRM_XE_QUERY_CONFIG_FLAGS			1
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM	(1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define DRM_XE_QUERY_CONFIG_VA_BITS			3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
	/** @info: array of elements containing the config info */
	__u64 info[];
};

/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with all the
 * existing GT individual descriptions.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The index in @type can be:
 *  - %DRM_XE_QUERY_GT_TYPE_MAIN
 *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN		0
#define DRM_XE_QUERY_GT_TYPE_MEDIA		1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @tile_id: Tile ID where this GT lives (Information only) */
	__u16 tile_id;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad[3];
	/** @reference_clock: A clock frequency for timestamp */
	__u32 reference_clock;
	/**
	 * @near_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are nearest to the current engines
	 * of this GT.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 near_mem_regions;
	/**
	 * @far_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are far from the engines of this GT.
	 * In general, they have extra indirections when compared to the
	 * @near_mem_regions. For a discrete device this could mean system
	 * memory and memory living in a different tile.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 far_mem_regions;
	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
	__u16 ip_ver_major;
	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
	__u16 ip_ver_minor;
	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
	__u16 ip_ver_rev;
	/** @pad2: MBZ */
	__u16 pad2;
	/** @reserved: Reserved */
	__u64 reserved[7];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_gt gt_list[];
};

/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *    (DSS) available for geometry operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for geometry.
 *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *    (DSS) available for compute operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for compute.
 *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks
 *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *    available per Dual Sub Slices (DSS). For example a query response
 *    containing the following in mask:
 *    ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 EU.
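 *
 * The reply in .data is a sequence of these variable-sized structs. A
 * sketch for walking them (assuming topo points to the query.size bytes
 * returned by &DRM_IOCTL_XE_DEVICE_QUERY) could be:
 *
 * .. code-block:: C
 *
 *     __u8 *ptr = (__u8 *)topo;
 *     __u8 *end = ptr + query.size;
 *
 *     while (ptr < end) {
 *         struct drm_xe_query_topology_mask *mask = (void *)ptr;
 *
 *         printf("gt %u type %u: %u mask bytes\n",
 *                mask->gt_id, mask->type, mask->num_bytes);
 *         ptr += sizeof(*mask) + mask->num_bytes;
 *     }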
 */
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY	1
#define DRM_XE_TOPO_DSS_COMPUTE		2
#define DRM_XE_TOPO_L3_BANK		3
#define DRM_XE_TOPO_EU_PER_DSS		4
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles, which along with GT's @reference_clock,
 * can be used to calculate the engine timestamp. In addition the
 * query returns a set of cpu timestamps that indicate when the command
 * streamer cycle count was captured.
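 *
 * A minimal input setup could be the sketch below, where the eci value is
 * assumed to come from a prior %DRM_XE_DEVICE_QUERY_ENGINES query:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engine_cycles cycles = {
 *         .eci = engines->engines[0].instance,
 *         .clockid = CLOCK_MONOTONIC,
 *     };
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
 *         .size = sizeof(cycles),
 *         .data = (uintptr_t)&cycles,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);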
 */
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which command
	 * streamer cycles is queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register
	 * at 0x358 offset.
	 */
	__u64 engine_cycles;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};

/**
 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
 *
 * Given a uc_type this will return the branch, major, minor and patch version
 * of the micro-controller firmware.
 */
struct drm_xe_query_uc_fw_version {
	/** @uc_type: The micro-controller type to query firmware version */
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
#define XE_QUERY_UC_TYPE_HUC 1
	__u16 uc_type;

	/** @pad: MBZ */
	__u16 pad;

	/** @branch_ver: branch uc fw version */
	__u32 branch_ver;
	/** @major_ver: major uc fw version */
	__u32 major_ver;
	/** @minor_ver: minor uc fw version */
	__u32 minor_ver;
	/** @patch_ver: patch uc fw version */
	__u32 patch_ver;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved;
};

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 *  - %DRM_XE_DEVICE_QUERY_ENGINES
 *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 *  - %DRM_XE_DEVICE_QUERY_CONFIG
 *  - %DRM_XE_DEVICE_QUERY_GT_LIST
 *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *    configuration of the device such as information on slices, memory,
 *    caches, and so on. It is provided as a table of key / value
 *    attributes.
 *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *  - %DRM_XE_DEVICE_QUERY_UC_FW_VERSION
 *  - %DRM_XE_DEVICE_QUERY_OA_UNITS
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engines *engines;
 *     struct drm_xe_device_query query = {
 *         .extensions = 0,
 *         .query = DRM_XE_DEVICE_QUERY_ENGINES,
 *         .size = 0,
 *         .data = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     engines = malloc(query.size);
 *     query.data = (uintptr_t)engines;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (int i = 0; i < engines->num_engines; i++) {
 *         printf("Engine %d: %s\n", i,
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_COPY ? "COPY":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
 *             "UNKNOWN");
 *     }
 *     free(engines);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION	7
#define DRM_XE_DEVICE_QUERY_OA_UNITS		8
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
 *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *    possible placement, ensure that the corresponding VRAM allocation
 *    will always use the CPU accessible part of VRAM. This is important
 *    for small-bar systems (on full-bar systems this gets turned into a
 *    noop).
 *    Note1: System memory can be used as an extra placement if the kernel
 *    should spill the allocation to system memory, if space can't be made
 *    available in the CPU accessible part of VRAM (giving the same
 *    behaviour as the i915 interface, see
 *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *    Note2: For clear-color CCS surfaces the kernel needs to read the
 *    clear-color value stored in the buffer, and on discrete platforms we
 *    need to use VRAM for display surfaces, therefore the kernel requires
 *    setting this flag for such objects, otherwise an error is thrown on
 *    small-bar systems.
 *
 * @cpu_caching supports the following values:
 *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *    caching. On iGPU this can't be used for scanout surfaces. Currently
 *    not allowed for objects placed in VRAM.
 *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *    is uncached. Scanout surfaces should likely use this. All objects
 *    that can be placed in VRAM must use this.
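 *
 * A sketch of creating a scanout-capable object in VRAM, with BO_SIZE and
 * vram_instance assumed to come from the memory regions query (size must
 * respect that region's min_page_size):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_create create = {
 *         .size = BO_SIZE,
 *         .placement = 1 << vram_instance,
 *         .flags = DRM_XE_GEM_CREATE_FLAG_SCANOUT |
 *                  DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
 *         .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *     // create.handle now holds the (nonzero) object handle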
 */
struct drm_xe_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Size of the object to be created, must match region
	 * (system or vram) minimum alignment (&min_page_size).
	 */
	__u64 size;

	/**
	 * @placement: A mask of memory instances of where BO can be placed.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
	/**
	 * @flags: Flags for this object, a combination of the
	 * DRM_XE_GEM_CREATE_FLAG_* values above
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Cannot be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB                      1
#define DRM_XE_GEM_CPU_CACHING_WC                      2
	/**
	 * @cpu_caching: The CPU caching mode to select for this object. If
	 * mmapping the object the mode selected here will also be used.
	 */
	__u16 cpu_caching;
	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
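 *
 * The returned fake offset is meant to be passed to mmap(2) on the device
 * fd. A minimal sketch, with bo and size assumed from a prior gem create:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_mmap_offset mmo = {
 *         .handle = bo,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *     void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, mmo.offset);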
 */
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
 *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 *    exec submissions to its exec_queues that don't have an upper time
 *    limit on the job execution time. But exec submissions to these
 *    don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
 *    DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
 *    used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
 *    LR VMs can be created in recoverable page-fault mode using
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *    If that flag is omitted, the UMD cannot rely on the slightly
 *    different per-VM overcommit semantics that are enabled by
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
 *    still enable recoverable pagefaults if supported by the device.
 *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *    demand when accessed, and also allows per-VM overcommit of memory.
 *    The xe driver internally uses recoverable pagefaults to implement
 *    this.
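 *
 * A short sketch of creating a long-running VM in fault mode, assuming the
 * device supports it:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_vm_create create = {
 *         .flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *                  DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *     // create.vm_id now identifies the new VM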
 */
struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE	        (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 *  - %DRM_XE_VM_BIND_OP_MAP
 *  - %DRM_XE_VM_BIND_OP_UNMAP
 *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 *  - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
 *    to ensure write protection
 *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
 *    MAP operation immediately rather than deferring the MAP to the page
 *    fault handler. This is implied on a non-faulting VM as there is no
 *    fault handler to defer to.
 *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *    tables are setup with a special bit which indicates writes are
 *    dropped and all reads return zero. In the future, the NULL flags
 *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *    handle MBZ, and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
 *  - %DRM_XE_VM_BIND_FLAG_DUMPABLE - When set, the bound memory is
 *    included in the devcoredump captured after a GPU hang.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/**
	 * @pat_index: The platform defined @pat_index to use for this mapping.
	 * The index basically maps to some predefined memory attributes,
	 * including things like caching, coherency, compression etc.  The exact
	 * meaning of the pat_index is platform specific and defined in the
	 * Bspec and PRMs.  When the KMD sets up the binding the index here is
	 * encoded into the ppGTT PTE.
	 *
	 * For coherency the @pat_index needs to be at least 1way coherent when
	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
	 * will extract the coherency mode from the @pat_index and reject if
	 * there is a mismatch (see note below for pre-MTL platforms).
	 *
	 * Note: On pre-MTL platforms there is only a caching mode and no
	 * explicit coherency mode, but on such hardware there is always a
	 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
	 * CPU caches even with the caching mode set as uncached.  It's only the
	 * display engine that is incoherent (on dgpu it must be in VRAM which
	 * is always mapped as WC on the CPU). However to keep the uapi somewhat
	 * consistent with newer platforms the KMD groups the different cache
	 * levels into the following coherency buckets on all pre-MTL platforms:
	 *
	 *	ppGTT UC -> COH_NONE
	 *	ppGTT WC -> COH_NONE
	 *	ppGTT WT -> COH_NONE
	 *	ppGTT WB -> COH_AT_LEAST_1WAY
	 *
	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
	 * such platforms (or perhaps in general for dma-buf if shared with
	 * another device) since it is only the display engine that is actually
	 * incoherent.  Everything else should typically use WB given that we
	 * have a shared-LLC.  On MTL+ this completely changes and the HW
	 * defines the coherency mode as part of the @pat_index, where
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
	 * either 1WAY or 2WAY for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
	 * mapped (the address in the PTE is invalid), so the various PAT memory
	 * attributes likely do not apply.  Simply leaving as zero is one
	 * option (still a valid pat_index).
	 */
	__u16 pat_index;

	/** @pad: MBZ */
	__u16 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

#define DRM_XE_VM_BIND_OP_MAP		0x0
#define DRM_XE_VM_BIND_OP_UNMAP		0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
	/** @flags: Bind flags */
	__u32 flags;

	/**
	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
	 * It is a region instance, not a mask.
	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
	 */
	__u32 prefetch_mem_region_instance;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
 * illustrate `userptr`. It can be synchronized by using the example
 * provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *     struct drm_xe_vm_bind bind = {
 *         .vm_id = vm,
 *         .num_binds = 1,
 *         .bind.obj = 0,
 *         .bind.obj_offset = to_user_pointer(data),
 *         .bind.range = BO_SIZE,
 *         .bind.addr = BIND_ADDRESS,
 *         .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *         .bind.flags = 0,
 *         .num_syncs = 1,
 *         .syncs = &sync,
 *         .exec_queue_id = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 */
struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and exec queue must have same vm_id. If zero, the default VM bind engine
	 * is used.
	 */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @pad2: MBZ */
	__u32 pad2;

	/** @num_syncs: amount of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_engine_class_instance instance = {
 *         .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *     };
 *     struct drm_xe_exec_queue_create exec_queue_create = {
 *          .extensions = 0,
 *          .vm_id = vm,
 *          .width = 1,
 *          .num_placements = 1,
 *          .instances = to_user_pointer(&instance),
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY		0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1

	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number of BBs per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
 */
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 *  - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_sync sync = {
 *         .flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *         .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *     };
 *     struct drm_syncobj_create syncobj_create = { 0 };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *     sync.handle = syncobj_create.handle;
 *         ...
 *         use of &sync in drm_xe_exec or drm_xe_vm_bind
 *         ...
 *     struct drm_syncobj_wait wait = {
 *         .handles = &sync.handle,
 *         .timeout_nsec = INT64_MAX,
 *         .count_handles = 1,
 *         .flags = 0,
 *         .first_signaled = 0,
 *         .pad = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
	/** @type: Type of this sync object */
	__u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
	/** @flags: Sync Flags */
	__u32 flags;

	union {
		/** @handle: Handle for the object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When sync is passed in via exec
		 * IOCTL this is a GPU address in the VM. When sync is passed in
		 * via the VM bind IOCTL this is a user pointer. In either case,
		 * it is the user's responsibility that this address is present
		 * and mapped when the user fence is signalled. Must be qword
		 * aligned.
		 */
		__u64 addr;
	};

	/**
	 * @timeline_value: Input for the timeline sync object. Needs to be
	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example to use @drm_xe_exec for execution of the object
 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
 * (see example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_exec exec = {
 *         .exec_queue_id = exec_queue,
 *         .syncs = &sync,
 *         .num_syncs = 1,
 *         .address = BIND_ADDRESS,
 *         .num_batch_buffer = 1,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 *
 */
struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Amount of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on user fence, XE will wake up on every HW engine interrupt in the
 * instances list and check if user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 *  - %DRM_XE_UFENCE_WAIT_OP_EQ
 *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
 *  - %DRM_XE_UFENCE_WAIT_OP_GT
 *  - %DRM_XE_UFENCE_WAIT_OP_GTE
 *  - %DRM_XE_UFENCE_WAIT_OP_LT
 *  - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *  - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
 *
 * The @mask values can be for example:
 *  - 0xffu for u8
 *  - 0xffffu for u16
 *  - 0xffffffffu for u32
 *  - 0xffffffffffffffffu for u64
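 *
 * A sketch of waiting for a qword at fence_addr (an assumed, qword-aligned
 * user pointer) to become equal to VALUE, with a relative 1 second timeout:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_wait_user_fence wait = {
 *         .addr = (uintptr_t)fence_addr,
 *         .op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *         .value = VALUE,
 *         .mask = 0xffffffffffffffffu,
 *         .timeout = 1000000000, // 1s in ns, relative timeout
 *         .exec_queue_id = exec_queue,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);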
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
#define DRM_XE_UFENCE_WAIT_OP_GT	0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
#define DRM_XE_UFENCE_WAIT_OP_LT	0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

	/** @mask: comparison mask */
	__u64 mask;

	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
	 * it contains timeout expressed in nanoseconds to wait (fence will
	 * expire at now() + timeout).
	 * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout)
	 * wait will end at timeout (uses system MONOTONIC_CLOCK).
	 * Passing negative timeout leads to a never-ending wait.
	 *
	 * On relative timeout this value is updated with timeout left
	 * (for restarting the call in case of signal delivery).
	 * On absolute timeout this value stays intact (restarted call still
	 * expires at the same point of time).
	 */
	__s64 timeout;

	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
	__u32 exec_queue_id;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
/**
 * enum drm_xe_observation_type - Observation stream types
 */
enum drm_xe_observation_type {
	/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
	DRM_XE_OBSERVATION_TYPE_OA,
};

/**
 * enum drm_xe_observation_op - Observation stream ops
 */
enum drm_xe_observation_op {
	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
	DRM_XE_OBSERVATION_OP_STREAM_OPEN,

	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
	DRM_XE_OBSERVATION_OP_ADD_CONFIG,

	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};

/**
 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
 *
 * The observation layer enables multiplexing observation streams of
 * multiple types. The actual params for a particular stream operation are
 * supplied via the @param pointer (use __copy_from_user to get these
 * params).
 */
struct drm_xe_observation_param {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
	__u64 observation_type;
	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
	__u64 observation_op;
	/** @param: Pointer to actual stream params */
	__u64 param;
};

/**
 * enum drm_xe_observation_ioctls - Observation stream fd ioctl's
 *
 * Information exchanged between userspace and kernel for observation fd
 * ioctl's is stream type specific
 */
enum drm_xe_observation_ioctls {
	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),

	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),

	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),

	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),

	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};

/**
 * enum drm_xe_oa_unit_type - OA unit types
 */
enum drm_xe_oa_unit_type {
	/**
	 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered
	 * sub-types of OAG. For OAR/OAC, use OAG.
	 */
	DRM_XE_OA_UNIT_TYPE_OAG,

	/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM,
};

/**
 * struct drm_xe_oa_unit - describe OA unit
 */
struct drm_xe_oa_unit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_unit_id: OA unit ID */
	__u32 oa_unit_id;

	/** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */
	__u32 oa_unit_type;

	/** @capabilities: OA capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_OA_CAPS_BASE		(1 << 0)

	/** @oa_timestamp_freq: OA timestamp freq */
	__u64 oa_timestamp_freq;

	/** @reserved: MBZ */
	__u64 reserved[4];

	/** @num_engines: number of engines in @eci array */
	__u64 num_engines;

	/** @eci: engines attached to this OA unit */
	struct drm_xe_engine_class_instance eci[];
};

/**
 * struct drm_xe_query_oa_units - describe OA units
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct
 * drm_xe_query_oa_units in .data.
 *
 * OA unit properties for all OA units can be accessed using a code block
 * such as the one below:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_oa_units *qoa;
 *	struct drm_xe_oa_unit *oau;
 *	u8 *poau;
 *
 *	// malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then:
 *	poau = (u8 *)&qoa->oa_units[0];
 *	for (int i = 0; i < qoa->num_oa_units; i++) {
 *		oau = (struct drm_xe_oa_unit *)poau;
 *		// Access 'struct drm_xe_oa_unit' fields here
 *		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
 *	}
 */
struct drm_xe_query_oa_units {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @num_oa_units: number of OA units returned in oau[] */
	__u32 num_oa_units;
	/** @pad: MBZ */
	__u32 pad;
	/**
	 * @oa_units: struct @drm_xe_oa_unit array returned for this device.
	 * Written below as a u64 array to avoid problems with nested flexible
	 * arrays with some compilers
	 */
	__u64 oa_units[];
};

/**
 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
 * 52198/60942
 */
enum drm_xe_oa_format_type {
	/** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */
	DRM_XE_OA_FMT_TYPE_OAG,
	/** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */
	DRM_XE_OA_FMT_TYPE_OAR,
	/** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */
	DRM_XE_OA_FMT_TYPE_OAM,
	/** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */
	DRM_XE_OA_FMT_TYPE_OAC,
	/** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */
	DRM_XE_OA_FMT_TYPE_OAM_MPEC,
	/** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */
	DRM_XE_OA_FMT_TYPE_PEC,
};

/**
 * enum drm_xe_oa_property_id - OA stream property id's
 *
 * Stream params are specified as a chain of @drm_xe_ext_set_property
 * struct's, with @property values from enum @drm_xe_oa_property_id and
 * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
 * @param field in struct @drm_xe_observation_param points to the first
 * @drm_xe_ext_set_property struct.
 *
 * Exactly the same mechanism is also used for stream reconfiguration using the
 * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
 * subset of properties below can be specified for stream reconfiguration.
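 *
 * A hedged sketch of opening an OA stream with two chained properties,
 * where config_id is assumed to come from a prior
 * @DRM_XE_OBSERVATION_OP_ADD_CONFIG call:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property props[] = {
 *         { .base.next_extension = (uintptr_t)&props[1],
 *           .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *           .property = DRM_XE_OA_PROPERTY_OA_UNIT_ID, .value = 0 },
 *         { .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *           .property = DRM_XE_OA_PROPERTY_OA_METRIC_SET, .value = config_id },
 *     };
 *     struct drm_xe_observation_param param = {
 *         .observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *         .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *         .param = (uintptr_t)props,
 *     };
 *     int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);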
 */
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY	0
	/**
	 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open
	 * the OA stream, see @oa_unit_id in 'struct
	 * drm_xe_query_oa_units'. Defaults to 0 if not provided.
	 */
	DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,

	/**
	 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw
	 * OA unit reports or stream samples in a global buffer attached to an
	 * OA unit.
	 */
	DRM_XE_OA_PROPERTY_SAMPLE_OA,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
	 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
	 */
	DRM_XE_OA_PROPERTY_OA_METRIC_SET,

	/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
	DRM_XE_OA_PROPERTY_OA_FORMAT,
	/*
	 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
	 * in terms of the following quantities: a. enum @drm_xe_oa_format_type
	 * b. Counter select c. Counter size and d. BC report. Also refer to the
	 * oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
	 */
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE		(0xff << 0)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL	(0xff << 8)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE	(0xff << 16)
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT		(0xff << 24)

	/**
	 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
	 * sampling with sampling frequency proportional to 2^(period_exponent + 1)
	 */
	DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
	 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
	 */
	DRM_XE_OA_PROPERTY_OA_DISABLED,

	/**
	 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
	 * @exec_queue_id. OA queries can be executed on this exec queue.
	 */
	DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to
	 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0.
	 */
	DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,

	/**
	 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing
	 * to be disabled for the stream exec queue.
	 */
	DRM_XE_OA_PROPERTY_NO_PREEMPT,
};

/**
 * struct drm_xe_oa_config - OA metric configuration
 *
 * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
 * particular config can be specified when opening an OA stream using
 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
 */
struct drm_xe_oa_config {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
	char uuid[36];

	/** @n_regs: Number of regs in @regs_ptr */
	__u32 n_regs;

	/**
	 * @regs_ptr: Pointer to (register address, value) pairs for OA config
	 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs).
	 */
	__u64 regs_ptr;
};

/**
 * struct drm_xe_oa_stream_status - OA stream status returned from
 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
 * call the ioctl to query stream status in response to EIO errno from
 * observation fd read().
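 *
 * A sketch of that pattern, with stream_fd assumed from an earlier stream
 * open:
 *
 * .. code-block:: C
 *
 *     if (read(stream_fd, buf, sizeof(buf)) < 0 && errno == EIO) {
 *         struct drm_xe_oa_stream_status status = { 0 };
 *
 *         ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_STATUS, &status);
 *         // inspect the DRM_XE_OASTATUS_* bits in status.oa_status
 *     }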
 */
struct drm_xe_oa_stream_status {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_status: OA stream status (see Bspec 46717/61226) */
	__u64 oa_status;
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL		(1 << 3)
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW	(1 << 2)
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW		(1 << 1)
#define DRM_XE_OASTATUS_REPORT_LOST		(1 << 0)

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * struct drm_xe_oa_stream_info - OA stream info returned from
 * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
 */
struct drm_xe_oa_stream_info {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_buf_size: OA buffer size */
	__u64 oa_buf_size;

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */