Commit b76c01f1 authored by Dave Airlie

Merge tag 'drm-intel-gt-next-2023-12-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

- Eliminate use of kmap_atomic() in i915 (Zhao); the conversion pattern is sketched below
- Add Wa_14019877138 for DG2 (Haridhar)
- Static checker and spelling fixes (Colin, Karthik, Randy)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZXxCibZZQqlqhDN3@jlahtine-mobl.ger.corp.intel.com
parents e54478fb 31accc37
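
Note on the kmap_atomic() elimination listed above: kmap_atomic() is deprecated in favour of kmap_local_page(), which maps a page only for the current CPU context and does not implicitly disable preemption or pagefaults. A minimal sketch of the conversion pattern (hypothetical helper names, not code from this series):

	#include <linux/highmem.h>

	/* Old style: the mapping also disabled pagefaults and preemption. */
	static void copy_page_old(struct page *page, void *dst)
	{
		void *src = kmap_atomic(page);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
	}

	/* New style: purely CPU-local mapping, no implicit side effects. */
	static void copy_page_new(struct page *page, void *dst)
	{
		void *src = kmap_local_page(page);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_local(src);
	}

Where the old implicit pagefault disabling was being relied on, it now has to be requested explicitly, as in the shmem_pwrite() hunk further down.
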
@@ -1159,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
 	vaddr = unmask_page(cache->vaddr);
 	if (cache->vaddr & KMAP)
-		kunmap_atomic(vaddr);
+		kunmap_local(vaddr);
 	else
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
 }
@@ -1175,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
 	if (cache->vaddr & KMAP) {
 		struct page *page = i915_gem_object_get_page(obj, cache->page);
-		vaddr = kmap_atomic(page);
+		vaddr = kmap_local_page(page);
 		cache->vaddr = unmask_flags(cache->vaddr) |
 			       (unsigned long)vaddr;
 	} else {
@@ -1205,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
 		if (cache->vaddr & CLFLUSH_AFTER)
 			mb();
-		kunmap_atomic(vaddr);
+		kunmap_local(vaddr);
 		i915_gem_object_finish_access(obj);
 	} else {
 		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1237,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	struct page *page;
 	if (cache->vaddr) {
-		kunmap_atomic(unmask_page(cache->vaddr));
+		kunmap_local(unmask_page(cache->vaddr));
 	} else {
 		unsigned int flushes;
 		int err;
@@ -1259,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	if (!obj->mm.dirty)
 		set_page_dirty(page);
-	vaddr = kmap_atomic(page);
+	vaddr = kmap_local_page(page);
 	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
 	cache->page = pageno;
...
@@ -500,17 +500,15 @@ static void
 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
 	pgoff_t idx = offset >> PAGE_SHIFT;
-	void *src_map;
 	void *src_ptr;
-	src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-	src_ptr = src_map + offset_in_page(offset);
+	src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+		  + offset_in_page(offset);
 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
 		drm_clflush_virt_range(src_ptr, size);
 	memcpy(dst, src_ptr, size);
-	kunmap_atomic(src_map);
+	kunmap_local(src_ptr);
 }
 static void
...
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	dst = vaddr;
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
-		void *src;
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			goto err_st;
-		src = kmap_atomic(page);
-		memcpy(dst, src, PAGE_SIZE);
+		memcpy_from_page(dst, page, 0, PAGE_SIZE);
 		drm_clflush_virt_range(dst, PAGE_SIZE);
-		kunmap_atomic(src);
 		put_page(page);
 		dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
-		char *dst;
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			continue;
-		dst = kmap_atomic(page);
 		drm_clflush_virt_range(src, PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst);
+		memcpy_to_page(page, 0, src, PAGE_SIZE);
 		set_page_dirty(page);
 		if (obj->mm.madv == I915_MADV_WILLNEED)
...
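
The phys-backend hunks above replace open-coded kmap/memcpy/kunmap sequences with the memcpy_from_page()/memcpy_to_page() helpers from <linux/highmem.h>. Roughly, memcpy_from_page(dst, page, offset, len) behaves like the following sketch (simplified; the helper name below is hypothetical, and the real helper also checks that the copy stays within one page):

	static void memcpy_from_page_sketch(void *dst, struct page *page,
					    size_t offset, size_t len)
	{
		char *from = kmap_local_page(page);

		memcpy(dst, from + offset, len);
		kunmap_local(from);
	}
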
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 		if (err < 0)
 			return err;
-		vaddr = kmap_atomic(page);
+		vaddr = kmap_local_page(page);
+		pagefault_disable();
 		unwritten = __copy_from_user_inatomic(vaddr + pg,
 						      user_data,
 						      len);
-		kunmap_atomic(vaddr);
+		pagefault_enable();
+		kunmap_local(vaddr);
 		err = aops->write_end(obj->base.filp, mapping, offset, len,
 				      len - unwritten, page, data);
...
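
Unlike kmap_atomic(), kmap_local_page() does not disable pagefaults, but __copy_from_user_inatomic() must not fault; that is why the shmem_pwrite() hunk above adds an explicit pagefault_disable()/pagefault_enable() pair. A hedged sketch of the resulting pattern (hypothetical helper, not the exact driver code; it returns the number of bytes left uncopied so a caller could fall back to a faulting path):

	static unsigned long
	copy_user_to_page_nofault(struct page *page, unsigned int offset,
				  const void __user *from, unsigned long len)
	{
		void *vaddr = kmap_local_page(page);
		unsigned long unwritten;

		/* kmap_local_page() does not disable pagefaults; do it by hand. */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(vaddr + offset, from, len);
		pagefault_enable();
		kunmap_local(vaddr);

		return unwritten; /* 0 on success, else bytes not copied */
	}
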
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 		goto err_unlock;
 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+		u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
 		if (needs_flush & CLFLUSH_BEFORE)
 			drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 		if (ptr[dword] != val) {
 			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
 			       n, dword, ptr[dword], val);
-			kunmap_atomic(ptr);
+			kunmap_local(ptr);
 			err = -EINVAL;
 			break;
 		}
-		kunmap_atomic(ptr);
+		kunmap_local(ptr);
 	}
 	i915_gem_object_finish_access(obj);
...
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	void *map;
 	u32 *cpu;
 	int err;
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 		goto out;
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-	map = kmap_atomic(page);
-	cpu = map + offset_in_page(offset);
+	cpu = kmap_local_page(page) + offset_in_page(offset);
 	if (needs_clflush & CLFLUSH_BEFORE)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	if (needs_clflush & CLFLUSH_AFTER)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
-	kunmap_atomic(map);
+	kunmap_local(cpu);
 	i915_gem_object_finish_access(ctx->obj);
 out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	void *map;
 	u32 *cpu;
 	int err;
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 		goto out;
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-	map = kmap_atomic(page);
-	cpu = map + offset_in_page(offset);
+	cpu = kmap_local_page(page) + offset_in_page(offset);
 	if (needs_clflush & CLFLUSH_BEFORE)
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
 	*v = *cpu;
-	kunmap_atomic(map);
+	kunmap_local(cpu);
 	i915_gem_object_finish_access(ctx->obj);
 out:
...
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
-		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		map = kmap_local_page(i915_gem_object_get_page(obj, n));
 		for (m = 0; m < DW_PER_PAGE; m++)
 			map[m] = value;
 		if (!has_llc)
 			drm_clflush_virt_range(map, PAGE_SIZE);
-		kunmap_atomic(map);
+		kunmap_local(map);
 	}
 	i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map, m;
-		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		map = kmap_local_page(i915_gem_object_get_page(obj, n));
 		if (needs_flush & CLFLUSH_BEFORE)
 			drm_clflush_virt_range(map, PAGE_SIZE);
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 		}
 out_unmap:
-		kunmap_atomic(map);
+		kunmap_local(map);
 		if (err)
 			break;
 	}
...
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	}
 	if (memchr_inv(ptr, 0, dmabuf->size)) {
-		pr_err("Exported object not initialiased to zero!\n");
+		pr_err("Exported object not initialised to zero!\n");
 		err = -EINVAL;
 		goto out;
 	}
...
@@ -469,6 +469,9 @@
 #define XEHP_PSS_MODE2				MCR_REG(0x703c)
 #define   SCOREBOARD_STALL_FLUSH_CONTROL	REG_BIT(5)
+#define XEHP_PSS_CHICKEN			MCR_REG(0x7044)
+#define   FD_END_COLLECT			REG_BIT(5)
 #define GEN7_SC_INSTDONE			_MMIO(0x7100)
 #define GEN12_SC_INSTDONE_EXTRA			_MMIO(0x7104)
 #define GEN12_SC_INSTDONE_EXTRA2		_MMIO(0x7108)
...
@@ -777,6 +777,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
 	/* Wa_18019271663:dg2 */
 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+	/* Wa_14019877138:dg2 */
+	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
 }
 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
...
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 	for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
 		u32 len = min_t(u32, size, PAGE_SIZE - offset);
-		void *vaddr;
 		if (idx > 0) {
 			idx--;
 			continue;
 		}
-		vaddr = kmap_atomic(page);
-		memcpy(dst, vaddr + offset, len);
-		kunmap_atomic(vaddr);
+		memcpy_from_page(dst, page, offset, len);
 		offset = 0;
 		dst += len;
...
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	for (n = offset >> PAGE_SHIFT; remain; n++) {
 		int len = min(remain, PAGE_SIZE - x);
-		src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+		src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
 		if (src_needs_clflush)
 			drm_clflush_virt_range(src + x, len);
 		memcpy(ptr, src + x, len);
-		kunmap_atomic(src);
+		kunmap_local(src);
 		ptr += len;
 		remain -= len;
...
@@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
 	 * tau4 = (4 | x) << y
 	 * but add 2 when doing the final right shift to account for units
 	 */
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	/* val in hwmon interface units (millisec) */
 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 	if (val > max_win)
...
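
The hwmon hunks above fix a potential overflow: `(1 << x_w) | x` has type int, so the following `<< y` is evaluated in 32 bits and can lose the high bits before the value is widened for mul_u64_u32_shr(). Casting to u64 first makes the whole shift 64-bit. A standalone illustration of the difference (plain C with hypothetical values, not driver code; unsigned is used here so the truncating case stays well-defined, whereas the pre-fix kernel expression was a signed shift that could overflow outright):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t x = 3, x_w = 2, y = 30;

		/* Shift performed in 32 bits: the high bits are lost. */
		uint64_t wrapped = ((UINT32_C(1) << x_w) | x) << y;
		/* Widen first, as the fix does: shift performed in 64 bits. */
		uint64_t intended = (uint64_t)((UINT32_C(1) << x_w) | x) << y;

		printf("wrapped=%llu intended=%llu\n",
		       (unsigned long long)wrapped,
		       (unsigned long long)intended);
		return 0;
	}
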
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_FENCE	 44
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
  * EXEC_OBJECT_CAPTURE.
  */
 #define I915_PARAM_HAS_EXEC_CAPTURE	 45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
 	 * is accurate.
 	 *
 	 * The returned dword is split into two fields to indicate both
-	 * the engine classess on which the object is being read, and the
+	 * the engine classes on which the object is being read, and the
 	 * engine class on which it is currently being written (if any).
 	 *
 	 * The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
 	__u32 handle;
 	/* Advice: either the buffer will be needed again in the near future,
-	 * or wont be and could be discarded under memory pressure.
+	 * or won't be and could be discarded under memory pressure.
 	 */
 	__u32 madv;
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
 	 *	// enough to hold our array of engines. The kernel will fill out the
 	 *	// item.length for us, which is the number of bytes we need.
 	 *	//
-	 *	// Alternatively a large buffer can be allocated straight away enabling
+	 *	// Alternatively a large buffer can be allocated straightaway enabling
 	 *	// querying in one pass, in which case item.length should contain the
 	 *	// length of the provided buffer.
 	 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
 	 *	// Now that we allocated the required number of bytes, we call the ioctl
 	 *	// again, this time with the data_ptr pointing to our newly allocated
 	 *	// blob, which the kernel can then populate with info on all engines.
-	 *	item.data_ptr = (uintptr_t)&info,
+	 *	item.data_ptr = (uintptr_t)&info;
 	 *
 	 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 	 *	if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
 /**
  * struct drm_i915_engine_info
  *
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
  */
 struct drm_i915_engine_info {
 	/** @engine: Engine class and instance. */
...