Commit 75b974a8 authored by Chris Wilson

drm/i915/selftests: Teach igt_gpu_fill_dw() to take intel_context

Avoid having to pass around (ctx, engine) everywhere by passing the
actual intel_context we intend to use. Today we preach this lesson to
igt_gpu_fill_dw and its callers' callers.

The immediate benefit for the GEM selftests is that we aim to use the
GEM context as the control, the source of the engines on which to test
the GEM context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823235141.31799-1-chris@chris-wilson.co.uk
parent 77715906
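
For illustration only (not part of the patch): a caller that used to pass the
(ctx, engine) pair now picks an intel_context, which already couples an engine
with its address space, and passes just that. The names below are taken from
the selftest hunks that follow.

	/* Before: the helper re-derived the vm from (ctx, engine) itself. */
	err = igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
			      vma->size >> PAGE_SHIFT, val);

	/* After: walk the GEM context's engines and hand over the
	 * intel_context; ce->engine and ce->vm travel together.
	 */
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
				      vma->size >> PAGE_SHIFT, val);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);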
@@ -879,9 +879,8 @@ static int igt_mock_ppgtt_64K(void *arg)
 	return err;
 }

-static int gpu_write(struct i915_vma *vma,
-		     struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine,
+static int gpu_write(struct intel_context *ce,
+		     struct i915_vma *vma,
 		     u32 dw,
 		     u32 val)
 {
@@ -893,7 +892,7 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		return err;

-	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
 			       vma->size >> PAGE_SHIFT, val);
 }
@@ -929,18 +928,16 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }

-static int __igt_write_huge(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine,
+static int __igt_write_huge(struct intel_context *ce,
 			    struct drm_i915_gem_object *obj,
 			    u64 size, u64 offset,
 			    u32 dword, u32 val)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	struct i915_vma *vma;
 	int err;

-	vma = i915_vma_instance(obj, vm, NULL);
+	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);

@@ -954,7 +951,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 		 * The ggtt may have some pages reserved so
 		 * refrain from erroring out.
 		 */
-		if (err == -ENOSPC && i915_is_ggtt(vm))
+		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
 			err = 0;

 		goto out_vma_close;
@@ -964,7 +961,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 	if (err)
 		goto out_vma_unpin;

-	err = gpu_write(vma, ctx, engine, dword, val);
+	err = gpu_write(ce, vma, dword, val);
 	if (err) {
 		pr_err("gpu-write failed at offset=%llx\n", offset);
 		goto out_vma_unpin;
@@ -987,14 +984,13 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 static int igt_write_huge(struct i915_gem_context *ctx,
 			  struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
-	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
-	struct intel_engine_cs *engine;
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end_time);
 	unsigned int max_page_size;
-	unsigned int id;
+	unsigned int count;
 	u64 max;
 	u64 num;
 	u64 size;
@@ -1008,19 +1004,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
 		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

-	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
-	max = div_u64((vm->total - size), max_page_size);
-
 	n = 0;
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n",
-				id);
+	count = 0;
+	max = U64_MAX;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		count++;
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
-		}

-		engines[n++] = engine;
+		max = min(max, ce->vm->total);
+		n++;
 	}
+	i915_gem_context_unlock_engines(ctx);

 	if (!n)
 		return 0;
@@ -1029,23 +1024,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	 * randomized order, lets also make feeding to the same engine a few
 	 * times in succession a possibility by enlarging the permutation array.
 	 */
-	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+	order = i915_random_order(count * count, &prng);
 	if (!order)
 		return -ENOMEM;

+	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+	max = div_u64(max - size, max_page_size);
+
 	/*
 	 * Try various offsets in an ascending/descending fashion until we
 	 * timeout -- we want to avoid issues hidden by effectively always using
 	 * offset = 0.
 	 */
 	i = 0;
+	engines = i915_gem_context_lock_engines(ctx);
 	for_each_prime_number_from(num, 0, max) {
 		u64 offset_low = num * max_page_size;
 		u64 offset_high = (max - num) * max_page_size;
 		u32 dword = offset_in_page(num) / 4;
+		struct intel_context *ce;

-		engine = engines[order[i] % n];
-		i = (i + 1) % (n * I915_NUM_ENGINES);
+		ce = engines->engines[order[i] % engines->num_engines];
+		i = (i + 1) % (count * count);
+
+		if (!ce || !intel_engine_can_store_dword(ce->engine))
+			continue;

 		/*
 		 * In order to utilize 64K pages we need to both pad the vma
@@ -1057,22 +1059,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 			offset_low = round_down(offset_low,
 						I915_GTT_PAGE_SIZE_2M);

-		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+		err = __igt_write_huge(ce, obj, size, offset_low,
 				       dword, num + 1);
 		if (err)
 			break;

-		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+		err = __igt_write_huge(ce, obj, size, offset_high,
 				       dword, num + 1);
 		if (err)
 			break;

 		if (igt_timeout(end_time,
-				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-				__func__, engine->id, offset_low, offset_high,
+				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+				__func__, ce->engine->name, offset_low, offset_high,
 				max_page_size))
 			break;
 	}
+	i915_gem_context_unlock_engines(ctx);

 	kfree(order);
@@ -1316,10 +1319,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
 	struct i915_address_space *vm = ctx->vm;
 	struct drm_i915_gem_object *obj;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	unsigned int n;
 	int first, last;
 	int err;
@@ -1419,14 +1422,18 @@
 	 */

 	n = 0;
-	for_each_engine(engine, dev_priv, id) {
-		if (!intel_engine_can_store_dword(engine))
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;

-		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}
+	i915_gem_context_unlock_engines(ctx);
+	if (err)
+		goto out_unpin;

 	while (n--) {
 		err = cpu_check(obj, n, 0xdeadbeaf);
 		if (err)
@@ -1507,8 +1514,8 @@ static int igt_shrink_thp(void *arg)
 	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;
 	unsigned int n;
@@ -1548,16 +1555,19 @@
 		goto out_unpin;

 	n = 0;
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_can_store_dword(engine))
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		if (!intel_engine_can_store_dword(ce->engine))
 			continue;

-		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}
+	i915_gem_context_unlock_engines(ctx);

 	i915_vma_unpin(vma);
+	if (err)
+		goto out_close;

 	/*
 	 * Now that the pages are *unpinned* shrink-all should invoke
@@ -1583,10 +1593,9 @@
 	while (n--) {
 		err = cpu_check(obj, n, 0xdeadbeaf);
 		if (err)
-			goto out_unpin;
+			break;
 	}

 out_unpin:
 	i915_vma_unpin(vma);
 out_close:
...
@@ -166,19 +166,17 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
 	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
 }

-static int gpu_fill(struct drm_i915_gem_object *obj,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+		    struct drm_i915_gem_object *obj,
 		    unsigned int dw)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	struct i915_vma *vma;
 	int err;

-	GEM_BUG_ON(obj->base.size > vm->total);
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(obj->base.size > ce->vm->total);
+	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

-	vma = i915_vma_instance(obj, vm, NULL);
+	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
@@ -200,9 +198,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	 * whilst checking that each context provides a unique view
 	 * into the object.
 	 */
-	err = igt_gpu_fill_dw(vma,
-			      ctx,
-			      engine,
+	err = igt_gpu_fill_dw(ce, vma,
 			      (dw * real_page_count(obj)) << PAGE_SHIFT |
 			      (dw * sizeof(u32)),
 			      real_page_count(obj),
@@ -305,22 +301,21 @@ static int file_add_object(struct drm_file *file,
 }

 static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
+create_test_object(struct i915_address_space *vm,
 		   struct drm_file *file,
 		   struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
 	u64 size;
 	int err;

 	/* Keep in GEM's good graces */
-	i915_retire_requests(ctx->i915);
+	i915_retire_requests(vm->i915);

 	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
 	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

-	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
 	if (IS_ERR(obj))
 		return obj;
@@ -393,6 +388,7 @@ static int igt_ctx_exec(void *arg)
 		dw = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
+			struct intel_context *ce;

 			ctx = live_context(i915, file);
 			if (IS_ERR(ctx)) {
@@ -400,15 +396,20 @@ static int igt_ctx_exec(void *arg)
 				goto out_unlock;
 			}

+			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+
 			if (!obj) {
-				obj = create_test_object(ctx, file, &objects);
+				obj = create_test_object(ce->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					intel_context_put(ce);
 					goto out_unlock;
 				}
 			}

-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
+			intel_context_put(ce);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
@@ -509,6 +510,7 @@ static int igt_shared_ctx_exec(void *arg)
 		ncontexts = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
+			struct intel_context *ce;

 			ctx = kernel_context(i915);
 			if (IS_ERR(ctx)) {
@@ -518,22 +520,26 @@
 			__assign_ppgtt(ctx, parent->vm);

+			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+
 			if (!obj) {
-				obj = create_test_object(parent, file, &objects);
+				obj = create_test_object(parent->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					intel_context_put(ce);
 					kernel_context_close(ctx);
 					goto out_test;
 				}
 			}

-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
+			intel_context_put(ce);
+			kernel_context_close(ctx);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name, ctx->hw_id,
 				       yesno(!!ctx->vm), err);
-				kernel_context_close(ctx);
 				goto out_test;
 			}
@@ -544,8 +550,6 @@ static int igt_shared_ctx_exec(void *arg)
 			ndwords++;
 			ncontexts++;
-
-			kernel_context_close(ctx);
 		}
 		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
 			ncontexts, engine->name, ndwords);
@@ -604,6 +608,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);

+	intel_gt_chipset_flush(vma->vm->gt);
+
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1082,17 +1088,19 @@ static int igt_ctx_readonly(void *arg)
 	ndwords = 0;
 	dw = 0;
 	while (!time_after(jiffies, end_time)) {
-		struct intel_engine_cs *engine;
-		unsigned int id;
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;

-		for_each_engine(engine, i915, id) {
-			if (!intel_engine_can_store_dword(engine))
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx), it) {
+			if (!intel_engine_can_store_dword(ce->engine))
 				continue;

 			if (!obj) {
-				obj = create_test_object(ctx, file, &objects);
+				obj = create_test_object(ce->vm, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
+					i915_gem_context_unlock_engines(ctx);
 					goto out_unlock;
 				}
 			}
@@ -1100,12 +1108,13 @@
 					i915_gem_object_set_readonly(obj);
 			}

-			err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(ce, obj, dw);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
-				       engine->name, ctx->hw_id,
+				       ce->engine->name, ctx->hw_id,
 				       yesno(!!ctx->vm), err);
+				i915_gem_context_unlock_engines(ctx);
 				goto out_unlock;
 			}
@@ -1115,6 +1124,7 @@
 			}
 			ndwords++;
 		}
+		i915_gem_context_unlock_engines(ctx);
 	}
 	pr_info("Submitted %lu dwords (across %u engines)\n",
 		ndwords, RUNTIME_INFO(i915)->num_engines);
@@ -1197,6 +1207,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);

+	intel_gt_chipset_flush(engine->gt);
+
 	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1296,6 +1308,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);

+	intel_gt_chipset_flush(engine->gt);
+
 	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
...
@@ -9,6 +9,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gt.h"

 #include "i915_vma.h"
 #include "i915_drv.h"
@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
 	*cmd = MI_BATCH_BUFFER_END;
 	i915_gem_object_unpin_map(obj);

+	intel_gt_chipset_flush(vma->vm->gt);
+
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -101,40 +104,35 @@ igt_emit_store_dw(struct i915_vma *vma,
 	return ERR_PTR(err);
 }

-int igt_gpu_fill_dw(struct i915_vma *vma,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
-		    u64 offset,
-		    unsigned long count,
-		    u32 val)
+int igt_gpu_fill_dw(struct intel_context *ce,
+		    struct i915_vma *vma, u64 offset,
+		    unsigned long count, u32 val)
 {
-	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	unsigned int flags;
 	int err;

-	GEM_BUG_ON(vma->size > vm->total);
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));

 	batch = igt_emit_store_dw(vma, offset, count, val);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);

-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_batch;
 	}

 	flags = 0;
-	if (INTEL_GEN(vm->i915) <= 5)
+	if (INTEL_GEN(ce->vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;

-	err = engine->emit_bb_start(rq,
+	err = rq->engine->emit_bb_start(rq,
 				    batch->node.start, batch->node.size,
 				    flags);
 	if (err)
 		goto err_request;
...
@@ -11,9 +11,11 @@
 struct i915_request;
 struct i915_gem_context;
-struct intel_engine_cs;
 struct i915_vma;

+struct intel_context;
+struct intel_engine_cs;
+
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
 		  unsigned long count,
 		  u32 val);

-int igt_gpu_fill_dw(struct i915_vma *vma,
-		    struct i915_gem_context *ctx,
-		    struct intel_engine_cs *engine,
-		    u64 offset,
-		    unsigned long count,
-		    u32 val);
+int igt_gpu_fill_dw(struct intel_context *ce,
+		    struct i915_vma *vma, u64 offset,
+		    unsigned long count, u32 val);

 #endif /* __IGT_GEM_UTILS_H__ */