Commit f277bc0c authored by Chris Wilson

drm/i915/selftests: Pass intel_context to igt_spinner

Teach igt_spinner to only use our internal structs, decoupling the
interface from the GEM contexts. This makes it easier to avoid
requiring ce->gem_context back references for kernel_context that may
have them in future.

v2: Lift engine lock to verify_wa() caller.
v3: Less than v2, but more so
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190731081126.9139-1-chris@chris-wilson.co.uk
parent cb0c43f3
...@@ -821,8 +821,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, ...@@ -821,8 +821,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
#define TEST_RESET BIT(2) #define TEST_RESET BIT(2)
static int static int
__sseu_prepare(struct drm_i915_private *i915, __sseu_prepare(const char *name,
const char *name,
unsigned int flags, unsigned int flags,
struct intel_context *ce, struct intel_context *ce,
struct igt_spinner **spin) struct igt_spinner **spin)
...@@ -838,14 +837,11 @@ __sseu_prepare(struct drm_i915_private *i915, ...@@ -838,14 +837,11 @@ __sseu_prepare(struct drm_i915_private *i915,
if (!*spin) if (!*spin)
return -ENOMEM; return -ENOMEM;
ret = igt_spinner_init(*spin, i915); ret = igt_spinner_init(*spin, ce->engine->gt);
if (ret) if (ret)
goto err_free; goto err_free;
rq = igt_spinner_create_request(*spin, rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
ce->gem_context,
ce->engine,
MI_NOOP);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
goto err_fini; goto err_fini;
...@@ -871,8 +867,7 @@ __sseu_prepare(struct drm_i915_private *i915, ...@@ -871,8 +867,7 @@ __sseu_prepare(struct drm_i915_private *i915,
} }
static int static int
__read_slice_count(struct drm_i915_private *i915, __read_slice_count(struct intel_context *ce,
struct intel_context *ce,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
struct igt_spinner *spin, struct igt_spinner *spin,
u32 *rpcs) u32 *rpcs)
...@@ -901,7 +896,7 @@ __read_slice_count(struct drm_i915_private *i915, ...@@ -901,7 +896,7 @@ __read_slice_count(struct drm_i915_private *i915,
return ret; return ret;
} }
if (INTEL_GEN(i915) >= 11) { if (INTEL_GEN(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK; s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT; s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else { } else {
...@@ -944,8 +939,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, ...@@ -944,8 +939,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
} }
static int static int
__sseu_finish(struct drm_i915_private *i915, __sseu_finish(const char *name,
const char *name,
unsigned int flags, unsigned int flags,
struct intel_context *ce, struct intel_context *ce,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
...@@ -962,14 +956,13 @@ __sseu_finish(struct drm_i915_private *i915, ...@@ -962,14 +956,13 @@ __sseu_finish(struct drm_i915_private *i915,
goto out; goto out;
} }
ret = __read_slice_count(i915, ce, obj, ret = __read_slice_count(ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs); flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret) if (ret)
goto out; goto out;
ret = __read_slice_count(i915, ce->engine->kernel_context, obj, ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out: out:
...@@ -977,11 +970,12 @@ __sseu_finish(struct drm_i915_private *i915, ...@@ -977,11 +970,12 @@ __sseu_finish(struct drm_i915_private *i915,
igt_spinner_end(spin); igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) { if ((flags & TEST_IDLE) && ret == 0) {
ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT); ret = i915_gem_wait_for_idle(ce->engine->i915,
0, MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
return ret; return ret;
ret = __read_slice_count(i915, ce, obj, NULL, &rpcs); ret = __read_slice_count(ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!"); "Context", " after idle!");
} }
...@@ -990,8 +984,7 @@ __sseu_finish(struct drm_i915_private *i915, ...@@ -990,8 +984,7 @@ __sseu_finish(struct drm_i915_private *i915,
} }
static int static int
__sseu_test(struct drm_i915_private *i915, __sseu_test(const char *name,
const char *name,
unsigned int flags, unsigned int flags,
struct intel_context *ce, struct intel_context *ce,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
...@@ -1000,7 +993,7 @@ __sseu_test(struct drm_i915_private *i915, ...@@ -1000,7 +993,7 @@ __sseu_test(struct drm_i915_private *i915,
struct igt_spinner *spin = NULL; struct igt_spinner *spin = NULL;
int ret; int ret;
ret = __sseu_prepare(i915, name, flags, ce, &spin); ret = __sseu_prepare(name, flags, ce, &spin);
if (ret) if (ret)
return ret; return ret;
...@@ -1008,7 +1001,7 @@ __sseu_test(struct drm_i915_private *i915, ...@@ -1008,7 +1001,7 @@ __sseu_test(struct drm_i915_private *i915,
if (ret) if (ret)
goto out_spin; goto out_spin;
ret = __sseu_finish(i915, name, flags, ce, obj, ret = __sseu_finish(name, flags, ce, obj,
hweight32(sseu.slice_mask), spin); hweight32(sseu.slice_mask), spin);
out_spin: out_spin:
...@@ -1088,22 +1081,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915, ...@@ -1088,22 +1081,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_context; goto out_context;
/* First set the default mask. */ /* First set the default mask. */
ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret) if (ret)
goto out_fail; goto out_fail;
/* Then set a power-gated configuration. */ /* Then set a power-gated configuration. */
ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret) if (ret)
goto out_fail; goto out_fail;
/* Back to defaults. */ /* Back to defaults. */
ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret) if (ret)
goto out_fail; goto out_fail;
/* One last power-gated configuration for the road. */ /* One last power-gated configuration for the road. */
ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret) if (ret)
goto out_fail; goto out_fail;
......
...@@ -22,9 +22,9 @@ ...@@ -22,9 +22,9 @@
static int live_sanitycheck(void *arg) static int live_sanitycheck(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine; struct i915_gem_engines_iter it;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
enum intel_engine_id id; struct intel_context *ce;
struct igt_spinner spin; struct igt_spinner spin;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
int err = -ENOMEM; int err = -ENOMEM;
...@@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg) ...@@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin, i915)) if (igt_spinner_init(&spin, &i915->gt))
goto err_unlock; goto err_unlock;
ctx = kernel_context(i915); ctx = kernel_context(i915);
if (!ctx) if (!ctx)
goto err_spin; goto err_spin;
for_each_engine(engine, i915, id) { for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq; struct i915_request *rq;
rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_ctx; goto err_ctx;
...@@ -69,6 +69,7 @@ static int live_sanitycheck(void *arg) ...@@ -69,6 +69,7 @@ static int live_sanitycheck(void *arg)
err = 0; err = 0;
err_ctx: err_ctx:
i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx); kernel_context_close(ctx);
err_spin: err_spin:
igt_spinner_fini(&spin); igt_spinner_fini(&spin);
...@@ -480,6 +481,24 @@ static int live_busywait_preempt(void *arg) ...@@ -480,6 +481,24 @@ static int live_busywait_preempt(void *arg)
return err; return err;
} }
/*
 * Compatibility shim: build a spinner request from a (GEM context, engine)
 * pair, now that igt_spinner_create_request() takes an intel_context
 * directly. Returns the new request on success or an ERR_PTR on failure.
 */
static struct i915_request *
spinner_create_request(struct igt_spinner *spin,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u32 arb)
{
struct intel_context *ce;
struct i915_request *rq;
/* Look up the context for this engine; takes a reference on it. */
ce = i915_gem_context_get_engine(ctx, engine->id);
if (IS_ERR(ce))
return ERR_CAST(ce);
rq = igt_spinner_create_request(spin, ce, arb);
/*
 * Drop our local reference. Presumably the request (when not an
 * ERR_PTR) holds its own reference on the context for its lifetime —
 * NOTE(review): confirm against intel_context_create_request().
 */
intel_context_put(ce);
return rq;
}
static int live_preempt(void *arg) static int live_preempt(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
...@@ -499,10 +518,10 @@ static int live_preempt(void *arg) ...@@ -499,10 +518,10 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915)) if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock; goto err_unlock;
if (igt_spinner_init(&spin_lo, i915)) if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi; goto err_spin_hi;
ctx_hi = kernel_context(i915); ctx_hi = kernel_context(i915);
...@@ -529,8 +548,8 @@ static int live_preempt(void *arg) ...@@ -529,8 +548,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo; goto err_ctx_lo;
} }
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_ctx_lo; goto err_ctx_lo;
...@@ -545,8 +564,8 @@ static int live_preempt(void *arg) ...@@ -545,8 +564,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo; goto err_ctx_lo;
} }
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, rq = spinner_create_request(&spin_hi, ctx_hi, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo); igt_spinner_end(&spin_lo);
err = PTR_ERR(rq); err = PTR_ERR(rq);
...@@ -603,10 +622,10 @@ static int live_late_preempt(void *arg) ...@@ -603,10 +622,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915)) if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock; goto err_unlock;
if (igt_spinner_init(&spin_lo, i915)) if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi; goto err_spin_hi;
ctx_hi = kernel_context(i915); ctx_hi = kernel_context(i915);
...@@ -632,8 +651,8 @@ static int live_late_preempt(void *arg) ...@@ -632,8 +651,8 @@ static int live_late_preempt(void *arg)
goto err_ctx_lo; goto err_ctx_lo;
} }
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_ctx_lo; goto err_ctx_lo;
...@@ -645,8 +664,8 @@ static int live_late_preempt(void *arg) ...@@ -645,8 +664,8 @@ static int live_late_preempt(void *arg)
goto err_wedged; goto err_wedged;
} }
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, rq = spinner_create_request(&spin_hi, ctx_hi, engine,
MI_NOOP); MI_NOOP);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo); igt_spinner_end(&spin_lo);
err = PTR_ERR(rq); err = PTR_ERR(rq);
...@@ -711,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915, ...@@ -711,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915,
if (!c->ctx) if (!c->ctx)
return -ENOMEM; return -ENOMEM;
if (igt_spinner_init(&c->spin, i915)) if (igt_spinner_init(&c->spin, &i915->gt))
goto err_ctx; goto err_ctx;
return 0; return 0;
...@@ -761,9 +780,9 @@ static int live_nopreempt(void *arg) ...@@ -761,9 +780,9 @@ static int live_nopreempt(void *arg)
engine->execlists.preempt_hang.count = 0; engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin, rq_a = spinner_create_request(&a.spin,
a.ctx, engine, a.ctx, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq_a)) { if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a); err = PTR_ERR(rq_a);
goto err_client_b; goto err_client_b;
...@@ -778,9 +797,9 @@ static int live_nopreempt(void *arg) ...@@ -778,9 +797,9 @@ static int live_nopreempt(void *arg)
goto err_wedged; goto err_wedged;
} }
rq_b = igt_spinner_create_request(&b.spin, rq_b = spinner_create_request(&b.spin,
b.ctx, engine, b.ctx, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq_b)) { if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b); err = PTR_ERR(rq_b);
goto err_client_b; goto err_client_b;
...@@ -880,9 +899,9 @@ static int live_suppress_self_preempt(void *arg) ...@@ -880,9 +899,9 @@ static int live_suppress_self_preempt(void *arg)
engine->execlists.preempt_hang.count = 0; engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin, rq_a = spinner_create_request(&a.spin,
a.ctx, engine, a.ctx, engine,
MI_NOOP); MI_NOOP);
if (IS_ERR(rq_a)) { if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a); err = PTR_ERR(rq_a);
goto err_client_b; goto err_client_b;
...@@ -895,9 +914,9 @@ static int live_suppress_self_preempt(void *arg) ...@@ -895,9 +914,9 @@ static int live_suppress_self_preempt(void *arg)
} }
for (depth = 0; depth < 8; depth++) { for (depth = 0; depth < 8; depth++) {
rq_b = igt_spinner_create_request(&b.spin, rq_b = spinner_create_request(&b.spin,
b.ctx, engine, b.ctx, engine,
MI_NOOP); MI_NOOP);
if (IS_ERR(rq_b)) { if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b); err = PTR_ERR(rq_b);
goto err_client_b; goto err_client_b;
...@@ -1048,9 +1067,9 @@ static int live_suppress_wait_preempt(void *arg) ...@@ -1048,9 +1067,9 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3; goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) { for (i = 0; i < ARRAY_SIZE(client); i++) {
rq[i] = igt_spinner_create_request(&client[i].spin, rq[i] = spinner_create_request(&client[i].spin,
client[i].ctx, engine, client[i].ctx, engine,
MI_NOOP); MI_NOOP);
if (IS_ERR(rq[i])) { if (IS_ERR(rq[i])) {
err = PTR_ERR(rq[i]); err = PTR_ERR(rq[i]);
goto err_wedged; goto err_wedged;
...@@ -1157,9 +1176,9 @@ static int live_chain_preempt(void *arg) ...@@ -1157,9 +1176,9 @@ static int live_chain_preempt(void *arg)
if (!intel_engine_has_preemption(engine)) if (!intel_engine_has_preemption(engine))
continue; continue;
rq = igt_spinner_create_request(&lo.spin, rq = spinner_create_request(&lo.spin,
lo.ctx, engine, lo.ctx, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) if (IS_ERR(rq))
goto err_wedged; goto err_wedged;
i915_request_add(rq); i915_request_add(rq);
...@@ -1183,18 +1202,18 @@ static int live_chain_preempt(void *arg) ...@@ -1183,18 +1202,18 @@ static int live_chain_preempt(void *arg)
} }
for_each_prime_number_from(count, 1, ring_size) { for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin, rq = spinner_create_request(&hi.spin,
hi.ctx, engine, hi.ctx, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) if (IS_ERR(rq))
goto err_wedged; goto err_wedged;
i915_request_add(rq); i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq)) if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged; goto err_wedged;
rq = igt_spinner_create_request(&lo.spin, rq = spinner_create_request(&lo.spin,
lo.ctx, engine, lo.ctx, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) if (IS_ERR(rq))
goto err_wedged; goto err_wedged;
i915_request_add(rq); i915_request_add(rq);
...@@ -1284,10 +1303,10 @@ static int live_preempt_hang(void *arg) ...@@ -1284,10 +1303,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915)) if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock; goto err_unlock;
if (igt_spinner_init(&spin_lo, i915)) if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi; goto err_spin_hi;
ctx_hi = kernel_context(i915); ctx_hi = kernel_context(i915);
...@@ -1308,8 +1327,8 @@ static int live_preempt_hang(void *arg) ...@@ -1308,8 +1327,8 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine)) if (!intel_engine_has_preemption(engine))
continue; continue;
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_ctx_lo; goto err_ctx_lo;
...@@ -1324,8 +1343,8 @@ static int live_preempt_hang(void *arg) ...@@ -1324,8 +1343,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo; goto err_ctx_lo;
} }
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, rq = spinner_create_request(&spin_hi, ctx_hi, engine,
MI_ARB_CHECK); MI_ARB_CHECK);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo); igt_spinner_end(&spin_lo);
err = PTR_ERR(rq); err = PTR_ERR(rq);
......
...@@ -238,6 +238,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine, ...@@ -238,6 +238,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin) struct igt_spinner *spin)
{ {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq; struct i915_request *rq;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
int err = 0; int err = 0;
...@@ -248,10 +249,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine, ...@@ -248,10 +249,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
ce = i915_gem_context_get_engine(ctx, engine->id);
GEM_BUG_ON(IS_ERR(ce));
rq = ERR_PTR(-ENODEV); rq = ERR_PTR(-ENODEV);
with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref) with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); rq = igt_spinner_create_request(spin, ce, MI_NOOP);
intel_context_put(ce);
kernel_context_close(ctx); kernel_context_close(ctx);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
...@@ -291,7 +296,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, ...@@ -291,7 +296,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return PTR_ERR(ctx); return PTR_ERR(ctx);
err = igt_spinner_init(&spin, i915); err = igt_spinner_init(&spin, engine->gt);
if (err) if (err)
goto out_ctx; goto out_ctx;
...@@ -1083,7 +1088,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, ...@@ -1083,7 +1088,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
enum intel_engine_id id = ce->engine->id; enum intel_engine_id id = ce->engine->id;
ok &= engine_wa_list_verify(ce, ok &= engine_wa_list_verify(ce,
...@@ -1094,7 +1099,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, ...@@ -1094,7 +1099,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
&lists->engine[id].ctx_wa_list, &lists->engine[id].ctx_wa_list,
str) == 0; str) == 0;
} }
i915_gem_context_unlock_engines(ctx);
return ok; return ok;
} }
...@@ -1115,6 +1119,8 @@ live_gpu_reset_workarounds(void *arg) ...@@ -1115,6 +1119,8 @@ live_gpu_reset_workarounds(void *arg)
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return PTR_ERR(ctx); return PTR_ERR(ctx);
i915_gem_context_lock_engines(ctx);
pr_info("Verifying after GPU reset...\n"); pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(&i915->gt); igt_global_reset_lock(&i915->gt);
...@@ -1131,6 +1137,7 @@ live_gpu_reset_workarounds(void *arg) ...@@ -1131,6 +1137,7 @@ live_gpu_reset_workarounds(void *arg)
ok = verify_wa_lists(ctx, &lists, "after reset"); ok = verify_wa_lists(ctx, &lists, "after reset");
out: out:
i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx); kernel_context_close(ctx);
reference_lists_fini(i915, &lists); reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref); intel_runtime_pm_put(&i915->runtime_pm, wakeref);
...@@ -1143,10 +1150,10 @@ static int ...@@ -1143,10 +1150,10 @@ static int
live_engine_reset_workarounds(void *arg) live_engine_reset_workarounds(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine; struct i915_gem_engines_iter it;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct intel_context *ce;
struct igt_spinner spin; struct igt_spinner spin;
enum intel_engine_id id;
struct i915_request *rq; struct i915_request *rq;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
struct wa_lists lists; struct wa_lists lists;
...@@ -1164,7 +1171,8 @@ live_engine_reset_workarounds(void *arg) ...@@ -1164,7 +1171,8 @@ live_engine_reset_workarounds(void *arg)
reference_lists_init(i915, &lists); reference_lists_init(i915, &lists);
for_each_engine(engine, i915, id) { for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct intel_engine_cs *engine = ce->engine;
bool ok; bool ok;
pr_info("Verifying after %s reset...\n", engine->name); pr_info("Verifying after %s reset...\n", engine->name);
...@@ -1183,11 +1191,11 @@ live_engine_reset_workarounds(void *arg) ...@@ -1183,11 +1191,11 @@ live_engine_reset_workarounds(void *arg)
goto err; goto err;
} }
ret = igt_spinner_init(&spin, i915); ret = igt_spinner_init(&spin, engine->gt);
if (ret) if (ret)
goto err; goto err;
rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
igt_spinner_fini(&spin); igt_spinner_fini(&spin);
...@@ -1214,8 +1222,8 @@ live_engine_reset_workarounds(void *arg) ...@@ -1214,8 +1222,8 @@ live_engine_reset_workarounds(void *arg)
goto err; goto err;
} }
} }
err: err:
i915_gem_context_unlock_engines(ctx);
reference_lists_fini(i915, &lists); reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref); intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(&i915->gt); igt_global_reset_unlock(&i915->gt);
......
...@@ -9,25 +9,24 @@ ...@@ -9,25 +9,24 @@
#include "igt_spinner.h" #include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{ {
unsigned int mode; unsigned int mode;
void *vaddr; void *vaddr;
int err; int err;
GEM_BUG_ON(INTEL_GEN(i915) < 8); GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
memset(spin, 0, sizeof(*spin)); memset(spin, 0, sizeof(*spin));
spin->i915 = i915; spin->gt = gt;
spin->gt = &i915->gt;
spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) { if (IS_ERR(spin->hws)) {
err = PTR_ERR(spin->hws); err = PTR_ERR(spin->hws);
goto err; goto err;
} }
spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) { if (IS_ERR(spin->obj)) {
err = PTR_ERR(spin->obj); err = PTR_ERR(spin->obj);
goto err_hws; goto err_hws;
...@@ -41,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) ...@@ -41,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
} }
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
mode = i915_coherent_map_type(i915); mode = i915_coherent_map_type(gt->i915);
vaddr = i915_gem_object_pin_map(spin->obj, mode); vaddr = i915_gem_object_pin_map(spin->obj, mode);
if (IS_ERR(vaddr)) { if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr); err = PTR_ERR(vaddr);
...@@ -87,22 +86,22 @@ static int move_to_active(struct i915_vma *vma, ...@@ -87,22 +86,22 @@ static int move_to_active(struct i915_vma *vma,
struct i915_request * struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin, igt_spinner_create_request(struct igt_spinner *spin,
struct i915_gem_context *ctx, struct intel_context *ce,
struct intel_engine_cs *engine,
u32 arbitration_command) u32 arbitration_command)
{ {
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL; struct i915_request *rq = NULL;
struct i915_vma *hws, *vma; struct i915_vma *hws, *vma;
u32 *batch; u32 *batch;
int err; int err;
spin->gt = engine->gt; GEM_BUG_ON(spin->gt != ce->vm->gt);
vma = i915_vma_instance(spin->obj, ctx->vm, NULL); vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
return ERR_CAST(vma); return ERR_CAST(vma);
hws = i915_vma_instance(spin->hws, ctx->vm, NULL); hws = i915_vma_instance(spin->hws, ce->vm, NULL);
if (IS_ERR(hws)) if (IS_ERR(hws))
return ERR_CAST(hws); return ERR_CAST(hws);
...@@ -114,7 +113,7 @@ igt_spinner_create_request(struct igt_spinner *spin, ...@@ -114,7 +113,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err) if (err)
goto unpin_vma; goto unpin_vma;
rq = igt_request_alloc(ctx, engine); rq = intel_context_create_request(ce);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto unpin_hws; goto unpin_hws;
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
struct intel_gt; struct intel_gt;
struct igt_spinner { struct igt_spinner {
struct drm_i915_private *i915;
struct intel_gt *gt; struct intel_gt *gt;
struct drm_i915_gem_object *hws; struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -25,13 +24,12 @@ struct igt_spinner { ...@@ -25,13 +24,12 @@ struct igt_spinner {
void *seqno; void *seqno;
}; };
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915); int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
void igt_spinner_fini(struct igt_spinner *spin); void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request * struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin, igt_spinner_create_request(struct igt_spinner *spin,
struct i915_gem_context *ctx, struct intel_context *ce,
struct intel_engine_cs *engine,
u32 arbitration_command); u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin); void igt_spinner_end(struct igt_spinner *spin);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment