Commit c9d08cc3 authored by Chris Wilson

drm/i915/selftests: Mark up rpm wakerefs

Track the temporary wakerefs used within the selftests so that leaks are
clearly reported.
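
For illustration, every conversion below follows the same pattern (a minimal
sketch; do_selftest() stands in for the body of each test and is hypothetical,
while the runtime-pm calls are the real ones being changed):

	intel_wakeref_t wakeref;
	int err;

	/* Before: the acquisition is anonymous, so an unbalanced
	 * reference cannot be attributed to its owner.
	 */
	intel_runtime_pm_get(i915);
	err = do_selftest(i915);	/* hypothetical test body */
	intel_runtime_pm_put_unchecked(i915);

	/* After: the cookie returned by _get() pairs each put with
	 * its get, so a leaked reference is loudly reported.
	 */
	wakeref = intel_runtime_pm_get(i915);
	err = do_selftest(i915);
	intel_runtime_pm_put(i915, wakeref);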

v2: Add a couple of coarse annotations for mock selftests as we now
loudly warn about the errors.
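
As an example of the coarse form, a mock selftest simply brackets its whole
subtest run with a single tracked wakeref; this sketch mirrors the
i915_request_mock_selftests() hunk below:

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	/* One wakeref covers all of the mock subtests. */
	wakeref = intel_runtime_pm_get(i915);
	err = i915_subtests(tests, i915);
	intel_runtime_pm_put(i915, wakeref);

	drm_dev_put(&i915->drm);
	return err;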
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-14-chris@chris-wilson.co.uk
parent 2cb2cb5f
@@ -1756,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
};
struct drm_file *file;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
int err;
if (!HAS_PPGTT(dev_priv)) {
@@ -1771,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1785,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
intel_runtime_pm_put_unchecked(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
......
@@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915,
i915_request_add(rq);
}
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
return err;
}
@@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915)
static void simulate_hibernate(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -93,39 +96,45 @@ static int pm_prepare(struct drm_i915_private *i915)
static void pm_suspend(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
static void pm_hibernate(struct drm_i915_private *i915)
{
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
static void pm_resume(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
/*
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
intel_engines_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
static int igt_gem_suspend(void *arg)
......
@@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
......
@@ -119,6 +119,7 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
intel_wakeref_t wakeref;
struct drm_file *file;
struct live_test t;
unsigned long n;
@@ -140,7 +141,7 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -243,7 +244,7 @@ static int live_nop_switch(void *arg)
}
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -593,6 +594,8 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
intel_wakeref_t wakeref;
if (!engine->context_size)
continue; /* No logical context support in HW */
@@ -607,9 +610,9 @@ static int igt_ctx_exec(void *arg)
}
}
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -699,6 +702,8 @@ static int igt_ctx_readonly(void *arg)
unsigned int id;
for_each_engine(engine, i915, id) {
intel_wakeref_t wakeref;
if (!intel_engine_can_store_dword(engine))
continue;
@@ -713,9 +718,9 @@ static int igt_ctx_readonly(void *arg)
i915_gem_object_set_readonly(obj);
}
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -976,6 +981,7 @@ static int igt_vm_isolation(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
@@ -1022,7 +1028,7 @@ static int igt_vm_isolation(void *arg)
GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
count = 0;
for_each_engine(engine, i915, id) {
@@ -1067,7 +1073,7 @@ static int igt_vm_isolation(void *arg)
count, RUNTIME_INFO(i915)->num_rings);
out_rpm:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
out_unlock:
if (end_live_test(&t))
err = -EIO;
@@ -1165,6 +1171,7 @@ static int igt_switch_to_kernel_context(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err;
/*
@@ -1175,7 +1182,7 @@ static int igt_switch_to_kernel_context(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -1200,7 +1207,7 @@ static int igt_switch_to_kernel_context(void *arg)
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
......
@@ -336,6 +336,7 @@ static int igt_evict_contexts(void *arg)
struct drm_mm_node node;
struct reserved *next;
} *reserved = NULL;
intel_wakeref_t wakeref;
struct drm_mm_node hole;
unsigned long count;
int err;
@@ -355,7 +356,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -400,8 +401,10 @@ static int igt_evict_contexts(void *arg)
struct drm_file *file;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
if (IS_ERR(file)) {
err = PTR_ERR(file);
break;
}
count = 0;
mutex_lock(&i915->drm.struct_mutex);
@@ -464,7 +467,7 @@ static int igt_evict_contexts(void *arg)
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -480,6 +483,7 @@ int i915_gem_evict_mock_selftests(void)
SUBTEST(igt_overcommit),
};
struct drm_i915_private *i915;
intel_wakeref_t wakeref;
int err;
i915 = mock_gem_device();
@@ -487,7 +491,11 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
err = i915_subtests(tests, i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
drm_dev_put(&i915->drm);
......
@@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
@@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
count = n;
@@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct drm_mm_node tmp;
unsigned int *order, n;
int err;
@@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
......
@@ -308,6 +308,7 @@ static int igt_partial_tiling(void *arg)
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
int tiling;
int err;
@@ -333,7 +334,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -444,7 +445,7 @@ next_tiling: ;
}
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -506,11 +507,14 @@ static void disable_retire_worker(struct drm_i915_private *i915)
mutex_lock(&i915->drm.struct_mutex);
if (!i915->gt.active_requests++) {
intel_runtime_pm_get(i915);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
i915_gem_unpark(i915);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
}
mutex_unlock(&i915->drm.struct_mutex);
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
}
@@ -578,6 +582,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
intel_wakeref_t wakeref;
if (i915_terminally_wedged(&i915->gpu_error))
break;
@@ -588,9 +594,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = make_obj_busy(obj);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
......
@@ -255,13 +255,18 @@ int i915_request_mock_selftests(void)
SUBTEST(igt_request_rewind),
};
struct drm_i915_private *i915;
intel_wakeref_t wakeref;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
wakeref = intel_runtime_pm_get(i915);
err = i915_subtests(tests, i915);
intel_runtime_pm_put(i915, wakeref);
drm_dev_put(&i915->drm);
return err;
@@ -332,6 +337,7 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct live_test t;
unsigned int id;
int err = -ENODEV;
@@ -342,7 +348,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -403,7 +409,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -478,8 +484,9 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct live_test t;
intel_wakeref_t wakeref;
struct i915_vma *batch;
struct live_test t;
unsigned int id;
int err = 0;
@@ -489,7 +496,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -553,7 +560,7 @@ static int live_empty_request(void *arg)
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -637,6 +644,7 @@ static int live_all_engines(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_request *request[I915_NUM_ENGINES];
intel_wakeref_t wakeref;
struct i915_vma *batch;
struct live_test t;
unsigned int id;
@@ -648,7 +656,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@@ -731,7 +739,7 @@ static int live_all_engines(void *arg)
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -742,6 +750,7 @@ static int live_sequential_engines(void *arg)
struct i915_request *request[I915_NUM_ENGINES] = {};
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct live_test t;
unsigned int id;
int err;
@@ -753,7 +762,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@@ -860,7 +869,7 @@ static int live_sequential_engines(void *arg)
i915_request_put(request[id]);
}
out_unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
......
@@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
static int igt_guc_clients(void *args)
{
struct drm_i915_private *dev_priv = args;
intel_wakeref_t wakeref;
struct intel_guc *guc;
int err = 0;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -225,7 +226,7 @@ static int igt_guc_clients(void *args)
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
intel_runtime_pm_put_unchecked(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -238,13 +239,14 @@ static int igt_guc_clients(void *args)
static int igt_guc_doorbells(void *arg)
{
struct drm_i915_private *dev_priv = arg;
intel_wakeref_t wakeref;
struct intel_guc *guc;
int i, err = 0;
u16 db_id;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -337,7 +339,7 @@ static int igt_guc_doorbells(void *arg)
guc_client_free(clients[i]);
}
unlock:
intel_runtime_pm_put_unchecked(dev_priv);
intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
......
@@ -388,12 +388,13 @@ static int igt_global_reset(void *arg)
static int igt_wedged_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
i915_gem_set_wedged(i915);
GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
@@ -402,7 +403,7 @@ static int igt_wedged_reset(void *arg)
i915_reset(i915, ALL_ENGINES, NULL);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
@@ -1600,6 +1601,7 @@ static int igt_atomic_reset(void *arg)
{ }
};
struct drm_i915_private *i915 = arg;
intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@@ -1609,7 +1611,7 @@ static int igt_atomic_reset(void *arg)
igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
/* Flush any requests before we get started and check basics */
force_reset(i915);
@@ -1636,7 +1638,7 @@ static int igt_atomic_reset(void *arg)
force_reset(i915);
unlock:
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
@@ -1660,6 +1662,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_handle_error),
SUBTEST(igt_atomic_reset),
};
intel_wakeref_t wakeref;
bool saved_hangcheck;
int err;
@@ -1669,7 +1672,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO; /* we're long past hope of a successful reset */
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
@@ -1679,7 +1682,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
return err;
}
@@ -18,13 +18,14 @@ static int live_sanitycheck(void *arg)
struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
@@ -65,7 +66,7 @@ static int live_sanitycheck(void *arg)
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -77,13 +78,14 @@ static int live_preempt(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -158,7 +160,7 @@ static int live_preempt(void *arg)
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -171,13 +173,14 @@ static int live_late_preempt(void *arg)
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -251,7 +254,7 @@ static int live_late_preempt(void *arg)
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -270,6 +273,7 @@ static int live_preempt_hang(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -279,7 +283,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -374,7 +378,7 @@ static int live_preempt_hang(void *arg)
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -562,6 +566,7 @@ static int live_preempt_smoke(void *arg)
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -576,7 +581,7 @@ static int live_preempt_smoke(void *arg)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
intel_runtime_pm_get(smoke.i915);
wakeref = intel_runtime_pm_get(smoke.i915);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
@@ -627,7 +632,7 @@ static int live_preempt_smoke(void *arg)
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
intel_runtime_pm_put_unchecked(smoke.i915);
intel_runtime_pm_put(smoke.i915, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
......
@@ -60,10 +60,11 @@ reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
intel_wakeref_t wakeref;
struct i915_request *rq;
struct i915_vma *vma;
const u32 base = engine->mmio_base;
u32 srm, *cs;
int err;
int i;
@@ -92,9 +93,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
intel_runtime_pm_get(engine->i915);
wakeref = intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
intel_runtime_pm_put_unchecked(engine->i915);
intel_runtime_pm_put(engine->i915, wakeref);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -228,20 +229,21 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
{
struct i915_gem_context *ctx;
struct i915_request *rq;
intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
wakeref = intel_runtime_pm_get(engine->i915);
if (spin)
rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
else
rq = i915_request_alloc(engine, ctx);
intel_runtime_pm_put_unchecked(engine->i915);
intel_runtime_pm_put(engine->i915, wakeref);
kernel_context_close(ctx);
@@ -273,6 +275,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
@@ -298,9 +301,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out;
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
err = reset(engine);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
if (want_spin) {
igt_spinner_end(&spin);
@@ -391,6 +394,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gpu_error *error = &i915->gpu_error;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -400,7 +404,8 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
reference_lists_init(i915, &lists);
ok = verify_gt_engine_wa(i915, &lists, "before reset");
@@ -414,7 +419,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
out:
reference_lists_fini(i915, &lists);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
@@ -429,6 +434,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
struct igt_spinner spin;
enum intel_engine_id id;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct wa_lists lists;
int ret = 0;
@@ -440,7 +446,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
intel_runtime_pm_get(i915);
wakeref = intel_runtime_pm_get(i915);
reference_lists_init(i915, &lists);
for_each_engine(engine, i915, id) {
@@ -496,7 +503,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
err:
reference_lists_fini(i915, &lists);
intel_runtime_pm_put_unchecked(i915);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
......