Commit c53d1313 authored by Chris Wilson

drm/i915/selftests: Take a ref to the request we wait upon

i915_request_add() consumes the passed in reference to the i915_request,
so if the selftest caller wishes to wait upon it afterwards, it needs to
take a reference for itself.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120102741.3734346-1-chris@chris-wilson.co.uk
parent e6689501
...@@ -73,25 +73,34 @@ static int live_nop_switch(void *arg) ...@@ -73,25 +73,34 @@ static int live_nop_switch(void *arg)
} }
for_each_uabi_engine(engine, i915) { for_each_uabi_engine(engine, i915) {
struct i915_request *rq; struct i915_request *rq = NULL;
unsigned long end_time, prime; unsigned long end_time, prime;
ktime_t times[2] = {}; ktime_t times[2] = {};
times[0] = ktime_get_raw(); times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) { for (n = 0; n < nctx; n++) {
rq = igt_request_alloc(ctx[n], engine); struct i915_request *this;
if (IS_ERR(rq)) {
err = PTR_ERR(rq); this = igt_request_alloc(ctx[n], engine);
if (IS_ERR(this)) {
err = PTR_ERR(this);
goto out_file; goto out_file;
} }
i915_request_add(rq); if (rq) {
i915_request_await_dma_fence(this, &rq->fence);
i915_request_put(rq);
}
rq = i915_request_get(this);
i915_request_add(this);
} }
if (i915_request_wait(rq, 0, HZ / 5) < 0) { if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx); pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt); intel_gt_set_wedged(&i915->gt);
i915_request_put(rq);
err = -EIO; err = -EIO;
goto out_file; goto out_file;
} }
i915_request_put(rq);
times[1] = ktime_get_raw(); times[1] = ktime_get_raw();
...@@ -106,13 +115,21 @@ static int live_nop_switch(void *arg) ...@@ -106,13 +115,21 @@ static int live_nop_switch(void *arg)
for_each_prime_number_from(prime, 2, 8192) { for_each_prime_number_from(prime, 2, 8192) {
times[1] = ktime_get_raw(); times[1] = ktime_get_raw();
rq = NULL;
for (n = 0; n < prime; n++) { for (n = 0; n < prime; n++) {
rq = igt_request_alloc(ctx[n % nctx], engine); struct i915_request *this;
if (IS_ERR(rq)) {
err = PTR_ERR(rq); this = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(this)) {
err = PTR_ERR(this);
goto out_file; goto out_file;
} }
if (rq) { /* Force submission order */
i915_request_await_dma_fence(this, &rq->fence);
i915_request_put(rq);
}
/* /*
* This space is left intentionally blank. * This space is left intentionally blank.
* *
...@@ -127,14 +144,18 @@ static int live_nop_switch(void *arg) ...@@ -127,14 +144,18 @@ static int live_nop_switch(void *arg)
* for latency. * for latency.
*/ */
i915_request_add(rq); rq = i915_request_get(this);
i915_request_add(this);
} }
GEM_BUG_ON(!rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) { if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n", pr_err("Switching between %ld contexts timed out\n",
prime); prime);
intel_gt_set_wedged(&i915->gt); intel_gt_set_wedged(&i915->gt);
i915_request_put(rq);
break; break;
} }
i915_request_put(rq);
times[1] = ktime_sub(ktime_get_raw(), times[1]); times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 2) if (prime == 2)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment