Commit 982b1d00 authored by Chris Wilson, committed by Jani Nikula

drm/i915: Lock the engine while dumping the active request

We cannot let the request be retired and freed while we are trying to
dump it during error capture. It is not sufficient just to grab a
reference to the request, as during retirement we may free the ring
which we are also dumping. So take the engine lock to prevent retiring
and freeing of the request.
Reported-by: Alex Shumsky <alexthreed@gmail.com>
Fixes: 83c31783 ("drm/i915: Dump the ringbuffer of the active request for debugging")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Alex Shumsky <alexthreed@gmail.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190715080946.15593-6-chris@chris-wilson.co.uk
(cherry picked from commit cfe7288c)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 8f48de49
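In short, the change replaces the RCU-only protection in the dump and error-capture paths with engine->active.lock held across intel_engine_find_active_request() and the subsequent ring dump, since only that lock excludes the retirement path that frees the ring. The following userspace sketch is an analogy of the locking pattern, not the driver code; all names are invented and a pthread mutex stands in for the engine spinlock:

/*
 * Minimal userspace analogy of the fix (NOT the i915 code): "retire" frees
 * a request and its ring under the same lock that "dump" holds while
 * walking the list, so the ring cannot vanish mid-dump. A reference on a
 * single request alone would not keep the ring alive.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	int seqno;
	int *ring;		/* freed when the request is retired */
	struct request *next;
};

static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *active_list;

/* Retirement path: unlink and free under active_lock
 * (the role played by engine->active.lock in the patch). */
static void retire_requests(void)
{
	pthread_mutex_lock(&active_lock);
	while (active_list) {
		struct request *rq = active_list;

		active_list = rq->next;
		free(rq->ring);
		free(rq);
	}
	pthread_mutex_unlock(&active_lock);
}

/* Dump path: hold active_lock for the whole walk so neither the
 * request nor its ring can be retired and freed underneath us. */
static void dump_active_requests(void)
{
	const struct request *rq;

	pthread_mutex_lock(&active_lock);
	for (rq = active_list; rq; rq = rq->next)
		printf("active request %d, ring[0]=%d\n", rq->seqno, rq->ring[0]);
	pthread_mutex_unlock(&active_lock);
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	rq->seqno = 1;
	rq->ring = calloc(4, sizeof(*rq->ring));
	active_list = rq;

	dump_active_requests();	/* safe: lock held across the whole walk */
	retire_requests();	/* frees the ring only after the dump is done */
	return 0;
}

In the driver itself the equivalent is spin_lock_irqsave(&engine->active.lock, flags) taken around intel_engine_find_active_request() and the ring dump, as the hunks below show.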
@@ -1471,6 +1471,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
+	unsigned long flags;
 
 	if (header) {
 		va_list ap;
@@ -1490,10 +1491,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
 
-	rcu_read_lock();
-
 	drm_printf(m, "\tRequests:\n");
 
+	spin_lock_irqsave(&engine->active.lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1513,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 		print_request_ring(m, rq);
 	}
-
-	rcu_read_unlock();
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
 	if (wakeref) {
@@ -1672,7 +1671,6 @@ struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *request, *active = NULL;
-	unsigned long flags;
 
 	/*
 	 * We are called by the error capture, reset and to dump engine
@@ -1685,7 +1683,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
@@ -1700,7 +1698,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return active;
 }
@@ -1418,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		struct intel_engine_cs *engine = i915->engine[i];
 		struct drm_i915_error_engine *ee = &error->engine[i];
 		struct i915_request *request;
+		unsigned long flags;
 
 		ee->engine_id = -1;
 
@@ -1429,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		error_record_engine_registers(error, engine, ee);
 		error_record_engine_execlists(engine, ee);
 
+		spin_lock_irqsave(&engine->active.lock, flags);
 		request = intel_engine_find_active_request(engine);
 		if (request) {
 			struct i915_gem_context *ctx = request->gem_context;
-			struct intel_ring *ring;
+			struct intel_ring *ring = request->ring;
 
 			ee->vm = ctx->vm ?: &ggtt->vm;
 
@@ -1462,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			ee->rq_post = request->postfix;
 			ee->rq_tail = request->tail;
 
-			ring = request->ring;
 			ee->cpu_ring_head = ring->head;
 			ee->cpu_ring_tail = ring->tail;
 			ee->ringbuffer =
@@ -1470,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			engine_record_requests(engine, request, ee);
 		}
+		spin_unlock_irqrestore(&engine->active.lock, flags);
 
 		ee->hws_page =
 			i915_error_object_create(i915,