Commit 70c2a24d authored by Chris Wilson

drm/i915: Simplify ELSP queue request tracking

Emulate HW to track and manage ELSP queue. A set of SW ports are defined
and requests are assigned to these ports before submitting them to HW. This
makes cleaning up incomplete requests during reset recovery easier,
especially after engine reset, by decoupling elsp queue management. This
will become clearer in the next patch.

In the engine reset case we want to resume where we left off after skipping
the incomplete batch which requires checking the elsp queue, removing
element and fixing elsp_submitted counts in some cases. Instead of directly
manipulating the elsp queue from reset path we can examine these ports, fix
up ringbuffer pointers using the incomplete request and restart submissions
again after reset.

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1470414607-32453-3-git-send-email-arun.siluvery@linux.intel.com
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-6-chris@chris-wilson.co.uk
parent bbd6c47e
...@@ -2051,7 +2051,7 @@ static int i915_execlists(struct seq_file *m, void *data) ...@@ -2051,7 +2051,7 @@ static int i915_execlists(struct seq_file *m, void *data)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = engine->next_context_status_buffer; read_pointer = GEN8_CSB_READ_PTR(status_pointer);
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer) if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES; write_pointer += GEN8_CSB_ENTRIES;
......
...@@ -2575,6 +2575,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) ...@@ -2575,6 +2575,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
struct intel_ring *ring; struct intel_ring *ring;
/* Ensure irq handler finishes, and not run again. */
tasklet_kill(&engine->irq_tasklet);
/* Mark all pending requests as complete so that any concurrent /* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we * (lockless) lookup doesn't try and wait upon the request as we
* reset it. * reset it.
...@@ -2588,10 +2591,12 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) ...@@ -2588,10 +2591,12 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
*/ */
if (i915.enable_execlists) { if (i915.enable_execlists) {
/* Ensure irq handler finishes or is cancelled. */ spin_lock(&engine->execlist_lock);
tasklet_kill(&engine->irq_tasklet); INIT_LIST_HEAD(&engine->execlist_queue);
i915_gem_request_put(engine->execlist_port[0].request);
intel_execlists_cancel_requests(engine); i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
spin_unlock(&engine->execlist_lock);
} }
/* /*
......
...@@ -402,7 +402,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, ...@@ -402,7 +402,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->previous_context = NULL; req->previous_context = NULL;
req->file_priv = NULL; req->file_priv = NULL;
req->batch = NULL; req->batch = NULL;
req->elsp_submitted = 0;
/* /*
* Reserve space in the ring buffer for all the commands required to * Reserve space in the ring buffer for all the commands required to
......
...@@ -137,27 +137,8 @@ struct drm_i915_gem_request { ...@@ -137,27 +137,8 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */ /** file_priv list entry for this request */
struct list_head client_list; struct list_head client_list;
/** /** Link in the execlist submission queue, guarded by execlist_lock. */
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
* hardware is available. The queue serves a double purpose: we also use
* it to keep track of the up to 2 contexts currently in the hardware
* (usually one in execution and the other queued up by the GPU): We
* only remove elements from the head of the queue when the hardware
* informs us that an element has been completed.
*
* All accesses to the queue are mediated by a spinlock
* (ring->execlist_lock).
*/
/** Execlist link in the submission queue.*/
struct list_head execlist_link; struct list_head execlist_link;
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
/** Execlists context hardware id. */
unsigned int ctx_hw_id;
}; };
extern const struct fence_ops i915_fence_ops; extern const struct fence_ops i915_fence_ops;
......
This diff is collapsed.
...@@ -97,6 +97,4 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, ...@@ -97,6 +97,4 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists); int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv); void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */ #endif /* _INTEL_LRC_H_ */
...@@ -298,11 +298,14 @@ struct intel_engine_cs { ...@@ -298,11 +298,14 @@ struct intel_engine_cs {
/* Execlists */ /* Execlists */
struct tasklet_struct irq_tasklet; struct tasklet_struct irq_tasklet;
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
struct list_head execlist_queue; struct list_head execlist_queue;
unsigned int fw_domains; unsigned int fw_domains;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa; bool disable_lite_restore_wa;
bool preempt_wa;
u32 ctx_desc_template; u32 ctx_desc_template;
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment