Commit 5d266692 authored by Chris Wilson, committed by Jani Nikula

drm/i915: Filter out spurious execlists context-switch interrupts

Back in commit a4b2b015 ("drm/i915: Don't mark an execlists
context-switch when idle") we noticed the presence of late
context-switch interrupts. We were able to filter those out by looking
at whether the ELSP remained active, but in commit beecec90
("drm/i915/execlists: Preemption!") that became problematic as we now
anticipate receiving a context-switch event for preemption while ELSP
may be empty. To restore the spurious interrupt suppression, add a
counter for the expected number of pending context-switches and skip if
we do not need to handle this interrupt to make forward progress.
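
For illustration only, a minimal standalone sketch of that filtering idea
(hypothetical names, not the i915 interrupt handler itself): the event is
only processed while a submission is known to be outstanding, otherwise it
is dropped as spurious.

  /* Toy model of the filter; all names here are illustrative. */
  #include <stdbool.h>
  #include <stdio.h>

  struct engine_model {
          bool expecting_events; /* set at submission, cleared on final completion */
  };

  static void cs_interrupt(struct engine_model *e)
  {
          if (!e->expecting_events) {
                  puts("late context-switch interrupt: ignored");
                  return;
          }
          puts("context-switch event: process CSB, maybe resubmit");
  }

  int main(void)
  {
          struct engine_model e = { .expecting_events = false };

          cs_interrupt(&e);          /* nothing in flight: ignored */
          e.expecting_events = true; /* a context was just submitted */
          cs_interrupt(&e);          /* handled normally */
          return 0;
  }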

v2: Don't forget to switch on for preempt.
v3: Reduce the counter to an on/off boolean tracker. Declare the HW as
active when we first submit, and idle after the final completion event
(with which we confirm the HW says it is idle), and track each source
of activity separately. With a finite number of sources, it should aid
us in debugging which gets stuck.
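
Purely as an illustration of that v3 scheme (names are hypothetical, not
the helpers added in the diff below, which uses bit indices rather than
masks): each submission source owns one bit in a small bitfield, the HW
counts as busy while any bit remains set, and printing the raw value shows
which source is still pending.

  #include <stdio.h>

  #define ACTIVE_USER    (1u << 0) /* ordinary ELSP submission */
  #define ACTIVE_PREEMPT (1u << 1) /* preempt-to-idle in flight */

  struct execlists_model {
          unsigned int active; /* one bit per source of HW activity */
  };

  static void set_active(struct execlists_model *el, unsigned int bit)
  {
          el->active |= bit;
  }

  static void clear_active(struct execlists_model *el, unsigned int bit)
  {
          el->active &= ~bit;
  }

  static int hw_is_busy(const struct execlists_model *el)
  {
          return el->active != 0;
  }

  int main(void)
  {
          struct execlists_model el = { 0 };

          set_active(&el, ACTIVE_USER);    /* first submission: HW active */
          set_active(&el, ACTIVE_PREEMPT); /* preemption also outstanding */
          clear_active(&el, ACTIVE_USER);  /* user contexts completed */

          /* prints busy=1 active=0x2: preemption is the source still pending */
          printf("busy=%d active=0x%x\n", hw_is_busy(&el), el.active);
          return 0;
  }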

Fixes: beecec90 ("drm/i915/execlists: Preemption!")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171023213237.26536-3-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
(cherry picked from commit 4a118ecb)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 8a6fb5b5
@@ -610,6 +610,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
+		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
 		i915_guc_submit(engine);
 	}
 	spin_unlock_irq(&engine->timeline->lock);
@@ -633,6 +634,8 @@ static void i915_guc_irq_handler(unsigned long data)
 		rq = port_request(&port[0]);
 	}
+	if (!rq)
+		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 	if (!port_isset(last_port))
 		i915_guc_dequeue(engine);
...
@@ -1388,8 +1388,10 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 	bool tasklet = false;
 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-		__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-		tasklet = true;
+		if (READ_ONCE(engine->execlists.active)) {
+			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+			tasklet = true;
+		}
 	}
 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
...
@@ -1548,8 +1548,8 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
 		return false;
-	/* Both ports drained, no more ELSP submission? */
-	if (port_request(&engine->execlists.port[0]))
+	/* Waiting to drain ELSP? */
+	if (READ_ONCE(engine->execlists.active))
 		return false;
 	/* ELSP is empty, but there are ready requests? */
@@ -1749,6 +1749,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 					   idx);
 			}
 		}
+		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
 		rcu_read_unlock();
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
...
@@ -575,7 +575,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * the state of the GPU is known (idle).
 			 */
 			inject_preempt_context(engine);
-			execlists->preempt = true;
+			execlists_set_active(execlists,
+					     EXECLISTS_ACTIVE_PREEMPT);
 			goto unlock;
 		} else {
 			/*
@@ -683,8 +684,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 unlock:
 	spin_unlock_irq(&engine->timeline->lock);
-	if (submit)
+	if (submit) {
+		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
 		execlists_submit_ports(engine);
+	}
 }
 static void
@@ -696,6 +699,7 @@ execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct drm_i915_gem_request *rq = port_request(port);
+		GEM_BUG_ON(!execlists->active);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
 		i915_gem_request_put(rq);
@@ -861,15 +865,21 @@ static void intel_lrc_irq_handler(unsigned long data)
 				unwind_incomplete_requests(engine);
 				spin_unlock_irq(&engine->timeline->lock);
-				GEM_BUG_ON(!execlists->preempt);
-				execlists->preempt = false;
+				GEM_BUG_ON(!execlists_is_active(execlists,
+								EXECLISTS_ACTIVE_PREEMPT));
+				execlists_clear_active(execlists,
+						       EXECLISTS_ACTIVE_PREEMPT);
 				continue;
 			}
 			if (status & GEN8_CTX_STATUS_PREEMPTED &&
-			    execlists->preempt)
+			    execlists_is_active(execlists,
+						EXECLISTS_ACTIVE_PREEMPT))
 				continue;
+			GEM_BUG_ON(!execlists_is_active(execlists,
+							EXECLISTS_ACTIVE_USER));
 			/* Check the context/desc id for this event matches */
 			GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
@@ -892,6 +902,9 @@ static void intel_lrc_irq_handler(unsigned long data)
 			/* After the final element, the hw should be idle */
 			GEM_BUG_ON(port_count(port) == 0 &&
 				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+			if (port_count(port) == 0)
+				execlists_clear_active(execlists,
+						       EXECLISTS_ACTIVE_USER);
 		}
 		if (head != execlists->csb_head) {
@@ -901,7 +914,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 		}
 	}
-	if (!execlists->preempt)
+	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
 		execlists_dequeue(engine);
 	intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
@@ -1460,7 +1473,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 	execlists->csb_head = -1;
-	execlists->preempt = false;
+	execlists->active = 0;
 	/* After a GPU reset, we may have requests to replay */
 	if (!i915_modparams.enable_guc_submission && execlists->first)
...
@@ -241,9 +241,17 @@ struct intel_engine_execlists {
 	} port[EXECLIST_MAX_PORTS];

 	/**
-	 * @preempt: are we currently handling a preempting context switch?
+	 * @active: is the HW active? We consider the HW as active after
+	 * submitting any context for execution and until we have seen the
+	 * last context completion event. After that, we do not expect any
+	 * more events until we submit, and so can park the HW.
+	 *
+	 * As we have a small number of different sources from which we feed
+	 * the HW, we track the state of each inside a single bitfield.
 	 */
-	bool preempt;
+	unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1

 	/**
 	 * @port_mask: number of execlist ports - 1
@@ -525,6 +533,27 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };

+static inline void
+execlists_set_active(struct intel_engine_execlists *execlists,
+		     unsigned int bit)
+{
+	__set_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline void
+execlists_clear_active(struct intel_engine_execlists *execlists,
+		       unsigned int bit)
+{
+	__clear_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline bool
+execlists_is_active(const struct intel_engine_execlists *execlists,
+		    unsigned int bit)
+{
+	return test_bit(bit, (unsigned long *)&execlists->active);
+}
+
 static inline unsigned int
 execlists_num_ports(const struct intel_engine_execlists * const execlists)
 {
@@ -538,6 +567,7 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
 	const unsigned int m = execlists->port_mask;

 	GEM_BUG_ON(port_index(port, execlists) != 0);
+	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

 	memmove(port, port + 1, m * sizeof(struct execlist_port));
 	memset(port + m, 0, sizeof(struct execlist_port));
...