Commit 220dcfc1 authored by Chris Wilson, committed by Rodrigo Vivi

drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore

If we find ourselves waiting on a MI_SEMAPHORE_WAIT, either within the
user batch or in our own preamble, the engine raises a
GT_WAIT_ON_SEMAPHORE interrupt. We can unmask that interrupt and so
respond to a semaphore wait by yielding the timeslice, if we have
another context to yield to!
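
To make the flow concrete, here is a compact user-space C sketch of the
interrupt path described above. The types, the dummy register read and
names such as handle_semaphore_wait_irq() are illustrative stand-ins, not
the driver's; the real handler is the cs_irq_handler hunk in the diff below:

    #include <stdbool.h>
    #include <stdint.h>

    struct execlists {
            uint32_t yield;      /* CCID latched at semaphore-wait irq */
            bool timer_armed;    /* stand-in for the timeslice timer */
    };

    /* Stand-in for ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI). */
    static uint32_t read_execlist_status_hi(void)
    {
            return 0xdeadbeef;   /* dummy value for the sketch */
    }

    /* On a semaphore-wait interrupt: latch the waiter's CCID and, if a
     * timeslice is already ticking, expire it early so the scheduler
     * runs and may yield to another ready context. */
    static bool handle_semaphore_wait_irq(struct execlists *el)
    {
            el->yield = read_execlist_status_hi();
            if (el->timer_armed) {
                    el->timer_armed = false;
                    return true; /* kick the submission tasklet */
            }
            return false;
    }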

The only real complication is that the interrupt is only generated for
the start of the semaphore wait, and is asynchronous to our
process_csb() -- that is, we may not have registered the timeslice before
we see the interrupt. To ensure we don't miss a potential semaphore
blocking forward progress (e.g. selftests/live_timeslice_preempt) we mark
the interrupt and apply it to the next timeslice regardless of whether it
was active at the time.
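
A minimal sketch of that latch-and-consume pattern, again with illustrative
user-space types rather than the driver's own (the CCID_NONE sentinel
mirrors the driver resetting yield to -1 on each submission):

    #include <stdbool.h>
    #include <stdint.h>

    #define CCID_NONE UINT32_MAX /* driver resets yield to -1 */

    struct slice {
            uint32_t yield;      /* latched CCID, CCID_NONE when clear */
            bool timer_expired;
    };

    /* Consulted on every timeslice check: the latched event counts even
     * if the interrupt arrived before the timer was registered. */
    static bool timeslice_expired(const struct slice *s, uint32_t ccid)
    {
            return s->timer_expired || s->yield == ccid;
    }

    /* Each new submission consumes (clears) the latch. */
    static void on_submit(struct slice *s)
    {
            s->yield = CCID_NONE;
    }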

v2: We use semaphores in preempt-to-busy, within the timeslicing
implementation itself! Ergo, when we do insert a preemption due to an
expired timeslice, the new context may start with the missed semaphore
flagged by the retired context and be yielded, ad infinitum. To avoid
this, read the context id at the time of the semaphore interrupt and
only yield if that context is still active.
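
Sketched the same way, the v2 guard compares the CCID latched at interrupt
time against the active context, so a stale flag left by a retired context
cannot yield its successor (illustrative types; upper_32_bits() is redefined
locally for self-containment):

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint32_t upper_32_bits(uint64_t n)
    {
            return (uint32_t)(n >> 32);
    }

    struct request {
            uint64_t lrc_desc;   /* upper dword carries the context id */
    };

    /* True only while the context that hit MI_SEMAPHORE_WAIT is still on
     * the engine; a retired context's latch no longer matches. */
    static bool timeslice_yield(uint32_t latched_ccid,
                                const struct request *rq)
    {
            return upper_32_bits(rq->lrc_desc) == latched_ccid;
    }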

Fixes: 8ee36e04 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200407130811.17321-1-chris@chris-wilson.co.uk
(cherry picked from commit c4e8ba73)
(cherry picked from commit cd60e4ac4738a6921592c4f7baf87f9a3499f0e2)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent af23facc
@@ -1295,6 +1295,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+	if (HAS_EXECLISTS(dev_priv)) {
+		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+	}
 	drm_printf(m, "\tRING_START: 0x%08x\n",
 		   ENGINE_READ(engine, RING_START));
 	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
...
@@ -156,6 +156,15 @@ struct intel_engine_execlists {
 	 */
 	struct i915_priolist default_priolist;
 
+	/**
+	 * @yield: CCID at the time of the last semaphore-wait interrupt.
+	 *
+	 * Instead of leaving a semaphore busy-spinning on an engine, we would
+	 * like to switch to another ready context, i.e. yielding the semaphore
+	 * timeslice.
+	 */
+	u32 yield;
+
 	/**
 	 * @error_interrupt: CS Master EIR
 	 *
...
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 		}
 	}
 
+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+			     engine->execlists.yield);
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;
@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	const u32 gt_interrupts[] = {
 		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
 		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
...
@@ -1754,7 +1754,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }
 
 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+	       const struct i915_request *rq)
 {
 	int hint;
 
@@ -1768,6 +1769,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return hint >= effective_prio(rq);
 }
 
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+		const struct i915_request *rq)
+{
+	/*
+	 * Once bitten, forever smitten!
+	 *
+	 * If the active context ever busy-waited on a semaphore,
+	 * it will be treated as a hog until the end of its timeslice (i.e.
+	 * until it is scheduled out and replaced by a new submission,
+	 * possibly even its own lite-restore). The HW only sends an interrupt
+	 * on the first miss, and we do not know if that semaphore has been
+	 * signaled, or even if it is now stuck on another semaphore. Play
+	 * safe, yield if it might be stuck -- it will be given a fresh
+	 * timeslice in the near future.
+	 */
+	return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+		  const struct i915_request *rq)
+{
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1783,8 +1810,7 @@ timeslice(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	const struct i915_request *rq = *execlists->active;
@@ -1946,13 +1972,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   timer_expired(&engine->execlists.timer)) {
+			   timeslice_expired(execlists, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
+				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
 				     last->fence.context,
 				     last->fence.seqno,
 				     last->sched.attr.priority,
-				     execlists->queue_priority_hint);
+				     execlists->queue_priority_hint,
+				     yesno(timeslice_yield(execlists, last)));
 
 			ring_set_paused(engine, 1);
 			defer_active(engine);
@@ -2213,6 +2240,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 		clear_ports(port + 1, last_port - port);
 
+		WRITE_ONCE(execlists->yield, -1);
 		execlists_submit_ports(engine);
 		set_preempt_timeout(engine, *active);
 	} else {
@@ -4452,6 +4480,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
...
@@ -929,7 +929,7 @@ create_rewinder(struct intel_context *ce,
 		goto err;
 	}
 
-	cs = intel_ring_begin(rq, 10);
+	cs = intel_ring_begin(rq, 14);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err;
@@ -941,8 +941,8 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = MI_SEMAPHORE_WAIT |
 		MI_SEMAPHORE_GLOBAL_GTT |
 		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_NEQ_SDD;
-	*cs++ = 0;
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = idx;
 	*cs++ = offset;
 	*cs++ = 0;
 
@@ -951,6 +951,11 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = offset + idx * sizeof(u32);
 	*cs++ = 0;
 
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = idx + 1;
+
 	intel_ring_advance(rq, cs);
 
 	rq->sched.attr.priority = I915_PRIORITY_MASK;
@@ -984,7 +989,7 @@ static int live_timeslice_rewind(void *arg)
 
 	for_each_engine(engine, gt, id) {
 		enum { A1, A2, B1 };
-		enum { X = 1, Y, Z };
+		enum { X = 1, Z, Y };
 		struct i915_request *rq[3] = {};
 		struct intel_context *ce;
 		unsigned long heartbeat;
@@ -1017,13 +1022,13 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[0] = create_rewinder(ce, NULL, slot, 1);
+		rq[0] = create_rewinder(ce, NULL, slot, X);
 		if (IS_ERR(rq[0])) {
 			intel_context_put(ce);
 			goto err;
 		}
 
-		rq[1] = create_rewinder(ce, NULL, slot, 2);
+		rq[1] = create_rewinder(ce, NULL, slot, Y);
 		intel_context_put(ce);
 		if (IS_ERR(rq[1]))
 			goto err;
@@ -1041,7 +1046,7 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[2] = create_rewinder(ce, rq[0], slot, 3);
+		rq[2] = create_rewinder(ce, rq[0], slot, Z);
 		intel_context_put(ce);
 		if (IS_ERR(rq[2]))
 			goto err;
@@ -1055,15 +1060,12 @@ static int live_timeslice_rewind(void *arg)
 		GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
 
 		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
-		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
-		GEM_BUG_ON(!i915_request_is_active(rq[A2]));
-		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
-		/* Wait for the timeslice to kick in */
-		del_timer(&engine->execlists.timer);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-		intel_engine_flush_submission(engine);
+		if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+			/* Wait for the timeslice to kick in */
+			del_timer(&engine->execlists.timer);
+			tasklet_hi_schedule(&engine->execlists.tasklet);
+			intel_engine_flush_submission(engine);
+		}
 
 		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
 		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
 		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
...
@@ -3094,6 +3094,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT		REG_BIT(11) /* bdw+ */
 #define GT_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 << 5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 << 4)
...