Commit 6702cf16 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Initialize all contexts

The problem is we're going to switch to a new context, which could be
the default context. The plan was to use restore inhibit, which would be
fine, except if we are using dynamic page tables (which we will). If we
use dynamic page tables and we don't load new page tables, the previous
page tables might go away, and future operations will fault.

CTXA runs.
Switch to the default context with restore inhibit.
CTXA dies and has its address space taken away.
CTXB runs and tries to save using context A's address space - this
fails.

The general solution is to make sure every context has its own state,
and its own address space. For cases where we must inhibit the restore,
the first thing we do is load a valid address space. I thought this
would be enough, but apparently there are references within the context
itself which will refer to the old address space - therefore, we also
must reinitialize.
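
In short, an inhibited restore must always be paired with an explicit
page-table load afterwards. A condensed sketch of the switch-path logic
this patch arrives at (paraphrased from the diff below; error handling
and the surrounding do_switch() scaffolding are elided):

	/* Sketch: flag decision in do_switch() after this patch.
	 * An uninitialized context gets MI_RESTORE_INHIBIT, which in
	 * turn makes needs_pd_load_post() request an explicit page
	 * table load after mi_set_context().
	 */
	if (!to->legacy_hw_ctx.initialized)
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (to->ppgtt &&
		 test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
		hw_flags |= MI_FORCE_RESTORE;

	ret = mi_set_context(ring, to, hw_flags);
	...
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
	}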

v2: to->ppgtt is only valid in full ppgtt.
v3: Rebased.
v4: Make post PDP update clearer.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 563222a7
@@ -609,7 +609,8 @@ needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+		   u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
@@ -622,7 +623,7 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
 	if (ring != &dev_priv->ring[RCS])
 		return false;
 
-	if (to->ppgtt->pd_dirty_rings)
+	if (hw_flags & MI_RESTORE_INHIBIT)
 		return true;
 
 	return false;
@@ -661,9 +662,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	from = ring->last_context;
 
-	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) && needs_pd_load_post(ring, to));
-
 	if (needs_pd_load_pre(ring, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -706,16 +704,28 @@ static int do_switch(struct intel_engine_cs *ring,
 			goto unpin_out;
 	}
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized) {
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (to->ppgtt && test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+		/* NB: If we inhibit the restore, the context is not allowed to
+		 * die because future work may end up depending on valid address
+		 * space. This means we must enforce that a page table load
+		 * occur when this occurs. */
+	} else if (to->ppgtt &&
+		   test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
 		hw_flags |= MI_FORCE_RESTORE;
 
+	/* We should never emit switch_mm more than once */
+	WARN_ON(needs_pd_load_pre(ring, to) &&
+		needs_pd_load_post(ring, to, hw_flags));
+
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret)
 		goto unpin_out;
 
-	if (needs_pd_load_post(ring, to)) {
+	/* GEN8 does *not* require an explicit reload if the PDPs have been
+	 * setup, and we do not wish to move them.
+	 */
+	if (needs_pd_load_post(ring, to, hw_flags)) {
 		trace_switch_mm(ring, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
 		/* The hardware context switch is emitted, but we haven't
@@ -766,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	uninitialized = !to->legacy_hw_ctx.initialized;
 	to->legacy_hw_ctx.initialized = true;
 
 done:
...
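
For reference, a reconstruction of how the complete predicate reads
after this patch. The two hunks above show only fragments; the guards
hidden in the collapsed context (the !to->ppgtt and GEN8 checks) are an
assumption inferred from the surrounding code, not shown by this diff:

	static bool
	needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
			   u32 hw_flags)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;

		if (!to->ppgtt)
			return false;	/* assumed: hidden by collapsed context */

		if (!IS_GEN8(ring->dev))
			return false;	/* assumed: only GEN8 defers the PD load */

		/* shown in the diff: only the render ring takes this path */
		if (ring != &dev_priv->ring[RCS])
			return false;

		/* an inhibited restore leaves the context without a valid
		 * address space, so a page table load must follow */
		if (hw_flags & MI_RESTORE_INHIBIT)
			return true;

		return false;
	}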