Commit ea0c76f8 authored by Oscar Mateo, committed by Daniel Vetter

drm/i915: Emphasize that ctx->obj & ctx->is_initialized refer to the legacy rcs ctx

We have already mentioned that Logical Ring Contexts have their own kind
of backing object, but everything will be better explained in the Execlists
series. For now, suffice it to say that the current backing object is only
ever used with the render ring, so we're making this fact more explicit
(which is a good reason on its own).

As for the is_initialized flag, we only use it to signify that the render state
has been initialized (a.k.a. golden context, a.k.a. null context). It doesn't
mean anything for the other engines, so make that distinction obvious.
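
For reference, this is roughly how the two fields end up grouped inside
struct intel_context after the patch (layout taken from the i915_drv.h hunk
below; the comment is only illustrative and not part of the change):

    struct intel_context {
            struct kref ref;
            int id;
            uint8_t remap_slice;
            struct drm_i915_file_private *file_priv;
            struct i915_ctx_hang_stats hang_stats;
            struct i915_address_space *vm;

            /* Only meaningful for legacy (non-Execlists) ring submission,
             * and only ever backed for the render ring. */
            struct {
                    struct drm_i915_gem_object *rcs_state;
                    bool initialized;
            } legacy_hw_ctx;

            struct list_head link;
    };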

Done with the following Coccinelle patch (plus manual frobbing of the struct):

    @@
    struct intel_context c;
    @@
    - (c).obj
    + c.legacy_hw_ctx.rcs_state

    @@
    struct intel_context *c;
    @@
    - (c)->obj
    + c->legacy_hw_ctx.rcs_state

    @@
    struct intel_context c;
    @@
    - (c).is_initialized
    + c.legacy_hw_ctx.initialized

    @@
    struct intel_context *c;
    @@
    - (c)->is_initialized
    + c->legacy_hw_ctx.initialized
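
To make the transformation concrete, a made-up call site (not a line from the
diff below) such as

    if (ctx->obj == NULL || !ctx->is_initialized)
            return;

is rewritten by the rules above into

    if (ctx->legacy_hw_ctx.rcs_state == NULL || !ctx->legacy_hw_ctx.initialized)
            return;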

This Execlists prep-work patch was suggested independently by Chris Wilson
and Daniel Vetter.

Initially, it was two separate patches:
drm/i915: Rename ctx->obj to ctx->rcs_state
drm/i915: Make it obvious that ctx->id is merely a user handle
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: s/id/is_initialized/ to fix the subject and resolve a
conflict in i915_gem_context_reset. Also introduce a new lctx local
variable to avoid overly long lines.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent aa0c13da
@@ -176,7 +176,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -1753,7 +1753,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 		seq_puts(m, "HW context ");
@@ -1762,7 +1762,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}

@@ -589,13 +589,16 @@ struct i915_ctx_hang_stats {
 struct intel_context {
 	struct kref ref;
 	int id;
-	bool is_initialized;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
 	struct list_head link;
 };

@@ -182,14 +182,14 @@ void i915_gem_context_free(struct kref *ctx_ref)
 						 typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
-	if (ctx->obj) {
+	if (ctx->legacy_hw_ctx.rcs_state) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
 		/* XXX: Free up the object before tearing down the address space, in
 		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	}
 	if (ppgtt)
@@ -270,7 +270,7 @@ __create_hw_context(struct drm_device *dev,
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->obj = obj;
+		ctx->legacy_hw_ctx.rcs_state = obj;
 	}
 	/* Default context will never have a file_priv */
@@ -317,7 +317,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
-	if (is_global_default_ctx && ctx->obj) {
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -325,7 +325,7 @@ i915_gem_create_context(struct drm_device *dev,
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -365,8 +365,8 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -382,26 +382,27 @@ void i915_gem_context_reset(struct drm_device *dev)
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
 		/* Do a fake switch to the default context */
-		if (ring->last_context == dctx)
+		if (lctx == dctx)
 			continue;
-		if (!ring->last_context)
+		if (!lctx)
 			continue;
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
 		}
-		if (ring->last_context->obj && i == RCS)
-			i915_gem_object_ggtt_unpin(ring->last_context->obj);
-		i915_gem_context_unreference(ring->last_context);
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
 		ring->last_context = dctx;
 	}
@@ -448,7 +449,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
-	if (dctx->obj) {
+	if (dctx->legacy_hw_ctx.rcs_state) {
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
@@ -463,13 +464,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
-		i915_gem_object_ggtt_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -589,7 +590,7 @@ mi_set_context(struct intel_engine_cs *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
@@ -621,8 +622,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	int ret, i;
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 	if (from == to && !to->remap_slice)
@@ -630,7 +631,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -663,17 +664,17 @@ static int do_switch(struct intel_engine_cs *ring,
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
 	if (ret)
 		goto unpin_out;
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
 	}
-	if (!to->is_initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 	ret = mi_set_context(ring, to, hw_flags);
@@ -699,8 +700,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -708,16 +709,16 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
-	uninitialized = !to->is_initialized && from == NULL;
-	to->is_initialized = true;
+	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	to->legacy_hw_ctx.initialized = true;
 done:
 	i915_gem_context_reference(to);
@@ -733,7 +734,7 @@ static int do_switch(struct intel_engine_cs *ring,
 unpin_out:
 	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
@@ -754,7 +755,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)