Commit bca44d80 authored by Chris Wilson

drm/i915: Merge legacy+execlists context structs

struct i915_gem_context contains two substructs, one for the legacy RCS and
one for every execlists engine. Since legacy RCS is a subset of the
execlists engine support, just combine the two substructs.

v2: Only pin the default context for legacy mode (the object only exists
for legacy, but adding the i915.enable_execlists check provides symmetry
with the cleanup functions).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1464098023-3294-8-git-send-email-chris@chris-wilson.co.uk
parent 0ca5fa3a
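
For orientation before the diff, here is a minimal, compilable sketch of the layout change. The field names are taken from the diff itself; the `_before`/`_after` suffixes, the engine count, and the forward declarations are illustrative stand-ins, not the driver's real definitions (those live in i915_drv.h).

#include <stdbool.h>

struct drm_i915_gem_object;	/* stand-in forward declarations */
struct intel_ringbuffer;

#define I915_NUM_ENGINES 5	/* illustrative; the driver defines the real value */
#define RCS 0			/* render engine, first slot in engine[] */

/* Before: the context carried a legacy-only substruct next to the
 * per-engine execlists array. */
struct i915_gem_context_before {
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;			/* legacy ring buffer submission */

	struct intel_context_before {		/* execlists only */
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		bool initialised;
		int pin_count;
	} engine[I915_NUM_ENGINES];
};

/* After: one per-engine substruct serves both submission paths, so
 *   ctx->legacy_hw_ctx.rcs_state   becomes  ctx->engine[RCS].state
 *   ctx->legacy_hw_ctx.initialized becomes  ctx->engine[RCS].initialised */
struct i915_gem_context_after {
	struct intel_context {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		bool initialised;
		int pin_count;
	} engine[I915_NUM_ENGINES];
};

Every legacy_hw_ctx access in the diff below follows that mapping, with legacy-only paths additionally guarded by !i915.enable_execlists.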
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
-static void describe_ctx(struct seq_file *m, struct i915_gem_context *ctx)
-{
-	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
-	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
-	seq_putc(m, ' ');
-}
-
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -2001,7 +1994,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2009,10 +2001,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (!i915.enable_execlists &&
-		    ctx->legacy_hw_ctx.rcs_state == NULL)
-			continue;
-
 		seq_printf(m, "HW context %u ", ctx->hw_id);
 		if (IS_ERR(ctx->file_priv)) {
 			seq_puts(m, "(deleted) ");
@@ -2030,25 +2018,19 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_puts(m, "(kernel) ");
 		}
 
-		describe_ctx(m, ctx);
+		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 		seq_putc(m, '\n');
 
-		if (i915.enable_execlists) {
-			seq_putc(m, '\n');
-			for_each_engine_id(engine, dev_priv, id) {
-				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[id].state;
-				struct intel_ringbuffer *ringbuf =
-					ctx->engine[id].ringbuf;
-
-				seq_printf(m, "%s: ", engine->name);
-				if (ctx_obj)
-					describe_obj(m, ctx_obj);
-				if (ringbuf)
-					describe_ctx_ringbuf(m, ringbuf);
-				seq_putc(m, '\n');
-			}
-		} else {
-			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+		for_each_engine(engine, dev_priv) {
+			struct intel_context *ce = &ctx->engine[engine->id];
+
+			seq_printf(m, "%s: ", engine->name);
+			seq_putc(m, ce->initialised ? 'I' : 'i');
+			if (ce->state)
+				describe_obj(m, ce->state);
+			if (ce->ringbuf)
+				describe_ctx_ringbuf(m, ce->ringbuf);
+			seq_putc(m, '\n');
 		}
 
 		seq_putc(m, '\n');
@@ -2063,10 +2045,10 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct i915_gem_context *ctx,
 			      struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
...
@@ -862,13 +862,6 @@ struct i915_gem_context {
 	/* Unique identifier for this context, used by the hw for tracking */
 	unsigned hw_id;
 
-	/* Legacy ring buffer submission */
-	struct {
-		struct drm_i915_gem_object *rcs_state;
-		bool initialized;
-	} legacy_hw_ctx;
-
-	/* Execlists */
 	struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
...
@@ -152,13 +152,11 @@ static void i915_gem_context_clean(struct i915_gem_context *ctx)
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	int i;
 
 	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	trace_i915_context_free(ctx);
 
-	if (i915.enable_execlists)
-		intel_lr_context_free(ctx);
-
 	/*
 	 * This context is going away and we need to remove all VMAs still
 	 * around. This is to handle imported shared objects for which
@@ -168,8 +166,19 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	if (ctx->legacy_hw_ctx.rcs_state)
-		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_context *ce = &ctx->engine[i];
+
+		if (!ce->state)
+			continue;
+
+		WARN_ON(ce->pin_count);
+		if (ce->ringbuf)
+			intel_ringbuffer_free(ce->ringbuf);
+
+		drm_gem_object_unreference(&ce->state->base);
+	}
+
 	list_del(&ctx->link);
 
 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -266,7 +275,7 @@ __create_hw_context(struct drm_device *dev,
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->legacy_hw_ctx.rcs_state = obj;
+		ctx->engine[RCS].state = obj;
 	}
 
 	/* Default context will never have a file_priv */
@@ -336,8 +345,11 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 	if (i915.enable_execlists) {
 		intel_lr_context_unpin(ctx, engine);
 	} else {
-		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		struct intel_context *ce = &ctx->engine[engine->id];
+
+		if (ce->state)
+			i915_gem_object_ggtt_unpin(ce->state);
+
 		i915_gem_context_unreference(ctx);
 	}
 }
@@ -401,7 +413,7 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	if (ctx->legacy_hw_ctx.rcs_state) {
+	if (!i915.enable_execlists && ctx->engine[RCS].state) {
 		int ret;
 
 		/* We may need to do things with the shrinker which
@@ -411,7 +423,7 @@ int i915_gem_context_init(struct drm_device *dev)
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
					    get_context_alignment(dev_priv), 0);
 		if (ret) {
 			DRM_ERROR("Failed to pinned default global context (error %d)\n",
@@ -436,15 +448,17 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 	lockdep_assert_held(&dev_priv->dev->struct_mutex);
 
 	for_each_engine(engine, dev_priv) {
-		if (engine->last_context == NULL)
-			continue;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
+		}
 
-		i915_gem_context_unpin(engine->last_context, engine);
-		engine->last_context = NULL;
+		/* Force the GPU state to be reinitialised on enabling */
+		dev_priv->kernel_context->engine[engine->id].initialised =
+			engine->init_context == NULL;
 	}
 
-	/* Force the GPU state to be reinitialised on enabling */
-	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }
@@ -455,8 +469,8 @@ void i915_gem_context_fini(struct drm_device *dev)
 	lockdep_assert_held(&dev->struct_mutex);
 
-	if (dctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+	if (!i915.enable_execlists && dctx->engine[RCS].state)
+		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
 
 	i915_gem_context_unreference(dctx);
 	dev_priv->kernel_context = NULL;
@@ -564,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	intel_ring_emit(engine, MI_NOOP);
 	intel_ring_emit(engine, MI_SET_CONTEXT);
 	intel_ring_emit(engine,
-			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -641,7 +655,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
 	if (to->remap_slice)
 		return false;
 
-	if (!to->legacy_hw_ctx.initialized)
+	if (!to->engine[RCS].initialised)
 		return false;
 
 	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
@@ -706,7 +720,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
				    get_context_alignment(engine->i915),
				    0);
 	if (ret)
@@ -729,7 +743,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
 	if (ret)
 		goto unpin_out;
@@ -744,7 +758,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		goto unpin_out;
 	}
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
 		 * space. This means we must enforce that a page table load
@@ -768,8 +782,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -777,10 +791,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		from->engine[RCS].state->dirty = 1;
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
 		i915_gem_context_unreference(from);
 	}
 	i915_gem_context_reference(to);
@@ -815,19 +829,19 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 			to->remap_slice &= ~(1<<i);
 	}
 
-	if (!to->legacy_hw_ctx.initialized) {
+	if (!to->engine[RCS].initialised) {
 		if (engine->init_context) {
 			ret = engine->init_context(req);
 			if (ret)
 				return ret;
 		}
-		to->legacy_hw_ctx.initialized = true;
+		to->engine[RCS].initialised = true;
 	}
 
 	return 0;
 
 unpin_out:
-	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
 	return ret;
 }
@@ -851,8 +865,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	WARN_ON(i915.enable_execlists);
 	lockdep_assert_held(&req->i915->dev->struct_mutex);
 
-	if (engine->id != RCS ||
-	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
 		struct i915_hw_ppgtt *ppgtt =
 			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
...
@@ -2409,31 +2409,6 @@ populate_lr_context(struct i915_gem_context *ctx,
 	return 0;
 }
 
-/**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct i915_gem_context *ctx)
-{
-	int i;
-
-	for (i = I915_NUM_ENGINES; --i >= 0; ) {
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-		if (!ctx_obj)
-			continue;
-
-		WARN_ON(ctx->engine[i].pin_count);
-
-		intel_ringbuffer_free(ringbuf);
-		drm_gem_object_unreference(&ctx_obj->base);
-	}
-}
-
 /**
  * intel_lr_context_size() - return the size of the context for an engine
  * @ring: which engine to find the context size for
@@ -2494,7 +2469,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
-	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
 	WARN_ON(ce->state);
 
 	context_size = round_up(intel_lr_context_size(engine), 4096);
...
@@ -101,7 +101,6 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 
 struct i915_gem_context;
 
-void intel_lr_context_free(struct i915_gem_context *ctx);
 uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine);
...