Commit ab82a063 authored by Chris Wilson

drm/i915: Wrap engine->context_pin() and engine->context_unpin()

Make life easier in upcoming patches by moving the context_pin and
context_unpin vfuncs into inline helpers.

v2: Fixup mock_engine to mark the context as pinned on use.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-2-chris@chris-wilson.co.uk
parent 52d7f16e
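
For orientation before the diff: call sites that previously invoked the engine
vfuncs directly now go through small inline helpers added to
i915_gem_context.h, which take the context as the primary argument and keep
the vfunc as an implementation detail. A minimal sketch of the resulting
calling convention, using only names introduced by this patch; the wrapper
function example_use() itself is hypothetical and only illustrates the shape
of a caller:

	/* example_use() is illustrative only, not part of the patch. */
	static int example_use(struct intel_engine_cs *engine,
			       struct i915_gem_context *ctx)
	{
		struct intel_ring *ring;

		/* was: ring = engine->context_pin(engine, ctx); */
		ring = intel_context_pin(ctx, engine);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/* ... build and submit requests on the pinned context ... */

		/* was: engine->context_unpin(engine, ctx); */
		intel_context_unpin(ctx, engine);
		return 0;
	}

Putting the context first reads naturally ("pin this context on that engine")
and, per the commit message, gives the upcoming patches a single seam at which
to change the pinning scheme without touching every caller.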
@@ -448,7 +448,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
 {
-	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
...
@@ -58,7 +58,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -283,7 +283,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 					     struct intel_engine_cs *engine)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc = 0;
 
 	desc = ce->lrc_desc;
@@ -389,7 +389,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
 	 * the guest context, gvt can unpin the shadow_ctx safely.
 	 */
-	ring = engine->context_pin(engine, shadow_ctx);
+	ring = intel_context_pin(shadow_ctx, engine);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
@@ -403,7 +403,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	return 0;
 err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_scan:
@@ -437,7 +437,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 	return 0;
 err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 	return ret;
 }
@@ -526,7 +526,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
@@ -688,7 +688,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	ret = prepare_workload(workload);
 	if (ret) {
-		engine->context_unpin(engine, shadow_ctx);
+		intel_context_unpin(shadow_ctx, engine);
 		goto out;
 	}
@@ -771,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -898,7 +898,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	}
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	/* unpin shadow ctx as the shadow_ctx update is done */
-	engine->context_unpin(engine, s->shadow_ctx);
+	intel_context_unpin(s->shadow_ctx, engine);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
...
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int id, void *ptr, void *data)
+static int per_file_ctx_stats(int idx, void *ptr, void *data)
 {
 	struct i915_gem_context *ctx = ptr;
-	int n;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, ctx->i915, id) {
+		struct intel_context *ce = to_intel_context(ctx, engine);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
-		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state->obj, data);
-		if (ctx->engine[n].ring)
-			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
+		if (ce->state)
+			per_file_stats(0, ce->state->obj, data);
+		if (ce->ring)
+			per_file_stats(0, ce->ring->vma->obj, data);
 	}
 
 	return 0;
@@ -1959,7 +1962,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_context *ce = &ctx->engine[engine->id];
+		struct intel_context *ce =
+			to_intel_context(ctx, engine);
 
 		seq_printf(m, "%s: ", engine->name);
 		if (ce->state)
...
@@ -3234,7 +3234,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 					   stalled_mask & ENGINE_MASK(id));
 
 		ctx = fetch_and_zero(&engine->last_retired_context);
 		if (ctx)
-			engine->context_unpin(engine, ctx);
+			intel_context_unpin(ctx, engine);
 
 	/*
	 * Ostensibily, we always want a context loaded for powersaving,
@@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
 
-		state = ctx->engine[id].state;
+		state = to_intel_context(ctx, engine)->state;
 		if (!state)
 			continue;
...
@@ -117,15 +117,15 @@ static void lut_close(struct i915_gem_context *ctx)
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	int i;
+	unsigned int n;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_context *ce = &ctx->engine[i];
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
 
 		if (!ce->state)
 			continue;
@@ -521,7 +521,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 		if (!engine->last_retired_context)
 			continue;
 
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 		engine->last_retired_context = NULL;
 	}
 }
...
@@ -149,7 +149,7 @@ struct i915_gem_context {
 		u32 *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
-	} engine[I915_NUM_ENGINES];
+	} __engine[I915_NUM_ENGINES];
 
 	/** ring_size: size for allocating the per-engine ring buffer */
 	u32 ring_size;
@@ -256,6 +256,34 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 	return !ctx->file_priv;
 }
 
+static inline struct intel_context *
+to_intel_context(struct i915_gem_context *ctx,
+		 const struct intel_engine_cs *engine)
+{
+	return &ctx->__engine[engine->id];
+}
+
+static inline struct intel_ring *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	return engine->context_pin(engine, ctx);
+}
+
+static inline void __intel_context_pin(struct i915_gem_context *ctx,
+				       const struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	GEM_BUG_ON(!ce->pin_count);
+	ce->pin_count++;
+}
+
+static inline void intel_context_unpin(struct i915_gem_context *ctx,
+				       struct intel_engine_cs *engine)
+{
+	engine->context_unpin(engine, ctx);
+}
+
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
...
@@ -1472,7 +1472,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			ee->ctx =
 				i915_error_object_create(i915,
-							 request->ctx->engine[i].state);
+							 to_intel_context(request->ctx,
+									  engine)->state);
 
 			error->simulated |=
 				i915_gem_context_no_error_capture(request->ctx);
...
@@ -1234,7 +1234,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	 *
	 * NB: implied RCS engine...
	 */
-	ring = engine->context_pin(engine, stream->ctx);
+	ring = intel_context_pin(stream->ctx, engine);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (IS_ERR(ring))
 		return PTR_ERR(ring);
@@ -1246,7 +1246,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
	 * with gen8+ and execlists
	 */
 		dev_priv->perf.oa.specific_ctx_id =
-			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+			i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
 	}
 
 	return 0;
@@ -1271,7 +1271,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-	engine->context_unpin(engine, stream->ctx);
+	intel_context_unpin(stream->ctx, engine);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
@@ -1759,6 +1759,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 				       const struct i915_oa_config *oa_config)
 {
+	struct intel_engine_cs *engine = dev_priv->engine[RCS];
 	struct i915_gem_context *ctx;
 	int ret;
 	unsigned int wait_flags = I915_WAIT_LOCKED;
@@ -1789,7 +1790,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = &ctx->engine[RCS];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 *regs;
 
 		/* OA settings will be set upon first use */
...
@@ -409,7 +409,7 @@ static void i915_request_retire(struct i915_request *request)
	 * the subsequent request.
	 */
 	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 	engine->last_retired_context = request->ctx;
 
 	spin_lock_irq(&request->lock);
@@ -638,7 +638,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
-	ring = engine->context_pin(engine, ctx);
+	ring = intel_context_pin(ctx, engine);
 	if (IS_ERR(ring))
 		return ERR_CAST(ring);
 	GEM_BUG_ON(!ring);
@@ -787,7 +787,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 err_unreserve:
 	unreserve_gt(i915);
 err_unpin:
-	engine->context_unpin(engine, ctx);
+	intel_context_unpin(ctx, engine);
 	return ERR_PTR(ret);
 }
...
@@ -685,7 +685,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	 * be available. To avoid this we always pin the default
	 * context.
	 */
-	ring = engine->context_pin(engine, engine->i915->kernel_context);
+	ring = intel_context_pin(engine->i915->kernel_context, engine);
 	if (IS_ERR(ring))
 		return PTR_ERR(ring);
@@ -694,8 +694,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	 * we can interrupt the engine at any time.
	 */
 	if (engine->i915->preempt_context) {
-		ring = engine->context_pin(engine,
-					   engine->i915->preempt_context);
+		ring = intel_context_pin(engine->i915->preempt_context, engine);
 		if (IS_ERR(ring)) {
 			ret = PTR_ERR(ring);
 			goto err_unpin_kernel;
@@ -719,9 +718,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
 	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
 err_unpin_kernel:
-	engine->context_unpin(engine, engine->i915->kernel_context);
+	intel_context_unpin(engine->i915->kernel_context, engine);
 	return ret;
 }
@@ -749,8 +748,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 		i915_gem_object_put(engine->default_state);
 
 	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
-	engine->context_unpin(engine, engine->i915->kernel_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
+	intel_context_unpin(engine->i915->kernel_context, engine);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
...
@@ -121,7 +121,8 @@ int intel_guc_ads_create(struct intel_guc *guc)
	 * to find it. Note that we have to skip our header (1 page),
	 * because our GuC shared data is there.
	 */
-	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
+	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
+					  dev_priv->engine[RCS])->state;
 	blob->ads.golden_context_lrca =
 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
...
@@ -362,7 +362,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	desc->db_id = client->doorbell_id;
 
 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = &ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -990,7 +990,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_context *ce = &client->owner->engine[id];
+		struct intel_context *ce =
+			to_intel_context(client->owner, engine);
 		u32 addr = intel_hws_preempt_done_address(engine);
 		u32 *cs;
...
@@ -223,7 +223,7 @@ static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -414,7 +414,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+	struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
 	struct i915_hw_ppgtt *ppgtt =
 		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
@@ -523,7 +523,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
 	struct intel_context *ce =
-		&engine->i915->preempt_context->engine[engine->id];
+		to_intel_context(engine->i915->preempt_context, engine);
 	unsigned int n;
 
 	GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -1327,7 +1327,7 @@ static struct intel_ring *
 execlists_context_pin(struct intel_engine_cs *engine,
 		      struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	void *vaddr;
 	int ret;
@@ -1380,7 +1380,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 static void execlists_context_unpin(struct intel_engine_cs *engine,
 				    struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(ce->pin_count == 0);
@@ -1399,8 +1399,8 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 static int execlists_request_alloc(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
+	struct intel_context *ce =
+		to_intel_context(request->ctx, request->engine);
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -1854,7 +1854,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
-	regs = request->ctx->engine[engine->id].lrc_reg_state;
+	regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
 	if (engine->default_state) {
 		void *defaults;
@@ -2305,9 +2305,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	}
 
 	engine->execlists.preempt_complete_status = ~0u;
-	if (engine->i915->preempt_context)
+	if (engine->i915->preempt_context) {
+		struct intel_context *ce =
+			to_intel_context(engine->i915->preempt_context, engine);
+
 		engine->execlists.preempt_complete_status =
-			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+			upper_32_bits(ce->lrc_desc);
+	}
 
 	return 0;
@@ -2589,7 +2593,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
@@ -2660,7 +2664,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
	 */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
 		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
 			u32 *reg;
 
 			if (!ce->state)
...
@@ -108,7 +108,7 @@ static inline uint64_t
 intel_lr_context_descriptor(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine)
 {
-	return ctx->engine[engine->id].lrc_desc;
+	return to_intel_context(ctx, engine)->lrc_desc;
 }
 
 #endif /* _INTEL_LRC_H_ */
@@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
	 */
 	if (request) {
 		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(request->ctx,
+							    engine);
 		struct i915_hw_ppgtt *ppgtt;
 
 		if (ce->state) {
@@ -1163,9 +1164,9 @@ intel_ring_free(struct intel_ring *ring)
 	kfree(ring);
 }
 
-static int context_pin(struct i915_gem_context *ctx)
+static int context_pin(struct intel_context *ce)
 {
-	struct i915_vma *vma = ctx->engine[RCS].state;
+	struct i915_vma *vma = ce->state;
 	int ret;
 
	/*
@@ -1256,7 +1257,7 @@ static struct intel_ring *
 intel_ring_context_pin(struct intel_engine_cs *engine,
 		       struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	int ret;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -1278,7 +1279,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 	}
 
 	if (ce->state) {
-		ret = context_pin(ctx);
+		ret = context_pin(ce);
 		if (ret)
 			goto err;
@@ -1299,7 +1300,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 				     struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(ce->pin_count == 0);
@@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		*cs++ = MI_NOOP;
 
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+	*cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1518,7 +1519,7 @@ static int switch_context(struct i915_request *rq)
 		hw_flags = MI_FORCE_RESTORE;
 	}
 
-	if (to_ctx->engine[engine->id].state &&
+	if (to_intel_context(to_ctx, engine)->state &&
 	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
 		GEM_BUG_ON(engine->id != RCS);
@@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+	GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
...
@@ -71,14 +71,21 @@ static struct intel_ring *
 mock_context_pin(struct intel_engine_cs *engine,
 		 struct i915_gem_context *ctx)
 {
-	i915_gem_context_get(ctx);
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	if (!ce->pin_count++)
+		i915_gem_context_get(ctx);
+
 	return engine->buffer;
 }
 
 static void mock_context_unpin(struct intel_engine_cs *engine,
 			       struct i915_gem_context *ctx)
 {
-	i915_gem_context_put(ctx);
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	if (!--ce->pin_count)
+		i915_gem_context_put(ctx);
 }
 
 static int mock_request_alloc(struct i915_request *request)
@@ -217,7 +224,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
 	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 
 	intel_engine_fini_breadcrumbs(engine);
...