Commit e5292823 authored by Tvrtko Ursulin

drm/i915: Make LRC (un)pinning work on context and engine

Previously, intel_lr_context_(un)pin operated on requests, which
conflicted with their names.

If we make them take a context and an engine, it makes the names
make more sense and it also makes future fixes possible.

v2: Rebase for default_context/kernel_context change.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Nick Hoath <nicholas.hoath@intel.com>
parent d9da6aa0
...@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref) ...@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)
if (ctx) { if (ctx) {
if (i915.enable_execlists && ctx != req->i915->kernel_context) if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(req); intel_lr_context_unpin(ctx, req->ring);
i915_gem_context_unreference(ctx); i915_gem_context_unreference(ctx);
} }
......
...@@ -225,7 +225,8 @@ enum { ...@@ -225,7 +225,8 @@ enum {
#define GEN8_CTX_ID_SHIFT 32 #define GEN8_CTX_ID_SHIFT 32
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
static int intel_lr_context_pin(struct drm_i915_gem_request *rq); static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj); struct drm_i915_gem_object *default_ctx_obj);
...@@ -598,7 +599,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) ...@@ -598,7 +599,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
int num_elements = 0; int num_elements = 0;
if (request->ctx != request->i915->kernel_context) if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request); intel_lr_context_pin(request->ctx, ring);
i915_gem_request_reference(request); i915_gem_request_reference(request);
...@@ -703,7 +704,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request ...@@ -703,7 +704,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
} }
if (request->ctx != request->i915->kernel_context) if (request->ctx != request->i915->kernel_context)
ret = intel_lr_context_pin(request); ret = intel_lr_context_pin(request->ctx, request->ring);
return ret; return ret;
} }
...@@ -1014,7 +1015,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) ...@@ -1014,7 +1015,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
ctx->engine[ring->id].state; ctx->engine[ring->id].state;
if (ctx_obj && (ctx != req->i915->kernel_context)) if (ctx_obj && (ctx != req->i915->kernel_context))
intel_lr_context_unpin(req); intel_lr_context_unpin(ctx, ring);
list_del(&req->execlist_link); list_del(&req->execlist_link);
i915_gem_request_unreference(req); i915_gem_request_unreference(req);
} }
...@@ -1058,8 +1060,8 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) ...@@ -1058,8 +1060,8 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0; return 0;
} }
static int intel_lr_context_do_pin(struct intel_engine_cs *ring, static int intel_lr_context_do_pin(struct intel_context *ctx,
struct intel_context *ctx) struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -1105,41 +1107,40 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring, ...@@ -1105,41 +1107,40 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
return ret; return ret;
} }
/*
 * intel_lr_context_pin - pin an LRC (logical ring context) for an engine.
 * @ctx: the context whose per-engine state should be pinned
 * @engine: the engine the context will run on
 *
 * Increments the per-engine pin count and performs the actual pin/map work
 * via intel_lr_context_do_pin() only on the 0 -> 1 transition.  If that
 * first pin fails, the count is rolled back to zero so a later attempt can
 * retry cleanly.
 *
 * Returns 0 on success or the negative error code from
 * intel_lr_context_do_pin().
 *
 * NOTE(review): presumably the caller holds dev->struct_mutex, matching the
 * WARN_ON in intel_lr_context_unpin() — confirm against callers.
 */
static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine)
{
	int ret = 0;

	/* Only the first pin per engine does real work. */
	if (ctx->engine[engine->id].pin_count++ == 0) {
		ret = intel_lr_context_do_pin(ctx, engine);
		if (ret)
			goto reset_pin_count;
	}
	return ret;

reset_pin_count:
	/* Undo the optimistic increment above on failure. */
	ctx->engine[engine->id].pin_count = 0;
	return ret;
}
/*
 * intel_lr_context_unpin - release one pin on a context's per-engine LRC.
 * @ctx: the context whose per-engine state should be unpinned
 * @engine: the engine the context was pinned for
 *
 * Decrements the per-engine pin count; on the 1 -> 0 transition it unmaps
 * the register-state page, unpins the ringbuffer object and the context
 * backing object from the GGTT, and clears the cached lrc_vma/lrc_desc/
 * lrc_reg_state fields.
 *
 * Must be called with dev->struct_mutex held (asserted via WARN_ON).
 * Unpinning a context that was never pinned on this engine (no state
 * object) triggers WARN_ON_ONCE and returns early.
 */
void intel_lr_context_unpin(struct intel_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;

	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));

	/* No state object means this context was never pinned here. */
	if (WARN_ON_ONCE(!ctx_obj))
		return;

	/* Only the last unpin per engine tears anything down. */
	if (--ctx->engine[engine->id].pin_count == 0) {
		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
		i915_gem_object_ggtt_unpin(ctx_obj);
		ctx->engine[engine->id].lrc_vma = NULL;
		ctx->engine[engine->id].lrc_desc = 0;
		ctx->engine[engine->id].lrc_reg_state = NULL;
	}
}
...@@ -2064,7 +2065,7 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) ...@@ -2064,7 +2065,7 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
goto error; goto error;
/* As this is the default context, always pin it */ /* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(ring, dctx); ret = intel_lr_context_do_pin(dctx, ring);
if (ret) { if (ret) {
DRM_ERROR( DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n", "Failed to pin and map ringbuffer %s: %d\n",
......
...@@ -101,7 +101,8 @@ void intel_lr_context_free(struct intel_context *ctx); ...@@ -101,7 +101,8 @@ void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *ring); uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx, int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring); struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req); void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev, void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx); struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx, uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment