Commit 82352e90 authored by Tvrtko Ursulin

drm/i915: Cache LRC state page in the context

LRC lifetime is well defined, so we can cache a mapping of the
page backing the LRC state in the context, in order to avoid
walking the object's SG page list from interrupt context without
the big lock held.

v2: Also cache the mapping. (Chris Wilson)
v3: Unmap on the error path.
v4: No need to cache the page. (Chris Wilson)
v5: No need to dirty the page on unpin. (Chris Wilson)
v6: kmap() cannot fail; use kmap_to_page() to simplify unpin.
    (Chris Wilson)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1452877965-32042-1-git-send-email-tvrtko.ursulin@linux.intel.com
parent 0eb973d3
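
For orientation, the shape of the change, condensed from the hunks below:
kmap() the LRC state page once at pin time, cache the resulting pointer in
the per-engine context state, dereference that pointer from the
interrupt-time submission path, and kunmap() at unpin time. The sketch
below is a simplified illustration, not the actual driver code:
struct sketch_engine_ctx and the sketch_* functions are hypothetical
stand-ins, while i915_gem_object_get_dirty_page(), LRC_STATE_PN,
CTX_RING_TAIL, kmap(), kunmap() and kmap_to_page() are the helpers the
patch itself uses.

#include <linux/highmem.h>	/* kmap(), kunmap(), kmap_to_page() */

/* Hypothetical, trimmed-down stand-in for the per-engine context state. */
struct sketch_engine_ctx {
	struct drm_i915_gem_object *state;	/* LRC backing object */
	uint32_t *lrc_reg_state;		/* cached kernel mapping */
};

/* Pin time: process context, struct_mutex held, sleeping allowed. */
static int sketch_pin(struct sketch_engine_ctx *ce)
{
	struct page *page;

	page = i915_gem_object_get_dirty_page(ce->state, LRC_STATE_PN);
	if (WARN_ON(!page))
		return -ENODEV;

	ce->lrc_reg_state = kmap(page);	/* kmap() cannot fail (v6) */
	return 0;
}

/*
 * Submission time: may run from the execlists interrupt handler. The
 * cached pointer avoids the SG page-list walk and kmap_atomic() here.
 */
static void sketch_set_tail(struct sketch_engine_ctx *ce, u32 tail)
{
	ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail;
}

/* Unpin time: kmap_to_page() recovers the page, so it need not be cached. */
static void sketch_unpin(struct sketch_engine_ctx *ce)
{
	kunmap(kmap_to_page(ce->lrc_reg_state));
	ce->lrc_reg_state = NULL;
}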
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -890,6 +890,7 @@ struct intel_context {
 		int pin_count;
 		struct i915_vma *lrc_vma;
 		u64 lrc_desc;
+		uint32_t *lrc_reg_state;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -390,14 +390,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
@@ -414,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 	}
 
-	kunmap_atomic(reg_state);
-
 	return 0;
 }
 
@@ -1067,6 +1058,7 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct page *lrc_state_page;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@@ -1076,12 +1068,19 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
+	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+	if (WARN_ON(!lrc_state_page)) {
+		ret = -ENODEV;
+		goto unpin_ctx_obj;
+	}
+
 	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 	if (ret)
 		goto unpin_ctx_obj;
 
 	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
 	intel_lr_context_descriptor_update(ctx, ring);
+	ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1119,14 +1118,18 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-			rq->ctx->engine[ring->id].lrc_vma = NULL;
-			rq->ctx->engine[ring->id].lrc_desc = 0;
-		}
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	if (!ctx_obj)
+		return;
+
+	if (--rq->ctx->engine[ring->id].pin_count == 0) {
+		kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
+		intel_unpin_ringbuffer_obj(ringbuf);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+		rq->ctx->engine[ring->id].lrc_vma = NULL;
+		rq->ctx->engine[ring->id].lrc_desc = 0;
+		rq->ctx->engine[ring->id].lrc_reg_state = NULL;
 	}
 }
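
Design note: only the kmap()ed pointer is cached, not the backing
struct page, because kmap_to_page() recovers the page from the virtual
address at unpin time (hence v4 and v6 in the changelog above). With the
mapping established once at pin time, execlists_update_context() no
longer touches the object's SG page list, or takes any mapping at all,
from the interrupt handler.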