Commit 21076372 authored by Nick Hoath, committed by Daniel Vetter

drm/i915: Remove FIXME_lrc_ctx backpointer

The first pass implementation of execlists required a backpointer to the context to be held
in the intel_ringbuffer. However the context pointer is available higher in the call stack.
Remove the backpointer from the ring buffer structure and instead pass it down through the
call stack.

v2: Integrate this changeset with the removal of duplicate request/execlist queue item members.
v3: Rebase
v4: Rebase. Remove passing of context when the request is passed.
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Reviewed-by: Thomas Daniel <thomas.daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 72f95afa
...@@ -2422,8 +2422,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2422,8 +2422,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
return -ENOMEM; return -ENOMEM;
if (i915.enable_execlists) { if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx; ringbuf = request->ctx->engine[ring->id].ringbuf;
ringbuf = ctx->engine[ring->id].ringbuf;
} else } else
ringbuf = ring->buffer; ringbuf = ring->buffer;
...@@ -2436,7 +2435,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2436,7 +2435,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* what. * what.
*/ */
if (i915.enable_execlists) { if (i915.enable_execlists) {
ret = logical_ring_flush_all_caches(ringbuf); ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
if (ret) if (ret)
return ret; return ret;
} else { } else {
......
...@@ -559,6 +559,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring, ...@@ -559,6 +559,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (request == NULL) if (request == NULL)
return -ENOMEM; return -ENOMEM;
request->ring = ring; request->ring = ring;
} else {
WARN_ON(to != request->ctx);
} }
request->ctx = to; request->ctx = to;
request->tail = tail; request->tail = tail;
...@@ -599,7 +601,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring, ...@@ -599,7 +601,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0; return 0;
} }
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf) static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
uint32_t flush_domains; uint32_t flush_domains;
...@@ -609,7 +612,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf) ...@@ -609,7 +612,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
if (ring->gpu_caches_dirty) if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS; flush_domains = I915_GEM_GPU_DOMAINS;
ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains); ret = ring->emit_flush(ringbuf, ctx,
I915_GEM_GPU_DOMAINS, flush_domains);
if (ret) if (ret)
return ret; return ret;
...@@ -618,6 +622,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf) ...@@ -618,6 +622,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
} }
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
struct list_head *vmas) struct list_head *vmas)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
...@@ -645,7 +650,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, ...@@ -645,7 +650,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush /* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch. * any residual writes from the previous batch.
*/ */
return logical_ring_invalidate_all_caches(ringbuf); return logical_ring_invalidate_all_caches(ringbuf, ctx);
} }
/** /**
...@@ -725,13 +730,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, ...@@ -725,13 +730,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL; return -EINVAL;
} }
ret = execlists_move_to_gpu(ringbuf, vmas); ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
if (ret) if (ret)
return ret; return ret;
if (ring == &dev_priv->ring[RCS] && if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) { instp_mode != dev_priv->relative_constants_mode) {
ret = intel_logical_ring_begin(ringbuf, 4); ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret) if (ret)
return ret; return ret;
...@@ -744,7 +749,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, ...@@ -744,7 +749,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode; dev_priv->relative_constants_mode = instp_mode;
} }
ret = ring->emit_bb_start(ringbuf, exec_start, flags); ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
if (ret) if (ret)
return ret; return ret;
...@@ -807,7 +812,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring) ...@@ -807,7 +812,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
} }
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf) int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
int ret; int ret;
...@@ -815,7 +821,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf) ...@@ -815,7 +821,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
if (!ring->gpu_caches_dirty) if (!ring->gpu_caches_dirty)
return 0; return 0;
ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS); ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
if (ret) if (ret)
return ret; return ret;
...@@ -833,10 +839,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf) ...@@ -833,10 +839,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
* point, the tail *inside* the context is updated and the ELSP written to. * point, the tail *inside* the context is updated and the ELSP written to.
*/ */
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf, void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
struct drm_i915_gem_request *request) struct drm_i915_gem_request *request)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
intel_logical_ring_advance(ringbuf); intel_logical_ring_advance(ringbuf);
...@@ -974,6 +980,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf, ...@@ -974,6 +980,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
} }
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
int bytes) int bytes)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
...@@ -987,7 +994,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -987,7 +994,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret; return ret;
/* Force the context submission in case we have been skipping it */ /* Force the context submission in case we have been skipping it */
intel_logical_ring_advance_and_submit(ringbuf, NULL); intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
/* With GEM the hangcheck timer should kick us out of the loop, /* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due * leaving it early runs the risk of corrupting GEM state (due
...@@ -1022,13 +1029,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -1022,13 +1029,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret; return ret;
} }
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf) static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx)
{ {
uint32_t __iomem *virt; uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail; int rem = ringbuf->size - ringbuf->tail;
if (ringbuf->space < rem) { if (ringbuf->space < rem) {
int ret = logical_ring_wait_for_space(ringbuf, rem); int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
if (ret) if (ret)
return ret; return ret;
...@@ -1045,18 +1053,19 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf) ...@@ -1045,18 +1053,19 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
return 0; return 0;
} }
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes) static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx, int bytes)
{ {
int ret; int ret;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = logical_ring_wrap_buffer(ringbuf); ret = logical_ring_wrap_buffer(ringbuf, ctx);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
} }
if (unlikely(ringbuf->space < bytes)) { if (unlikely(ringbuf->space < bytes)) {
ret = logical_ring_wait_for_space(ringbuf, bytes); ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
} }
...@@ -1077,7 +1086,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes) ...@@ -1077,7 +1086,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
* *
* Return: non-zero if the ringbuffer is not ready to be written to. * Return: non-zero if the ringbuffer is not ready to be written to.
*/ */
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords) int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx, int num_dwords)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
...@@ -1089,12 +1099,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords) ...@@ -1089,12 +1099,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
if (ret) if (ret)
return ret; return ret;
ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t)); ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
if (ret) if (ret)
return ret; return ret;
/* Preallocate the olr before touching the ring */ /* Preallocate the olr before touching the ring */
ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx); ret = logical_ring_alloc_request(ring, ctx);
if (ret) if (ret)
return ret; return ret;
...@@ -1115,11 +1125,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, ...@@ -1115,11 +1125,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0; return 0;
ring->gpu_caches_dirty = true; ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf); ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret) if (ret)
return ret; return ret;
ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2); ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
if (ret) if (ret)
return ret; return ret;
...@@ -1133,7 +1143,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, ...@@ -1133,7 +1143,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf); intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true; ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf); ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret) if (ret)
return ret; return ret;
...@@ -1184,12 +1194,13 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring) ...@@ -1184,12 +1194,13 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
} }
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
u64 offset, unsigned flags) u64 offset, unsigned flags)
{ {
bool ppgtt = !(flags & I915_DISPATCH_SECURE); bool ppgtt = !(flags & I915_DISPATCH_SECURE);
int ret; int ret;
ret = intel_logical_ring_begin(ringbuf, 4); ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret) if (ret)
return ret; return ret;
...@@ -1237,6 +1248,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) ...@@ -1237,6 +1248,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
} }
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
u32 invalidate_domains, u32 invalidate_domains,
u32 unused) u32 unused)
{ {
...@@ -1246,7 +1258,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, ...@@ -1246,7 +1258,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
uint32_t cmd; uint32_t cmd;
int ret; int ret;
ret = intel_logical_ring_begin(ringbuf, 4); ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret) if (ret)
return ret; return ret;
...@@ -1275,6 +1287,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, ...@@ -1275,6 +1287,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
} }
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
u32 invalidate_domains, u32 invalidate_domains,
u32 flush_domains) u32 flush_domains)
{ {
...@@ -1301,7 +1314,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, ...@@ -1301,7 +1314,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
} }
ret = intel_logical_ring_begin(ringbuf, 6); ret = intel_logical_ring_begin(ringbuf, ctx, 6);
if (ret) if (ret)
return ret; return ret;
...@@ -1333,7 +1346,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf, ...@@ -1333,7 +1346,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
u32 cmd; u32 cmd;
int ret; int ret;
ret = intel_logical_ring_begin(ringbuf, 6); ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
if (ret) if (ret)
return ret; return ret;
...@@ -1349,7 +1362,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf, ...@@ -1349,7 +1362,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
i915_gem_request_get_seqno(ring->outstanding_lazy_request)); i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf, request); intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
return 0; return 0;
} }
...@@ -1636,6 +1649,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring, ...@@ -1636,6 +1649,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
return 0; return 0;
ret = ring->emit_bb_start(ringbuf, ret = ring->emit_bb_start(ringbuf,
ctx,
so.ggtt_offset, so.ggtt_offset,
I915_DISPATCH_SECURE); I915_DISPATCH_SECURE);
if (ret) if (ret)
...@@ -1892,7 +1906,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, ...@@ -1892,7 +1906,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
} }
ringbuf->ring = ring; ringbuf->ring = ring;
ringbuf->FIXME_lrc_ctx = ctx;
ringbuf->size = 32 * PAGE_SIZE; ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size; ringbuf->effective_size = ringbuf->size;
......
...@@ -38,9 +38,11 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring); ...@@ -38,9 +38,11 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring); void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev); int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf); int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx);
void intel_logical_ring_advance_and_submit( void intel_logical_ring_advance_and_submit(
struct intel_ringbuffer *ringbuf, struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
struct drm_i915_gem_request *request); struct drm_i915_gem_request *request);
/** /**
* intel_logical_ring_advance() - advance the ringbuffer tail * intel_logical_ring_advance() - advance the ringbuffer tail
...@@ -63,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, ...@@ -63,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail); iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4; ringbuf->tail += 4;
} }
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords); int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
int num_dwords);
/* Logical Ring Contexts */ /* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring, int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
......
...@@ -99,13 +99,6 @@ struct intel_ringbuffer { ...@@ -99,13 +99,6 @@ struct intel_ringbuffer {
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
/*
* FIXME: This backpointer is an artifact of the history of how the
* execlist patches came into being. It will get removed once the basic
* code has landed.
*/
struct intel_context *FIXME_lrc_ctx;
u32 head; u32 head;
u32 tail; u32 tail;
int space; int space;
...@@ -123,6 +116,8 @@ struct intel_ringbuffer { ...@@ -123,6 +116,8 @@ struct intel_ringbuffer {
u32 last_retired_head; u32 last_retired_head;
}; };
struct intel_context;
struct intel_engine_cs { struct intel_engine_cs {
const char *name; const char *name;
enum intel_ring_id { enum intel_ring_id {
...@@ -242,9 +237,11 @@ struct intel_engine_cs { ...@@ -242,9 +237,11 @@ struct intel_engine_cs {
int (*emit_request)(struct intel_ringbuffer *ringbuf, int (*emit_request)(struct intel_ringbuffer *ringbuf,
struct drm_i915_gem_request *request); struct drm_i915_gem_request *request);
int (*emit_flush)(struct intel_ringbuffer *ringbuf, int (*emit_flush)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
u32 invalidate_domains, u32 invalidate_domains,
u32 flush_domains); u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf, int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
u64 offset, unsigned flags); u64 offset, unsigned flags);
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment