Commit 6259cead authored by John Harrison, committed by Daniel Vetter

drm/i915: Remove 'outstanding_lazy_seqno'

The OLS value is now obsolete: exactly the same value is guaranteed to always be
available as PLR->seqno. Thus it is safe to remove the OLS completely, and to
rename the PLR to OLR to keep the 'outstanding lazy ...' naming convention valid
(OLS = outstanding_lazy_seqno, PLR = preallocated_lazy_request, OLR =
outstanding_lazy_request).

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ff865885
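
The conversions below lean on the seqno accessor added earlier in this series.
A minimal sketch of it, assuming the form the preceding patches gave it (the
real definition lives in i915_drv.h):

	/* Sketch: returns the request's seqno, or 0 when req is NULL. */
	static inline u32
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		return req ? req->seqno : 0;
	}

The NULL-tolerance matters: i915_gem_check_olr() below compares a caller's
seqno against it even when no lazy request is pending, which is safe because a
valid seqno is never 0.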
drivers/gpu/drm/i915/i915_gem.c

@@ -1164,7 +1164,7 @@ i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == ring->outstanding_lazy_seqno)
+	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
 		ret = i915_add_request(ring, NULL);
 
 	return ret;
@@ -2421,7 +2421,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request = ring->preallocated_lazy_request;
+	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
 		return -ENOMEM;
@@ -2466,7 +2466,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
 			return ret;
 	}
 
-	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->head = request_start;
 	request->tail = request_ring_position;
@@ -2503,8 +2502,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	}
 
 	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_seqno = 0;
-	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_request = NULL;
 
 	i915_queue_hangcheck(ring->dev);
@@ -2689,9 +2687,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		i915_gem_free_request(request);
 	}
 
-	/* These may not have been flush before the reset, do so now */
-	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-	ring->outstanding_lazy_seqno = 0;
+	/* This may not have been flushed before the reset, so clean it now */
+	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
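
Note that the cleanup paths in this patch go through i915_gem_request_assign()
rather than a bare pointer write. A sketch of that helper, assuming the
reference-counted form introduced earlier in this series: assigning NULL drops
the reference the engine holds, so the discarded lazy request is actually
freed rather than leaked.

	/* Sketch: reference-counted pointer assignment for requests.
	 * Takes a reference on the new request (if any) and drops the
	 * one held on the old request (if any). */
	static inline void
	i915_gem_request_assign(struct drm_i915_gem_request **pdst,
				struct drm_i915_gem_request *src)
	{
		if (src)
			i915_gem_request_reference(src);	/* kref_get */
		if (*pdst)
			i915_gem_request_unreference(*pdst);	/* kref_put */
		*pdst = src;
	}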
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1211,7 +1211,9 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+	trace_i915_gem_ring_dispatch(ring,
+				     i915_gem_request_get_seqno(intel_ring_get_request(ring)),
+				     flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
drivers/gpu/drm/i915/intel_display.c
@@ -9910,7 +9910,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		if (ret)
 			goto cleanup_unpin;
 
-		work->flip_queued_seqno = intel_ring_get_seqno(ring);
+		work->flip_queued_seqno =
+			i915_gem_request_get_seqno(intel_ring_get_request(ring));
 		work->flip_queued_ring = ring;
 	}
drivers/gpu/drm/i915/intel_lrc.c
@@ -876,22 +876,14 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
 	}
 }
 
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
-				    struct intel_context *ctx)
+static int logical_ring_alloc_request(struct intel_engine_cs *ring,
+				      struct intel_context *ctx)
 {
 	struct drm_i915_gem_request *request;
 	int ret;
 
-	/* XXX: The aim is to replace seqno values with request structures.
-	 * A step along the way is to switch to using the PLR in preference
-	 * to the OLS. That requires the PLR to only be valid when the OLS is
-	 * also valid. I.e., the two must be kept in step. */
-	if (ring->outstanding_lazy_seqno) {
-		WARN_ON(ring->preallocated_lazy_request == NULL);
+	if (ring->outstanding_lazy_request)
 		return 0;
-	}
-
-	WARN_ON(ring->preallocated_lazy_request != NULL);
 
 	request = kmalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL)
@@ -907,7 +899,7 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 
 	kref_init(&request->ref);
 
-	ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
 	if (ret) {
 		intel_lr_context_unpin(ring, ctx);
 		kfree(request);
@@ -921,7 +913,7 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 	request->ctx = ctx;
 	i915_gem_context_reference(request->ctx);
 
-	ring->preallocated_lazy_request = request;
+	ring->outstanding_lazy_request = request;
 	return 0;
 }
@@ -1098,7 +1090,7 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+	ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
 	if (ret)
 		return ret;
@@ -1351,7 +1343,8 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
 				(ring->status_page.gfx_addr +
 				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_logical_ring_emit(ringbuf,
+		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance_and_submit(ringbuf);
@@ -1376,8 +1369,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	intel_logical_ring_stop(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-	ring->outstanding_lazy_seqno = 0;
+	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -911,17 +911,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
 		return ret;
 
 	for_each_ring(waiter, dev_priv, i) {
+		u32 seqno;
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
+		seqno = i915_gem_request_get_seqno(
+					   signaller->outstanding_lazy_request);
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
 		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_QW_WRITE |
 					   PIPE_CONTROL_FLUSH_ENABLE);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, seqno);
 		intel_ring_emit(signaller, 0);
 		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
 					   MI_SEMAPHORE_TARGET(waiter->id));
@@ -949,16 +952,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
 		return ret;
 
 	for_each_ring(waiter, dev_priv, i) {
+		u32 seqno;
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
+		seqno = i915_gem_request_get_seqno(
+					   signaller->outstanding_lazy_request);
 		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
 					   MI_FLUSH_DW_OP_STOREDW);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
 					   MI_FLUSH_DW_USE_GTT);
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, seqno);
 		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
 					   MI_SEMAPHORE_TARGET(waiter->id));
 		intel_ring_emit(signaller, 0);
@@ -987,9 +993,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
+			u32 seqno = i915_gem_request_get_seqno(
+					   signaller->outstanding_lazy_request);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
-			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+			intel_ring_emit(signaller, seqno);
 		}
 	}
@@ -1024,7 +1032,8 @@ gen6_add_request(struct intel_engine_cs *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+	intel_ring_emit(ring,
+		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
@@ -1142,7 +1151,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+	intel_ring_emit(ring,
+		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1161,7 +1171,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+	intel_ring_emit(ring,
+		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_ring_emit(ring, 0);
 	__intel_ring_advance(ring);
@@ -1401,7 +1412,8 @@ i9xx_add_request(struct intel_engine_cs *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+	intel_ring_emit(ring,
+		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
@@ -1870,8 +1882,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
 
-	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
-	ring->outstanding_lazy_seqno = 0;
+	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2004,7 +2015,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_seqno) {
+	if (ring->outstanding_lazy_request) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
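
For orientation, the lazy-request lifecycle as it stands after this patch,
summarised from the hunks in this diff:

	/*
	 * ring->outstanding_lazy_request (OLR) lifecycle:
	 *
	 *   intel_ring_begin() / intel_logical_ring_begin()
	 *     -> intel_ring_alloc_request() / logical_ring_alloc_request():
	 *        allocate the OLR, with its seqno, if none is pending;
	 *   intel_ring_emit() and friends:
	 *        commands accumulate against the pending OLR;
	 *   __i915_add_request():
	 *        stamps head/tail, emits the breadcrumb and sets the
	 *        OLR back to NULL;
	 *   intel_ring_idle():
	 *        flushes any still-pending OLR via i915_add_request()
	 *        before waiting for the ring to drain.
	 */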
@@ -2022,22 +2033,13 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 }
 
 static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
 {
 	int ret;
 	struct drm_i915_gem_request *request;
 
-	/* XXX: The aim is to replace seqno values with request structures.
-	 * A step along the way is to switch to using the PLR in preference
-	 * to the OLS. That requires the PLR to only be valid when the OLS
-	 * is also valid. I.e., the two must be kept in step. */
-	if (ring->outstanding_lazy_seqno) {
-		WARN_ON(ring->preallocated_lazy_request == NULL);
+	if (ring->outstanding_lazy_request)
 		return 0;
-	}
-
-	WARN_ON(ring->preallocated_lazy_request != NULL);
 
 	request = kmalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL)
@@ -2045,13 +2047,13 @@ intel_ring_alloc_seqno(struct intel_engine_cs *ring)
 
 	kref_init(&request->ref);
 
-	ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	ring->preallocated_lazy_request = request;
+	ring->outstanding_lazy_request = request;
 	return 0;
 }
@@ -2092,7 +2094,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = intel_ring_alloc_seqno(ring);
+	ret = intel_ring_alloc_request(ring);
 	if (ret)
 		return ret;
@@ -2127,7 +2129,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_seqno);
+	BUG_ON(ring->outstanding_lazy_request);
 
 	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,8 +267,7 @@ struct intel_engine_cs {
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	struct drm_i915_gem_request *preallocated_lazy_request;
-	u32 outstanding_lazy_seqno;
+	struct drm_i915_gem_request *outstanding_lazy_request;
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
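
For reference, a partial sketch of struct drm_i915_gem_request showing only
the members this diff reads or writes (the full definition lives in
i915_drv.h; member order here is illustrative):

	struct drm_i915_gem_request {
		struct kref ref;		/* kref_init() in the alloc paths */
		struct intel_engine_cs *ring;	/* set in __i915_add_request() */
		u32 seqno;			/* filled by i915_gem_get_seqno() */
		u32 head;			/* request_start */
		u32 tail;			/* request_ring_position */
		struct intel_context *ctx;	/* execlists: referenced in lrc alloc */
		/* ... */
	};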
@@ -436,17 +435,11 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 	return ringbuf->tail;
 }
 
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
-{
-	BUG_ON(ring->outstanding_lazy_seqno == 0);
-	return ring->outstanding_lazy_seqno;
-}
-
 static inline struct drm_i915_gem_request *
 intel_ring_get_request(struct intel_engine_cs *ring)
 {
-	BUG_ON(ring->preallocated_lazy_request == NULL);
-	return ring->preallocated_lazy_request;
+	BUG_ON(ring->outstanding_lazy_request == NULL);
+	return ring->outstanding_lazy_request;
 }
 
 static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
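
To summarise the caller-facing change: code that used to read the seqno
straight off the engine now asks the outstanding request for it. A sketch of
the pattern, as used in the execbuffer and page-flip hunks above:

	/* Before: u32 seqno = intel_ring_get_seqno(ring); */
	u32 seqno = i915_gem_request_get_seqno(intel_ring_get_request(ring));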