Commit a7b9761d authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Split i915_gem_flush_ring() into separate invalidate/flush funcs

By moving the function to intel_ringbuffer.c and currying the appropriate
parameters, we hopefully make the call sites easier to read and
understand.
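
To illustrate the currying, here is the i915_add_request() call site
before and after this patch (excerpted from the diff below):

	/* Before: the caller picks the flush domains and clears the
	 * dirty flag itself.
	 */
	if (ring->gpu_caches_dirty) {
		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
	}

	/* After: domain selection and flag bookkeeping are curried
	 * into the helper.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;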
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 016fd0c1
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1256,9 +1256,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1549,14 +1549,10 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-
-		ring->gpu_caches_dirty = false;
-	}
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
 
 	if (request == NULL) {
 		request = kmalloc(sizeof(*request), GFP_KERNEL);
 		if (request == NULL)
@@ -2254,25 +2250,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	if (list_empty(&ring->active_list))
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -707,14 +707,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
 
 static bool
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1564,3 +1564,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -195,6 +195,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);