Commit b93f9cf1 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: argument to control retiring behavior

Sometimes, when we idle the GPU or wait on something, we don't actually
want to process the retire list. This patch allows callers to choose
that behavior.
Reviewed-by: Keith Packard <keithp@keithp.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent fc74d8e0
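
For illustration only (not part of this commit): a hypothetical caller could use the new do_retire argument to drain the GPU without touching the retire lists. Every call site converted in the diff below passes true, so existing behavior is unchanged.

/* Hypothetical sketch, not from this patch: idle the GPU but skip
 * request retirement, using the i915_gpu_idle() signature added below.
 */
static int example_idle_without_retire(struct drm_device *dev)
{
	/* Wait for all rings to drain, but leave the request lists alone. */
	return i915_gpu_idle(dev, false);
}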
@@ -2131,7 +2131,7 @@ int i915_driver_unload(struct drm_device *dev)
 	unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 	mutex_unlock(&dev->struct_mutex);
...
@@ -1179,13 +1179,14 @@ void i915_gem_do_init(struct drm_device *dev,
		       unsigned long start,
		       unsigned long mappable_end,
		       unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
				  struct drm_file *file,
				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno);
+				   uint32_t seqno,
+				   bool do_retire);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
...
@@ -1943,7 +1943,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+		  uint32_t seqno,
+		  bool do_retire)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2027,7 +2028,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
 	 */
-	if (ret == 0)
+	if (ret == 0 && do_retire)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
@@ -2051,7 +2052,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+					true);
 		if (ret)
 			return ret;
 	}
@@ -2172,7 +2174,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
 	int ret;
 
@@ -2186,18 +2188,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+				 do_retire);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
 		if (ret)
 			return ret;
 	}
@@ -2400,7 +2402,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		if (!ring_passed_seqno(obj->last_fenced_ring,
				       obj->last_fenced_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno);
+						obj->last_fenced_seqno,
+						true);
 			if (ret)
 				return ret;
 		}
@@ -2541,7 +2544,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			if (!ring_passed_seqno(obj->last_fenced_ring,
					       reg->setup_seqno)) {
 				ret = i915_wait_request(obj->last_fenced_ring,
-							reg->setup_seqno);
+							reg->setup_seqno,
+							true);
 				if (ret)
 					return ret;
 			}
@@ -3710,7 +3714,7 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -4201,7 +4205,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev) == 0)
+		if (i915_gpu_idle(dev, true) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);
...
@@ -195,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	trace_i915_gem_evict_everything(dev, purgeable_only);
 
 	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		return ret;
...
@@ -1186,7 +1186,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		 * so every billion or so execbuffers, we need to stall
		 * the GPU in order to reset the counters.
		 */
-		ret = i915_gpu_idle(dev);
+		ret = i915_gpu_idle(dev, true);
		if (ret)
			goto err;
...
@@ -55,7 +55,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
+		if (i915_gpu_idle(dev_priv->dev, true)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
...
@@ -227,7 +227,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+				true);
 	if (ret)
 		return ret;
 
@@ -448,7 +449,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+				true);
 	if (ret)
 		return ret;
...