Commit 2e2f351d authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Remove domain flubbing from i915_gem_object_finish_gpu()

We no longer interpolate domains in the same manner, and even if we did,
we should trust that setting either of the other write domains would
trigger an invalidation rather than force it. Remove the tweaking of the
read_domains since it serves no purpose and use
i915_gem_object_wait_rendering() directly.

Note that this goes back to

commit a8198eea
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Apr 13 22:04:09 2011 +0100

    drm/i915: Introduce i915_gem_object_finish_gpu()

and gpu domain tracking died in

commit cc889e0f
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Wed Jun 13 20:45:19 2012 +0200

    drm/i915: disable flushing_list/gpu_write_list

which is more than a year later.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add notes with information dug out of git history.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 779949f4
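
As a quick orientation before the diff, the sketch below condenses the call-pattern change described above into a minimal, self-contained user-space C program. Every name in it (fake_obj, wait_rendering(), finish_gpu(), FAKE_GPU_DOMAINS) is a hypothetical stand-in for the kernel's drm_i915_gem_object, i915_gem_object_wait_rendering() and i915_gem_object_finish_gpu(); it illustrates only the control flow being simplified, not the actual driver code.

    #include <stdbool.h>

    #define FAKE_GPU_DOMAINS 0x3e /* hypothetical stand-in for I915_GEM_GPU_DOMAINS */

    struct fake_obj {
            unsigned int read_domains;
    };

    /* Stand-in for i915_gem_object_wait_rendering(): block until outstanding
     * rendering to @obj has completed (here it simply pretends to succeed). */
    static int wait_rendering(struct fake_obj *obj, bool readonly)
    {
            (void)obj;
            (void)readonly;
            return 0;
    }

    /* Modelled on the removed i915_gem_object_finish_gpu(): wait, then clear
     * the GPU read domains by hand.  With GPU domain tracking gone, the manual
     * invalidation below no longer buys anything. */
    static int finish_gpu(struct fake_obj *obj)
    {
            int ret;

            if ((obj->read_domains & FAKE_GPU_DOMAINS) == 0)
                    return 0;

            ret = wait_rendering(obj, false);
            if (ret)
                    return ret;

            obj->read_domains &= ~FAKE_GPU_DOMAINS;
            return 0;
    }

    int main(void)
    {
            struct fake_obj obj = { .read_domains = FAKE_GPU_DOMAINS };

            /* Before: callers went through the wrapper. */
            if (finish_gpu(&obj))
                    return 1;

            /* After: callers wait for rendering directly and skip the
             * read_domains bookkeeping entirely. */
            return wait_rendering(&obj, false);
    }
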
@@ -2821,7 +2821,6 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -2843,6 +2842,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+                               bool readonly);
+int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                   bool write);
 int __must_check
...
@@ -40,9 +40,6 @@
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                               bool readonly);
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj);
@@ -1397,7 +1394,7 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-static __must_check int
+int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                                bool readonly)
 {
@@ -3078,7 +3075,7 @@ int i915_vma_unbind(struct i915_vma *vma)
        BUG_ON(obj->pages == NULL);
 
-       ret = i915_gem_object_finish_gpu(obj);
+       ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;
        /* Continue on if we fail due to EIO, the GPU is hung so we
@@ -3853,7 +3850,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
        }
 
        if (i915_gem_obj_bound_any(obj)) {
-               ret = i915_gem_object_finish_gpu(obj);
+               ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;
@@ -4044,23 +4041,6 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
        obj->pin_display--;
 }
 
-int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
-{
-       int ret;
-
-       if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
-               return 0;
-
-       ret = i915_gem_object_wait_rendering(obj, false);
-       if (ret)
-               return ret;
-
-       /* Ensure that we invalidate the GPU's caches and TLBs. */
-       obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-
-       return 0;
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
...
@@ -3295,27 +3295,30 @@ void intel_finish_reset(struct drm_device *dev)
        drm_modeset_unlock_all(dev);
 }
 
-static int
+static void
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
        struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        bool was_interruptible = dev_priv->mm.interruptible;
        int ret;
 
        /* Big Hammer, we also need to ensure that any pending
         * MI_WAIT_FOR_EVENT inside a user batch buffer on the
         * current scanout is retired before unpinning the old
-        * framebuffer.
+        * framebuffer. Note that we rely on userspace rendering
+        * into the buffer attached to the pipe they are waiting
+        * on. If not, userspace generates a GPU hang with IPEHR
+        * pointing to the MI_WAIT_FOR_EVENT.
         *
         * This should only fail upon a hung GPU, in which case we
         * can safely continue.
         */
        dev_priv->mm.interruptible = false;
-       ret = i915_gem_object_finish_gpu(obj);
+       ret = i915_gem_object_wait_rendering(obj, true);
        dev_priv->mm.interruptible = was_interruptible;
 
-       return ret;
+       WARN_ON(ret);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
...