Commit 48b956c5 authored by Chris Wilson

drm/i915: Push pipelining of display plane flushes to the caller

This ensures that we do wait for the flushes to complete when necessary,
avoiding visual tears, whilst still enabling pipelined page-flips.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 9e76e7b8
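
The gist of the interface change, as a minimal sketch (illustrative, not part
of the patch): callers of intel_pin_and_fence_fb_obj() now say whether the
display-plane flush may be pipelined. Modesetting and fbdev paths pass false
and block until the flush has completed; the page-flip path passes true and
instead orders the flip after the flush in the ring.

	/* Sketch of the new caller contract (not in the patch itself). */

	/* Modeset / fbdev: must not scan out stale data, so block here. */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false /* pipelined */);

	/* Page flip: leave the flush in the ring; the flip queues behind it. */
	ret = intel_pin_and_fence_fb_obj(dev, obj, true /* pipelined */);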
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1013,7 +1013,8 @@ void i915_gem_process_flushing_list(struct drm_device *dev,
 				    struct intel_ring_buffer *ring);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+					 bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_gem_object *obj,
 				int id,
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2597,6 +2597,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
+	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->read_domains,
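
The new BUG_ON asserts an invariant rather than changing behaviour: queueing
the flush via i915_gem_flush() is expected to move the object onto the
flushing list and clear its GPU write domain, so a non-zero write_domain at
this point would mean a flush was missed. A hedged restatement:

	/* Assumed invariant (sketch): queueing the flush transfers the
	 * object's pending GPU writes to the flushing list...
	 */
	i915_gem_flush(dev, 0, obj->write_domain);
	BUG_ON(obj->write_domain);	/* ...so the write domain must now be 0 */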
@@ -2704,7 +2705,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+				     bool pipelined)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
@@ -2714,8 +2716,8 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
-	if (ret != 0)
+	ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
+	if (ret)
 		return ret;
 
 	i915_gem_object_flush_cpu_write_domain(obj);
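
For context, a sketch of the contract the pipelined flag is assumed to select
inside i915_gem_object_flush_gpu_write_domain() (simplified;
i915_gem_object_wait_rendering() matches the i915 helper of this era, but
treat the exact shape as an assumption):

	/* Sketch: queue the flush, then optionally block on it. */
	i915_gem_flush(dev, 0, obj->write_domain);
	if (pipelined)
		return 0;	/* caller orders later work in the ring */

	return i915_gem_object_wait_rendering(obj);	/* block until idle */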
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1417,7 +1417,9 @@ static void intel_update_fbc(struct drm_device *dev)
 }
 
 int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			   struct drm_gem_object *obj,
+			   bool pipelined)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
@@ -1445,14 +1447,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 	}
 
 	ret = i915_gem_object_pin(obj, alignment);
-	if (ret != 0)
+	if (ret)
 		return ret;
 
-	ret = i915_gem_object_set_to_display_plane(obj);
-	if (ret != 0) {
-		i915_gem_object_unpin(obj);
-		return ret;
-	}
+	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+	if (ret)
+		goto err_unpin;
 
 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
 	 * fence, whereas 965+ only requires a fence if using
@@ -1462,13 +1462,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
 	    obj_priv->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
 	}
 
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+	return ret;
 }
 
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
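
The conversion above also folds two duplicated unpin-and-return blocks into a
single exit label, the usual kernel cleanup idiom; distilled from the hunks
(illustrative):

	ret = i915_gem_object_pin(obj, alignment);
	if (ret)
		return ret;		/* nothing to unwind yet */

	/* ... every later failure funnels through one unwind path ... */
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);	/* single cleanup point */
	return ret;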
@@ -1589,7 +1591,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	obj_priv = to_intel_bo(obj);
 	mutex_lock(&dev->struct_mutex);
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -5004,7 +5006,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_unpin_work *work;
 	unsigned long flags, offset;
 	int pipe = intel_crtc->pipe;
-	u32 pf, pipesrc;
+	u32 was_dirty, pf, pipesrc;
 	int ret;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -5033,7 +5035,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	was_dirty = obj->write_domain & I915_GEM_GPU_DOMAINS;
+	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
 	if (ret)
 		goto cleanup_work;
@@ -5051,17 +5054,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	atomic_inc(&obj_priv->pending_flip);
 	work->pending_flip_obj = obj;
 
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
-		u32 flip_mask;
+	if (was_dirty || IS_GEN3(dev) || IS_GEN2(dev)) {
+		BEGIN_LP_RING(2);
+		if (IS_GEN3(dev) || IS_GEN2(dev)) {
+			u32 flip_mask;
 
-		if (intel_crtc->plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+			/* Can't queue multiple flips, so wait for the previous
+			 * one to finish before executing the next.
+			 */
+			if (intel_crtc->plane)
+				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+			else
+				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
-		BEGIN_LP_RING(2);
-		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-		OUT_RING(0);
+			OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+		} else
+			OUT_RING(MI_NOOP);
+		OUT_RING(MI_FLUSH);
 		ADVANCE_LP_RING();
 	}
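
Either way the block above emits exactly the two dwords reserved by
BEGIN_LP_RING(2); in summary (derived from the hunk):

	gen2/gen3:            OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);	/* flips cannot queue back-to-back */
	gen4+ with was_dirty: OUT_RING(MI_NOOP);
	both:                 OUT_RING(MI_FLUSH);

The MI_FLUSH sits in the ring ahead of the flip commands, so any outstanding
render flush completes before the flip executes, without stalling the CPU; a
clean framebuffer on gen4+ skips the ring emission entirely.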
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -281,7 +281,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_gem_object *obj);
+				      struct drm_gem_object *obj,
+				      bool pipelined);
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, fbo);
+	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;