Commit 9a5a53b3 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Reorganise rules for get_fence/put_fence

By simplifying the rules to calling get_fence when writing to the
object through the GTT in a tiled manner, and calling put_fence before
writing to the object through the GTT in a linear manner, the code
becomes clearer and there is less chance of making a mistake.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
[danvet: fixed up conflict with ppgtt code and spelling in a new
comment.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent cce66a28
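
In practice the new convention looks like this, as a minimal before/after
sketch distilled from the i915_gem_fault() hunk below (the surrounding
caller and its error handling are implied, not shown):

	/* Before: every GTT-write path had to check tiling itself. */
	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL);

	/*
	 * After: get_fence() handles untiled objects internally by
	 * dropping any existing fence (see the get_fence() hunk below),
	 * so a caller that may write either way makes one call.
	 */
	ret = i915_gem_object_get_fence(obj, NULL);

Callers that know they are about to write through the GTT in a linear
manner still call put_fence directly before the write.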
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1256,13 +1256,15 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-static inline void
+static inline bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-	}
+		return true;
+	} else
+		return false;
 }
 
 static inline void
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1078,9 +1078,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	if (obj->tiling_mode == I915_TILING_NONE)
-		ret = i915_gem_object_put_fence(obj);
-	else
-		ret = i915_gem_object_get_fence(obj, NULL);
+	ret = i915_gem_object_get_fence(obj, NULL);
 	if (ret)
 		goto unlock;
@@ -2395,19 +2392,19 @@ i915_find_fence_reg(struct drm_device *dev,
 }
 
 /**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
  * @obj: object to map through a fence reg
  * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
- *
  * This function walks the fence regs looking for a free one for @obj,
  * stealing one if it can't find any.
  *
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
@@ -2418,6 +2415,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	struct drm_i915_fence_reg *reg;
 	int ret;
 
+	if (obj->tiling_mode == I915_TILING_NONE)
+		return i915_gem_object_put_fence(obj);
+
 	/* XXX disable pipelining. There are bugs. Shocking. */
 	pipelined = NULL;
 
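
The kerneldoc above describes a walk-and-steal allocation over the fence
registers. A hedged sketch of that pattern in isolation follows; the
struct layout and helper name are simplified for illustration, not the
kernel's actual i915_find_fence_reg():

	#include <stddef.h>

	struct fence_reg {
		void *owner;	/* object mapped through this reg, or NULL */
		int pin_count;	/* a pinned reg must not be stolen */
	};

	/*
	 * Prefer a completely free register; failing that, remember an
	 * unpinned one that can be stolen from its owner. Returns NULL
	 * only if every register is pinned.
	 */
	static struct fence_reg *find_fence_reg(struct fence_reg *regs, int n)
	{
		struct fence_reg *avail = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (regs[i].owner == NULL)
				return &regs[i];
			if (regs[i].pin_count == 0)
				avail = &regs[i];
		}
		return avail;
	}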
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -530,18 +530,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			if (obj->tiling_mode) {
-				ret = i915_gem_object_get_fence(obj, ring);
-				if (ret)
-					goto err_unpin;
+			ret = i915_gem_object_get_fence(obj, ring);
+			if (ret)
+				goto err_unpin;
 
+			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-				i915_gem_object_pin_fence(obj);
-			} else {
-				ret = i915_gem_object_put_fence(obj);
-				if (ret)
-					goto err_unpin;
-			}
+
 			obj->pending_fenced_gpu_access = true;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2152,13 +2152,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	 * framebuffer compression.  For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined);
-		if (ret)
-			goto err_unpin;
+	ret = i915_gem_object_get_fence(obj, pipelined);
+	if (ret)
+		goto err_unpin;
 
-		i915_gem_object_pin_fence(obj);
-	}
+	i915_gem_object_pin_fence(obj);
 
 	dev_priv->mm.interruptible = true;
 	return 0;
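
For completeness, the pin taken in these hunks is balanced by an unpin
on the release path. A sketch of that pairing, based on the execbuffer
hunk above; the release-path placement is illustrative, and
i915_gem_object_unpin_fence() is assumed to be the inline counterpart
of pin_fence() in i915_drv.h:

	ret = i915_gem_object_get_fence(obj, ring);
	if (ret)
		goto err_unpin;

	/* pin_fence() now reports whether a fence reg was pinned. */
	if (i915_gem_object_pin_fence(obj))
		entry->flags |= __EXEC_OBJECT_HAS_FENCE;

	/* ... later, on release, only unpin what was actually pinned ... */
	if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
		i915_gem_object_unpin_fence(obj);
		entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
	}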