Commit 0d86ee35 authored by Chris Wilson

drm/i915/gt: Make fence revocation unequivocal

If we must revoke the fence because the VMA is no longer present, or
because the fence no longer applies, ensure that we do and convert it
into an error if we try but cannot.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200401210104.15907-3-chris@chris-wilson.co.uk
parent 725c9ee7
...@@ -298,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence, ...@@ -298,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
* *
* This function force-removes any fence from the given object, which is useful * This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access. * if the kernel wants to do untiled GTT access.
*
* Returns:
*
* 0 on success, negative error code on failure.
*/ */
int i915_vma_revoke_fence(struct i915_vma *vma) void i915_vma_revoke_fence(struct i915_vma *vma)
{ {
struct i915_fence_reg *fence = vma->fence; struct i915_fence_reg *fence = vma->fence;
intel_wakeref_t wakeref;
lockdep_assert_held(&vma->vm->mutex); lockdep_assert_held(&vma->vm->mutex);
if (!fence) if (!fence)
return 0; return;
if (atomic_read(&fence->pin_count)) GEM_BUG_ON(fence->vma != vma);
return -EBUSY; GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));
return fence_update(fence, NULL); fence->tiling = 0;
WRITE_ONCE(fence->vma, NULL);
vma->fence = NULL;
with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
fence_write(fence);
} }
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
......
...@@ -993,18 +993,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, ...@@ -993,18 +993,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
if (vma->fence && !i915_gem_object_is_tiled(obj)) { if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
ret = i915_vma_revoke_fence(vma); i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
if (ret)
return ERR_PTR(ret);
} }
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
ret = i915_vma_wait_for_bind(vma); ret = i915_vma_wait_for_bind(vma);
if (ret) { if (ret) {
i915_vma_unpin(vma); i915_vma_unpin(vma);
......
...@@ -1298,9 +1298,7 @@ int __i915_vma_unbind(struct i915_vma *vma) ...@@ -1298,9 +1298,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_flush_writes(vma); i915_vma_flush_writes(vma);
/* release the fence reg _after_ flushing */ /* release the fence reg _after_ flushing */
ret = i915_vma_revoke_fence(vma); i915_vma_revoke_fence(vma);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */ /* Force a pagefault for domain tracking on next user access */
i915_vma_revoke_mmap(vma); i915_vma_revoke_mmap(vma);
......
...@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) ...@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* True if the vma has a fence, false otherwise. * True if the vma has a fence, false otherwise.
*/ */
int __must_check i915_vma_pin_fence(struct i915_vma *vma); int __must_check i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_revoke_fence(struct i915_vma *vma); void i915_vma_revoke_fence(struct i915_vma *vma);
int __i915_vma_pin_fence(struct i915_vma *vma); int __i915_vma_pin_fence(struct i915_vma *vma);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment