Commit 294996a9 authored by Maarten Lankhorst

drm/i915: Remove support for unlocked i915_vma unbind

Now that we require the object lock for all ops, some code handling
race conditions can be removed.

This is required to not take short-term pins inside execbuf.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-6-maarten.lankhorst@linux.intel.com
parent 0f341974
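
The premise of the patch is that every path that can pin or unbind a vma now runs under the object's dma-resv lock, so a pin can no longer race with a concurrent i915_vma_unbind() and the mutex-guarded fallback deleted below becomes dead code. An illustrative caller shape (not taken from this patch; i915_gem_object_lock()/i915_gem_object_unlock() and i915_vma_unbind() are real i915 helpers, the surrounding code is a sketch):

	/* illustrative only: unbind with the object lock held, as the
	 * series requires; error handling trimmed for brevity */
	ret = i915_gem_object_lock(obj, NULL);
	if (ret == 0) {
		ret = i915_vma_unbind(vma);
		i915_gem_object_unlock(obj);
	}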
@@ -838,7 +838,6 @@ i915_vma_detach(struct i915_vma *vma)
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 {
 	unsigned int bound;
-	bool pinned = true;
 
 	bound = atomic_read(&vma->flags);
 	do {
@@ -848,34 +847,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
 			return false;
 
-		if (!(bound & I915_VMA_PIN_MASK))
-			goto unpinned;
-
 		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
 
 	return true;
-
-unpinned:
-	/*
-	 * If pin_count==0, but we are bound, check under the lock to avoid
-	 * racing with a concurrent i915_vma_unbind().
-	 */
-	mutex_lock(&vma->vm->mutex);
-	do {
-		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
-			pinned = false;
-			break;
-		}
-
-		if (unlikely(flags & ~bound)) {
-			pinned = false;
-			break;
-		}
-	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
-	mutex_unlock(&vma->vm->mutex);
-
-	return pinned;
 }
 
 static struct scatterlist *
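
The hunk above leaves try_qad_pin() with only its lock-free fast path: optimistically bump the pin count with a compare-exchange, failing if the error/overflow flags are set. A minimal userspace sketch of that pattern using C11 atomics; the flag values are illustrative, not i915's actual bit layout:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define VMA_ERROR    0x80000000u
	#define VMA_OVERFLOW 0x40000000u
	#define VMA_PIN_MASK 0x000000ffu	/* low bits hold the pin count */

	static bool try_pin(atomic_uint *flags)
	{
		unsigned int bound = atomic_load(flags);

		do {
			/* vma is unusable: bail out instead of pinning */
			if (bound & (VMA_OVERFLOW | VMA_ERROR))
				return false;
			/* refuse to let the pin count wrap into the flag bits */
			if (((bound + 1) & VMA_PIN_MASK) == 0)
				return false;
		} while (!atomic_compare_exchange_weak(flags, &bound, bound + 1));

		return true;	/* pinned without taking any lock */
	}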
@@ -1159,7 +1134,6 @@ static int
 __i915_vma_get_pages(struct i915_vma *vma)
 {
 	struct sg_table *pages;
-	int ret;
 
 	/*
 	 * The vma->pages are only valid within the lifespan of the borrowed
@@ -1192,18 +1166,16 @@ __i915_vma_get_pages(struct i915_vma *vma)
 		break;
 	}
 
-	ret = 0;
 	if (IS_ERR(pages)) {
-		ret = PTR_ERR(pages);
-		pages = NULL;
 		drm_err(&vma->vm->i915->drm,
-			"Failed to get pages for VMA view type %u (%d)!\n",
-			vma->ggtt_view.type, ret);
+			"Failed to get pages for VMA view type %u (%ld)!\n",
+			vma->ggtt_view.type, PTR_ERR(pages));
+		return PTR_ERR(pages);
 	}
 
 	vma->pages = pages;
 
-	return ret;
+	return 0;
 }
 
 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
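
This hunk drops the local "ret" and "pages = NULL" bookkeeping in favour of returning the encoded errno directly. A sketch of the kernel's ERR_PTR idiom, with simplified stand-ins for the helpers in <linux/err.h> and a hypothetical getter shaped like the post-patch code:

	#define MAX_ERRNO 4095

	/* simplified stand-ins for the kernel's <linux/err.h> helpers:
	 * a negative errno is carried in the top of the pointer range */
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	/* hypothetical getter: no "ret" variable, just an early return
	 * of the errno carried by the error pointer */
	static long get_pages(void **out, void *(*alloc)(void))
	{
		void *pages = alloc();

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		*out = pages;
		return 0;
	}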
@@ -1235,25 +1207,14 @@ I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 {
 	/* We allocate under vma_get_pages, so beware the shrinker */
-	struct sg_table *pages = READ_ONCE(vma->pages);
-
 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
 
 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
-		/*
-		 * The atomic_sub_return is a read barrier for the READ_ONCE of
-		 * vma->pages above.
-		 *
-		 * READ_ONCE is safe because this is either called from the same
-		 * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
-		 *
-		 * TODO: We're leaving vma->pages dangling, until vma->obj->resv
-		 * lock is required.
-		 */
-		if (pages != vma->obj->mm.pages) {
-			sg_free_table(pages);
-			kfree(pages);
+		if (vma->pages != vma->obj->mm.pages) {
+			sg_free_table(vma->pages);
+			kfree(vma->pages);
 		}
+		vma->pages = NULL;
 
 		i915_gem_object_unpin_pages(vma->obj);
 	}
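
With the object lock now serializing get and put, the last put can clear vma->pages instead of leaving it dangling (the TODO removed above). The release-on-last-put shape, sketched with C11 atomics on hypothetical types; note that atomic_fetch_sub returns the previous value, so "previous == count" is the kernel's "atomic_sub_return(...) == 0":

	#include <stdatomic.h>
	#include <stdlib.h>

	struct page_cache {		/* illustrative stand-in for the vma */
		atomic_uint pages_count;
		void *pages;
	};

	static void put_pages(struct page_cache *pc, unsigned int count)
	{
		/* only the caller that drops the count to zero frees */
		if (atomic_fetch_sub(&pc->pages_count, count) == count) {
			free(pc->pages);
			pc->pages = NULL;
		}
	}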
......