Commit 768e159f authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Improve handling of overlapping objects

The generic interval tree we use to speed up range invalidation is an
augmented rbtree that can report all overlapping intervals for a given
range. Therefore we do not need to degrade to a linear list if we find
overlapping objects. Oops.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453397563-2848-1-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 6ecf56ae
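For context (not part of the patch), below is a minimal sketch of the lookup pattern the driver switches to: the generic interval tree in <linux/interval_tree.h> reports every node whose inclusive [start, last] range overlaps a query, so overlapping objects need no linear-list fallback. The names example_range and example_for_each_overlap() are invented for illustration; only the interval_tree_* helpers, struct interval_tree_node and the rb_root-based API match what the diff below uses.

/* Illustrative only: example_range / example_for_each_overlap are made-up
 * names; the interval_tree_* helpers are the stock kernel API used by the
 * patch (rb_root-based, as in this era of the driver).
 */
#include <linux/interval_tree.h>
#include <linux/kernel.h>

struct example_range {
	struct interval_tree_node it;	/* supplies inclusive it.start / it.last */
	/* per-range payload would live here */
};

static void example_for_each_overlap(struct rb_root *root,
				     unsigned long start,
				     unsigned long last)
{
	struct interval_tree_node *it;

	/* Visit every node whose [it.start, it.last] intersects the inclusive
	 * query [start, last]; each iterator step costs at most O(log n).
	 * Both bounds are inclusive, which is why the driver does "end--".
	 */
	it = interval_tree_iter_first(root, start, last);
	while (it) {
		struct example_range *r =
			container_of(it, struct example_range, it);

		/* ... act on the overlapping range 'r' ... */
		(void)r;

		it = interval_tree_iter_next(it, start, last);
	}
}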
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -49,21 +49,18 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
-	struct list_head linear;
-	bool has_linear;
 };
 
 struct i915_mmu_object {
 	struct i915_mmu_notifier *mn;
-	struct drm_i915_gem_object *obj;
 	struct interval_tree_node it;
 	struct list_head link;
+	struct drm_i915_gem_object *obj;
 	struct work_struct work;
-	bool active;
-	bool is_linear;
+	bool attached;
 };
 
-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
 	struct drm_i915_gem_object *obj = mo->obj;
@@ -94,24 +91,22 @@ static void __cancel_userptr__worker(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
 {
-	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
+	if (mo->attached)
+		return;
 
-	/* The mmu_object is released late when destroying the
-	 * GEM object so it is entirely possible to gain a
-	 * reference on an object in the process of being freed
-	 * since our serialisation is via the spinlock and not
-	 * the struct_mutex - and consequently use it after it
-	 * is freed and then double free it.
-	 */
-	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-		schedule_work(&mo->work);
-		/* only schedule one work packet to avoid the refleak */
-		mo->active = false;
-	}
+	interval_tree_insert(&mo->it, &mo->mn->objects);
+	mo->attached = true;
+}
 
-	return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+	if (!mo->attached)
+		return;
+
+	interval_tree_remove(&mo->it, &mo->mn->objects);
+	mo->attached = false;
 }
 
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct i915_mmu_object *mo;
+	struct interval_tree_node *it;
+	LIST_HEAD(cancelled);
+
+	if (RB_EMPTY_ROOT(&mn->objects))
+		return;
 
 	/* interval ranges are inclusive, but invalidate range is exclusive */
 	end--;
 
 	spin_lock(&mn->lock);
-	if (mn->has_linear) {
-		list_for_each_entry(mo, &mn->linear, link) {
-			if (mo->it.last < start || mo->it.start > end)
-				continue;
-
-			cancel_userptr(mo);
-		}
-	} else {
-		struct interval_tree_node *it;
-
-		it = interval_tree_iter_first(&mn->objects, start, end);
-		while (it) {
-			mo = container_of(it, struct i915_mmu_object, it);
-			start = cancel_userptr(mo);
-			it = interval_tree_iter_next(it, start, end);
-		}
+	it = interval_tree_iter_first(&mn->objects, start, end);
+	while (it) {
+		/* The mmu_object is released late when destroying the
+		 * GEM object so it is entirely possible to gain a
+		 * reference on an object in the process of being freed
+		 * since our serialisation is via the spinlock and not
+		 * the struct_mutex - and consequently use it after it
+		 * is freed and then double free it. To prevent that
+		 * use-after-free we only acquire a reference on the
+		 * object if it is not in the process of being destroyed.
+		 */
+		mo = container_of(it, struct i915_mmu_object, it);
+		if (kref_get_unless_zero(&mo->obj->base.refcount))
+			schedule_work(&mo->work);
+
+		list_add(&mo->link, &cancelled);
+		it = interval_tree_iter_next(it, start, end);
 	}
+	list_for_each_entry(mo, &cancelled, link)
+		del_object(mo);
 	spin_unlock(&mn->lock);
 }
@@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
-	INIT_LIST_HEAD(&mn->linear);
-	mn->has_linear = false;
 
 	 /* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
@@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	return mn;
 }
 
-static int
-i915_mmu_notifier_add(struct drm_device *dev,
-		      struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	struct interval_tree_node *it;
-	int ret = 0;
-
-	/* By this point we have already done a lot of expensive setup that
-	 * we do not want to repeat just because the caller (e.g. X) has a
-	 * signal pending (and partly because of that expensive setup, X
-	 * using an interrupt timer is likely to get stuck in an EINTR loop).
-	 */
-	mutex_lock(&dev->struct_mutex);
-
-	/* Make sure we drop the final active reference (and thereby
-	 * remove the objects from the interval tree) before we do
-	 * the check for overlapping objects.
-	 */
-	i915_gem_retire_requests(dev);
-
-	spin_lock(&mn->lock);
-	it = interval_tree_iter_first(&mn->objects,
-				      mo->it.start, mo->it.last);
-	if (it) {
-		struct drm_i915_gem_object *obj;
-
-		/* We only need to check the first object in the range as it
-		 * either has cancelled gup work queued and we need to
-		 * return back to the user to give time for the gup-workers
-		 * to flush their object references upon which the object will
-		 * be removed from the interval-tree, or the the range is
-		 * still in use by another client and the overlap is invalid.
-		 *
-		 * If we do have an overlap, we cannot use the interval tree
-		 * for fast range invalidation.
-		 */
-
-		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		if (!obj->userptr.workers)
-			mn->has_linear = mo->is_linear = true;
-		else
-			ret = -EAGAIN;
-	} else
-		interval_tree_insert(&mo->it, &mn->objects);
-
-	if (ret == 0)
-		list_add(&mo->link, &mn->linear);
-
-	spin_unlock(&mn->lock);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
-	struct i915_mmu_object *mo;
-
-	list_for_each_entry(mo, &mn->linear, link)
-		if (mo->is_linear)
-			return true;
-
-	return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	spin_lock(&mn->lock);
-	list_del(&mo->link);
-	if (mo->is_linear)
-		mn->has_linear = i915_mmu_notifier_has_linear(mn);
-	else
-		interval_tree_remove(&mo->it, &mn->objects);
-	spin_unlock(&mn->lock);
-}
-
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mo->mn, mo);
+	spin_lock(&mo->mn->lock);
+	del_object(mo);
+	spin_unlock(&mo->mn->lock);
 	kfree(mo);
 
 	obj->userptr.mmu_object = NULL;
@@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 {
 	struct i915_mmu_notifier *mn;
 	struct i915_mmu_object *mo;
-	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 		return -ENOMEM;
 
 	mo->mn = mn;
-	mo->it.start = obj->userptr.ptr;
-	mo->it.last = mo->it.start + obj->base.size - 1;
 	mo->obj = obj;
-	INIT_WORK(&mo->work, __cancel_userptr__worker);
-
-	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-	if (ret) {
-		kfree(mo);
-		return ret;
-	}
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+	INIT_WORK(&mo->work, cancel_userptr);
 
 	obj->userptr.mmu_object = mo;
 	return 0;
@@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
 	/* In order to serialise get_pages with an outstanding
 	 * cancel_userptr, we must drop the struct_mutex and try again.
 	 */
-	if (!value || !work_pending(&obj->userptr.mmu_object->work))
-		obj->userptr.mmu_object->active = value;
+	if (!value)
+		del_object(obj->userptr.mmu_object);
+	else if (!work_pending(&obj->userptr.mmu_object->work))
+		add_object(obj->userptr.mmu_object);
 	else
 		ret = -EAGAIN;
 	spin_unlock(&obj->userptr.mmu_object->mn->lock);