Commit 3365e226 authored by Chris Wilson

drm/i915: Lazily unbind vma on close

When userspace is passing around swapbuffers using DRI, we frequently
have to open and close the same object in the foreign address space.
This shows itself as the same object being rebound at roughly 30fps
(with a second object also being rebound at 30fps), which involves us
having to rewrite the page tables and maintain the drm_mm range manager
every time.

However, since the object still exists and it is only the local handle
that disappears, if we are lazy and do not unbind the VMA immediately
when the local user closes the object but defer it until the GPU is
idle, then we can reuse the same VMA binding. We still have to be
careful to mark the handle and lookup tables as closed to maintain the
uABI, just allowing the underlying VMA to be resurrected if the user is
able to access the same object from the same context again.
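
As an aside, the lifecycle this creates can be illustrated with a minimal
userspace C sketch of the deferred-close pattern (an illustration only, not
the kernel code: vma_close(), vma_reopen() and vma_parked() merely mirror the
roles of the new i915_vma_close(), i915_vma_reopen() and i915_vma_parked()
entry points, and a plain singly-linked list stands in for the list_head on
i915->gt.closed_vma):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vma {
	bool closed;
	struct vma *next_closed;	/* link on the closed list */
};

static struct vma *closed_vma;		/* models i915->gt.closed_vma */

static void vma_close(struct vma *vma)
{
	/* Defer: flag and queue the vma instead of unbinding it now. */
	assert(!vma->closed);
	vma->closed = true;
	vma->next_closed = closed_vma;
	closed_vma = vma;
}

static void vma_reopen(struct vma *vma)
{
	/* Resurrect a closed vma before the next idle point reaps it. */
	if (vma->closed) {
		struct vma **p = &closed_vma;

		while (*p != vma)
			p = &(*p)->next_closed;
		*p = vma->next_closed;
		vma->closed = false;
	}
}

static void vma_parked(void)
{
	/* GPU is idle: now really unbind and destroy everything queued. */
	while (closed_vma) {
		struct vma *vma = closed_vma;

		closed_vma = vma->next_closed;
		printf("unbind + destroy vma %p\n", (void *)vma);
		free(vma);
	}
}

int main(void)
{
	struct vma *vma = calloc(1, sizeof(*vma));

	vma_close(vma);		/* handle closed: vma merely queued */
	vma_reopen(vma);	/* same object reused: binding recycled */
	vma_close(vma);		/* closed again... */
	vma_parked();		/* ...and reaped at the next GPU idle */
	return 0;
}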

If the object itself is destroyed (i.e. no userspace handle to it remains
anywhere), the VMA will be reaped immediately as usual.

In the future, this will be even more useful as instantiating a new VMA
for use on the GPU will become heavier. A nuisance indeed, so nip it in
the bud.

v2: s/__i915_vma_final_close/i915_vma_destroy/ etc.
v3: Leave a hint as to why we deferred the unbind on close.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503195115.22309-1-chris@chris-wilson.co.uk
parent dc74f6fe
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2062,6 +2062,7 @@ struct drm_i915_private {
 		struct list_head timelines;
 
 		struct list_head active_rings;
+		struct list_head closed_vma;
 		u32 active_requests;
 		u32 request_serial;
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -165,6 +165,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
+	i915_vma_parked(i915);
 
 	i915->gt.awake = false;
@@ -4795,7 +4796,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				 &obj->vma_list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
 			vma->flags &= ~I915_VMA_PIN_MASK;
-			i915_vma_close(vma);
+			i915_vma_destroy(vma);
 		}
 		GEM_BUG_ON(!list_empty(&obj->vma_list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
@@ -5598,6 +5599,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
+	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
 	i915_gem_init__mm(dev_priv);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -762,7 +762,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		}
 
 		/* transfer ref to ctx */
-		vma->open_count++;
+		if (!vma->open_count++)
+			i915_vma_reopen(vma);
 		list_add(&lut->obj_link, &obj->lut_list);
 		list_add(&lut->ctx_link, &eb->ctx->handles_list);
 		lut->ctx = eb->ctx;
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2218,6 +2218,12 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
 }
 
 void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(vm->closed);
+	vm->closed = true;
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
 	struct list_head *phases[] = {
 		&vm->active_list,
@@ -2226,15 +2232,12 @@ void i915_ppgtt_close(struct i915_address_space *vm)
 		NULL,
 	}, **phase;
 
-	GEM_BUG_ON(vm->closed);
 	vm->closed = true;
-
 	for (phase = phases; *phase; phase++) {
 		struct i915_vma *vma, *vn;
 
 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			if (!i915_vma_is_closed(vma))
-				i915_vma_close(vma);
+			i915_vma_destroy(vma);
 	}
 }
@@ -2245,7 +2248,8 @@ void i915_ppgtt_release(struct kref *kref)
 	trace_i915_ppgtt_release(&ppgtt->base);
 
-	/* vmas should already be unbound and destroyed */
+	ppgtt_destroy_vma(&ppgtt->base);
+
 	GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
 	GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
 	GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -46,8 +46,6 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
-		WARN_ON(i915_vma_unbind(vma));
 
 	GEM_BUG_ON(!i915_gem_object_is_active(obj));
 	if (--obj->active_count)
@@ -232,7 +230,6 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = vma_create(obj, vm, view);
 
-	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
 	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
 	return vma;
@@ -684,13 +681,43 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	return ret;
 }
 
-static void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
+
+	/*
+	 * We defer actually closing, unbinding and destroying the VMA until
+	 * the next idle point, or if the object is freed in the meantime. By
+	 * postponing the unbind, we allow for it to be resurrected by the
+	 * client, avoiding the work required to rebind the VMA. This is
+	 * advantageous for DRI, where the client/server pass objects
+	 * between themselves, temporarily opening a local VMA to the
+	 * object, and then closing it again. The same object is then reused
+	 * on the next frame (or two, depending on the depth of the swap queue)
+	 * causing us to rebind the VMA once more. This ends up being a lot
+	 * of wasted work for the steady state.
+	 */
+	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	if (vma->flags & I915_VMA_CLOSED) {
+		vma->flags &= ~I915_VMA_CLOSED;
+		list_del(&vma->closed_link);
+	}
+}
+
+static void __i915_vma_destroy(struct i915_vma *vma)
 {
 	int i;
 
 	GEM_BUG_ON(vma->node.allocated);
-	GEM_BUG_ON(i915_vma_is_active(vma));
-	GEM_BUG_ON(!i915_vma_is_closed(vma));
 	GEM_BUG_ON(vma->fence);
 
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
@@ -699,6 +726,7 @@ static void i915_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 	list_del(&vma->vm_link);
+	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -706,15 +734,30 @@ static void i915_vma_destroy(struct i915_vma *vma)
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
 
-void i915_vma_close(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
 {
-	GEM_BUG_ON(i915_vma_is_closed(vma));
-	vma->flags |= I915_VMA_CLOSED;
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+	if (i915_vma_is_closed(vma))
+		list_del(&vma->closed_link);
 
-	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
-		WARN_ON(i915_vma_unbind(vma));
+	WARN_ON(i915_vma_unbind(vma));
+	__i915_vma_destroy(vma);
+}
+
+void i915_vma_parked(struct drm_i915_private *i915)
+{
+	struct i915_vma *vma, *next;
+
+	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
+		GEM_BUG_ON(!i915_vma_is_closed(vma));
+		i915_vma_destroy(vma);
+	}
+
+	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -804,7 +847,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		return -EBUSY;
 
 	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+		return 0;
 
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -841,10 +884,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	i915_vma_remove(vma);
 
-destroy:
-	if (unlikely(i915_vma_is_closed(vma)))
-		i915_vma_destroy(vma);
-
 	return 0;
 }
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -119,6 +119,8 @@ struct i915_vma {
 	/** This vma's place in the eviction list */
 	struct list_head evict_link;
 
+	struct list_head closed_link;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -285,6 +287,8 @@ void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
 
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags);
@@ -408,6 +412,8 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 	__i915_vma_unpin_fence(vma);
 }
 
+void i915_vma_parked(struct drm_i915_private *i915);
+
 #define for_each_until(cond) if (cond) break; else
 
 /**
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1091,7 +1091,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 out_vma_unpin:
 	i915_vma_unpin(vma);
 out_vma_close:
-	i915_vma_close(vma);
+	i915_vma_destroy(vma);
 
 	return err;
 }
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -226,6 +226,7 @@ struct drm_i915_private *mock_gem_device(void)
 	INIT_LIST_HEAD(&i915->gt.timelines);
 	INIT_LIST_HEAD(&i915->gt.active_rings);
+	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_init_ggtt(i915);