Commit f8a7fde4 authored by Chris Wilson

drm/i915: Defer active reference until required

We only need the active reference to keep the object alive after the
handle has been deleted (so as to prevent a synchronous gem_close). Why
then pay the price of a kref on every execbuf when we can insert that
final active ref just in time for the handle deletion?
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-6-chris@chris-wilson.co.uk
parent 2e36991a
...@@ -2246,6 +2246,12 @@ struct drm_i915_gem_object { ...@@ -2246,6 +2246,12 @@ struct drm_i915_gem_object {
#define __I915_BO_ACTIVE(bo) \ #define __I915_BO_ACTIVE(bo) \
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK) ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
* Have we taken a reference for the object for incomplete GPU
* activity?
*/
#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
/** /**
* This is set if the object has been written to since last bound * This is set if the object has been written to since last bound
* to the GTT * to the GTT
...@@ -2407,6 +2413,28 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj, ...@@ -2407,6 +2413,28 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT); return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
} }
/*
 * Returns true if this object currently holds the deferred "active"
 * reference, i.e. the I915_BO_ACTIVE_REF bit is set in obj->flags.
 * Per this commit, that extra kref is only taken when the handle is
 * closed while the object is still active on the GPU, and is dropped
 * again in i915_gem_object_retire__read().
 */
static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
/*
 * Mark the object as holding the deferred active reference by setting
 * I915_BO_ACTIVE_REF. Caller must hold struct_mutex (asserted below),
 * which is why the non-atomic __set_bit() is safe here. The caller is
 * responsible for pairing this with i915_gem_object_get().
 */
static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
/*
 * Clear the I915_BO_ACTIVE_REF flag. Caller must hold struct_mutex
 * (asserted below), making the non-atomic __clear_bit() safe. The
 * caller is responsible for dropping the corresponding reference via
 * i915_gem_object_put(), as done in i915_gem_object_retire__read().
 */
static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
static inline unsigned int static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{ {
......
...@@ -2661,7 +2661,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active, ...@@ -2661,7 +2661,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
list_move_tail(&obj->global_list, list_move_tail(&obj->global_list,
&request->i915->mm.bound_list); &request->i915->mm.bound_list);
i915_gem_object_put(obj); if (i915_gem_object_has_active_reference(obj)) {
i915_gem_object_clear_active_reference(obj);
i915_gem_object_put(obj);
}
} }
static bool i915_context_is_banned(const struct i915_gem_context *ctx) static bool i915_context_is_banned(const struct i915_gem_context *ctx)
...@@ -2935,6 +2938,12 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) ...@@ -2935,6 +2938,12 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv) if (vma->vm->file == fpriv)
i915_vma_close(vma); i915_vma_close(vma);
if (i915_gem_object_is_active(obj) &&
!i915_gem_object_has_active_reference(obj)) {
i915_gem_object_set_active_reference(obj);
i915_gem_object_get(obj);
}
mutex_unlock(&obj->base.dev->struct_mutex); mutex_unlock(&obj->base.dev->struct_mutex);
} }
...@@ -4475,6 +4484,17 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) ...@@ -4475,6 +4484,17 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
} }
/*
 * Release the caller's reference on @obj, unless the object is still
 * active on the GPU. If it is active, convert the caller's reference
 * into the deferred active reference (I915_BO_ACTIVE_REF) so the final
 * put happens at retire time instead of synchronously here. Caller must
 * hold struct_mutex; the object must not already carry the active
 * reference (GEM_BUG_ON), since only one deferred ref may be stored.
 */
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
if (i915_gem_object_is_active(obj))
i915_gem_object_set_active_reference(obj);
else
i915_gem_object_put(obj);
}
int i915_gem_suspend(struct drm_device *dev) int i915_gem_suspend(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
......
...@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) ...@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next, list_for_each_entry_safe(obj, next,
&pool->cache_list[n], &pool->cache_list[n],
batch_pool_link) batch_pool_link)
i915_gem_object_put(obj); __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]); INIT_LIST_HEAD(&pool->cache_list[n]);
} }
......
...@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref) ...@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring) if (ce->ring)
intel_ring_free(ce->ring); intel_ring_free(ce->ring);
i915_vma_put(ce->state); __i915_gem_object_release_unless_active(ce->state->obj);
} }
put_pid(ctx->pid); put_pid(ctx->pid);
......
...@@ -1290,8 +1290,6 @@ void i915_vma_move_to_active(struct i915_vma *vma, ...@@ -1290,8 +1290,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped * add the active reference first and queue for it to be dropped
* *last*. * *last*.
*/ */
if (!i915_gem_object_is_active(obj))
i915_gem_object_get(obj);
i915_gem_object_set_active(obj, idx); i915_gem_object_set_active(obj, idx);
i915_gem_active_set(&obj->last_read[idx], req); i915_gem_active_set(&obj->last_read[idx], req);
......
...@@ -3734,11 +3734,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ...@@ -3734,11 +3734,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void i915_vma_unpin_and_release(struct i915_vma **p_vma) void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{ {
struct i915_vma *vma; struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma); vma = fetch_and_zero(p_vma);
if (!vma) if (!vma)
return; return;
obj = vma->obj;
i915_vma_unpin(vma); i915_vma_unpin(vma);
i915_vma_put(vma); i915_vma_close(vma);
__i915_gem_object_release_unless_active(obj);
} }
...@@ -224,7 +224,8 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req) ...@@ -224,7 +224,8 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
i915_vma_move_to_active(so.vma, req, 0); i915_vma_move_to_active(so.vma, req, 0);
err_unpin: err_unpin:
i915_vma_unpin(so.vma); i915_vma_unpin(so.vma);
i915_vma_close(so.vma);
err_obj: err_obj:
i915_gem_object_put(obj); __i915_gem_object_release_unless_active(obj);
return ret; return ret;
} }
...@@ -1762,14 +1762,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine) ...@@ -1762,14 +1762,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs *engine)
{ {
struct i915_vma *vma; struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma); vma = fetch_and_zero(&engine->status_page.vma);
if (!vma) if (!vma)
return; return;
obj = vma->obj;
i915_vma_unpin(vma); i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj); i915_vma_close(vma);
i915_vma_put(vma);
i915_gem_object_unpin_map(obj);
__i915_gem_object_release_unless_active(obj);
} }
static int init_status_page(struct intel_engine_cs *engine) static int init_status_page(struct intel_engine_cs *engine)
...@@ -1967,7 +1972,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) ...@@ -1967,7 +1972,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void void
intel_ring_free(struct intel_ring *ring) intel_ring_free(struct intel_ring *ring)
{ {
i915_vma_put(ring->vma); struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
__i915_gem_object_release_unless_active(obj);
kfree(ring); kfree(ring);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment