Commit a42375af authored by Chris Wilson

drm/i915: Release the active tracker tree upon idling

As soon as we detect that the active tracker is idle and we prepare to
call the retire callback, release the storage for our tree of
per-timeline nodes. We expect these to be infrequently used and quick
to allocate, so there is little benefit in keeping the tree cached and
we would prefer to return the pages back to the system in a timely
fashion.

This also means that when we finalize the struct as a whole, we know that,
as the activity tracker must be idle, the tree has already been released.
Indeed we can reduce i915_active_fini() just to the assertions that there
is nothing to do.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-3-chris@chris-wilson.co.uk
parent 64d6c500
@@ -16,12 +16,29 @@ struct active_node {
u64 timeline; u64 timeline;
}; };
static void
__active_park(struct i915_active *ref)
{
struct active_node *it, *n;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
GEM_BUG_ON(i915_gem_active_isset(&it->base));
kfree(it);
}
ref->tree = RB_ROOT;
}
static void static void
__active_retire(struct i915_active *ref) __active_retire(struct i915_active *ref)
{ {
GEM_BUG_ON(!ref->count); GEM_BUG_ON(!ref->count);
if (!--ref->count) if (--ref->count)
ref->retire(ref); return;
/* return the unused nodes to our slabcache */
__active_park(ref);
ref->retire(ref);
} }
static void static void
@@ -210,18 +227,14 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
return 0; return 0;
} }
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref) void i915_active_fini(struct i915_active *ref)
{ {
struct active_node *it, *n;
GEM_BUG_ON(i915_gem_active_isset(&ref->last)); GEM_BUG_ON(i915_gem_active_isset(&ref->last));
GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { GEM_BUG_ON(ref->count);
GEM_BUG_ON(i915_gem_active_isset(&it->base));
kfree(it);
}
ref->tree = RB_ROOT;
} }
#endif
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c" #include "selftests/i915_active.c"
......
@@ -64,6 +64,10 @@ i915_active_is_idle(const struct i915_active *ref)
return !ref->count; return !ref->count;
} }
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref); void i915_active_fini(struct i915_active *ref);
#else
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
#endif /* _I915_ACTIVE_H_ */ #endif /* _I915_ACTIVE_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment