Commit 4f62a7e0 authored by Daniel Vetter

drm/i915: Ditch i915 globals shrink infrastructure

This essentially reverts

commit 84a10749
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Jan 24 11:36:08 2018 +0000

    drm/i915: Shrink the GEM kmem_caches upon idling

mm/vmscan.c:do_shrink_slab() is a thing; if there's an issue with it
then we need to fix it there, not hand-roll our own slab shrinking
code in i915.

Also, when this was added there was only one other caller of
kmem_cache_shrink() (added to the ACPI code in 2005). Now there's a
second one outside of i915, in a kunit test, which seems legitimate
since it wants to control very carefully what's in the kmem_cache.
That's out of a total of over 500 calls to kmem_cache_create(). This
alone should have been enough of a warning sign that we're doing
something silly.

Noticed while reviewing a patch set from Jason to fix up some issues
in our i915_init() and i915_exit() module load/cleanup code. Now that
i915_globals.c isn't any different from normal init/exit functions, we
should convert them over to one unified table and remove
i915_globals.[hc] entirely.

v2: Improve commit message (Jason)
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721183229.4136488-1-daniel.vetter@ffwll.ch
parent 6b73a7f3
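
For orientation before the diff, here is a minimal sketch of the per-subsystem pattern that remains after this patch: a kmem_cache plus an .exit hook registered through i915_global_register(), with no .shrink hook. The "foo" subsystem, its struct and its init/exit function names are hypothetical placeholders, not code from the tree; the point is that idle-time trimming of these caches is left entirely to the core MM slab shrinker (mm/vmscan.c:do_shrink_slab()).

/*
 * Illustrative sketch only: "foo" is a made-up i915 subsystem. The real
 * users (i915_global_gem_context, i915_global_object, i915_global_context,
 * i915_global_request, i915_global_scheduler, i915_global_vma, ...) follow
 * the same shape once the .shrink hook is removed.
 */
#include <linux/slab.h>

#include "i915_globals.h"

struct foo {
        struct list_head link;  /* placeholder payload */
};

static struct i915_global_foo {
        struct i915_global base;
        struct kmem_cache *slab_foos;
} global;

static void i915_global_foo_exit(void)
{
        /* Module unload: destroy the cache; no manual kmem_cache_shrink(). */
        kmem_cache_destroy(global.slab_foos);
}

/* Tentative definition above, initialised here, mirroring the i915 style. */
static struct i915_global_foo global = { {
        .exit = i915_global_foo_exit,
} };

int __init i915_global_foo_init(void)
{
        global.slab_foos = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
        if (!global.slab_foos)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}

With the driver-side shrink callbacks gone, slab trimming is no longer triggered from __gt_park(); reclaim under memory pressure goes through the regular shrinker path instead, as argued in the commit message above.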
@@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
 #include "selftests/i915_gem_context.c"
 #endif
 
-static void i915_global_gem_context_shrink(void)
-{
-        kmem_cache_shrink(global.slab_luts);
-}
-
 static void i915_global_gem_context_exit(void)
 {
         kmem_cache_destroy(global.slab_luts);
 }
 
 static struct i915_global_gem_context global = { {
-        .shrink = i915_global_gem_context_shrink,
         .exit = i915_global_gem_context_exit,
 } };
@@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
-static void i915_global_objects_shrink(void)
-{
-        kmem_cache_shrink(global.slab_objects);
-}
-
 static void i915_global_objects_exit(void)
 {
         kmem_cache_destroy(global.slab_objects);
 }
 
 static struct i915_global_object global = { {
-        .shrink = i915_global_objects_shrink,
         .exit = i915_global_objects_exit,
 } };
@@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
         i915_active_fini(&ce->active);
 }
 
-static void i915_global_context_shrink(void)
-{
-        kmem_cache_shrink(global.slab_ce);
-}
-
 static void i915_global_context_exit(void)
 {
         kmem_cache_destroy(global.slab_ce);
 }
 
 static struct i915_global_context global = { {
-        .shrink = i915_global_context_shrink,
         .exit = i915_global_context_exit,
 } };
@@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
         GT_TRACE(gt, "\n");
 
-        i915_globals_unpark();
-
         /*
          * It seems that the DMC likes to transition between the DC states a lot
          * when there are no connected displays (no active power domains) during
@@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
         GEM_BUG_ON(!wakeref);
         intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-        i915_globals_park();
-
         return 0;
 }
@@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
 #include "selftests/i915_active.c"
 #endif
 
-static void i915_global_active_shrink(void)
-{
-        kmem_cache_shrink(global.slab_cache);
-}
-
 static void i915_global_active_exit(void)
 {
         kmem_cache_destroy(global.slab_cache);
 }
 
 static struct i915_global_active global = { {
-        .shrink = i915_global_active_shrink,
         .exit = i915_global_active_exit,
 } };
@@ -17,61 +17,8 @@
 static LIST_HEAD(globals);
 
-static atomic_t active;
-static atomic_t epoch;
-static struct park_work {
-        struct delayed_work work;
-        struct rcu_head rcu;
-        unsigned long flags;
-#define PENDING 0
-        int epoch;
-} park;
-
-static void i915_globals_shrink(void)
-{
-        struct i915_global *global;
-
-        /*
-         * kmem_cache_shrink() discards empty slabs and reorders partially
-         * filled slabs to prioritise allocating from the mostly full slabs,
-         * with the aim of reducing fragmentation.
-         */
-        list_for_each_entry(global, &globals, link)
-                global->shrink();
-}
-
-static void __i915_globals_grace(struct rcu_head *rcu)
-{
-        /* Ratelimit parking as shrinking is quite slow */
-        schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
-}
-
-static void __i915_globals_queue_rcu(void)
-{
-        park.epoch = atomic_inc_return(&epoch);
-        if (!atomic_read(&active)) {
-                init_rcu_head(&park.rcu);
-                call_rcu(&park.rcu, __i915_globals_grace);
-        }
-}
-
-static void __i915_globals_park(struct work_struct *work)
-{
-        destroy_rcu_head(&park.rcu);
-
-        /* Confirm nothing woke up in the last grace period */
-        if (park.epoch != atomic_read(&epoch)) {
-                __i915_globals_queue_rcu();
-                return;
-        }
-
-        clear_bit(PENDING, &park.flags);
-        i915_globals_shrink();
-}
-
 void __init i915_global_register(struct i915_global *global)
 {
-        GEM_BUG_ON(!global->shrink);
         GEM_BUG_ON(!global->exit);
 
         list_add_tail(&global->link, &globals);
@@ -109,52 +56,10 @@ int __init i915_globals_init(void)
                 }
         }
 
-        INIT_DELAYED_WORK(&park.work, __i915_globals_park);
-
         return 0;
 }
 
-void i915_globals_park(void)
-{
-        /*
-         * Defer shrinking the global slab caches (and other work) until
-         * after a RCU grace period has completed with no activity. This
-         * is to try and reduce the latency impact on the consumers caused
-         * by us shrinking the caches the same time as they are trying to
-         * allocate, with the assumption being that if we idle long enough
-         * for an RCU grace period to elapse since the last use, it is likely
-         * to be longer until we need the caches again.
-         */
-        if (!atomic_dec_and_test(&active))
-                return;
-
-        /* Queue cleanup after the next RCU grace period has freed slabs */
-        if (!test_and_set_bit(PENDING, &park.flags))
-                __i915_globals_queue_rcu();
-}
-
-void i915_globals_unpark(void)
-{
-        atomic_inc(&epoch);
-        atomic_inc(&active);
-}
-
-static void __exit __i915_globals_flush(void)
-{
-        atomic_inc(&active); /* skip shrinking */
-
-        rcu_barrier(); /* wait for the work to be queued */
-        flush_delayed_work(&park.work);
-
-        atomic_dec(&active);
-}
-
 void __exit i915_globals_exit(void)
 {
-        GEM_BUG_ON(atomic_read(&active));
-
-        __i915_globals_flush();
         __i915_globals_cleanup();
-
-        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
-        rcu_barrier();
 }
@@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
 struct i915_global {
         struct list_head link;
 
-        i915_global_func_t shrink;
         i915_global_func_t exit;
 };
 
 void i915_global_register(struct i915_global *global);
 
 int i915_globals_init(void);
-void i915_globals_park(void);
-void i915_globals_unpark(void);
 void i915_globals_exit(void);
 
 /* constructors */
@@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
 #include "selftests/i915_request.c"
 #endif
 
-static void i915_global_request_shrink(void)
-{
-        kmem_cache_shrink(global.slab_execute_cbs);
-        kmem_cache_shrink(global.slab_requests);
-}
-
 static void i915_global_request_exit(void)
 {
         kmem_cache_destroy(global.slab_execute_cbs);
@@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
 }
 
 static struct i915_global_request global = { {
-        .shrink = i915_global_request_shrink,
         .exit = i915_global_request_exit,
 } };
@@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
         return sched_engine;
 }
 
-static void i915_global_scheduler_shrink(void)
-{
-        kmem_cache_shrink(global.slab_dependencies);
-        kmem_cache_shrink(global.slab_priorities);
-}
-
 static void i915_global_scheduler_exit(void)
 {
         kmem_cache_destroy(global.slab_dependencies);
@@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
 }
 
 static struct i915_global_scheduler global = { {
-        .shrink = i915_global_scheduler_shrink,
         .exit = i915_global_scheduler_exit,
 } };
@@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
 #include "selftests/i915_vma.c"
 #endif
 
-static void i915_global_vma_shrink(void)
-{
-        kmem_cache_shrink(global.slab_vmas);
-}
-
 static void i915_global_vma_exit(void)
 {
         kmem_cache_destroy(global.slab_vmas);
 }
 
 static struct i915_global_vma global = { {
-        .shrink = i915_global_vma_shrink,
         .exit = i915_global_vma_exit,
 } };