Commit 83835b3d authored by Peter Zijlstra, committed by Ingo Molnar

slab, lockdep: Annotate slab -> rcu -> debug_object -> slab

Lockdep thinks there's lock recursion through:

	kmem_cache_free()
	  cache_flusharray()
	    spin_lock(&l3->list_lock)  <----------------.
	    free_block()                                |
	      slab_destroy()                            |
		call_rcu()                              |
		  debug_object_activate()               |
		    debug_object_init()                 |
		      __debug_object_init()             |
			kmem_cache_alloc()              |
			  cache_alloc_refill()          |
			    spin_lock(&l3->list_lock) --'

Now, debug objects doesn't use SLAB_DESTROY_BY_RCU and hence there is no
actual possibility of recursion. Luckily, debug objects marks its slab
caches with SLAB_DEBUG_OBJECTS, so we can identify them.

Mark all SLAB_DEBUG_OBJECTS slab caches (all one of them!) with a special
lockdep key so that lockdep sees it's a different cachep.

Also add a WARN on trying to create a SLAB_DESTROY_BY_RCU |
SLAB_DEBUG_OBJECTS cache, to avoid possible future trouble.
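
For illustration only (not part of the patch below), here is a minimal
sketch of the lockdep re-classing trick being relied on: lockdep groups
locks into classes by the address of their struct lock_class_key, so
pointing the debug-objects caches' list_lock at a separate key makes the
nested acquisition look like two distinct classes rather than recursion.
The my_cache type and the key names are made up for the example:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>
	#include <linux/types.h>

	/* hypothetical cache-like object, for illustration only */
	struct my_cache {
		spinlock_t list_lock;
	};

	/* one key per kind of cache; lockdep classes locks by key address */
	static struct lock_class_key normal_key;
	static struct lock_class_key debugobj_key;

	static void my_cache_init(struct my_cache *c, bool debug_objects)
	{
		spin_lock_init(&c->list_lock);
		/*
		 * Locks sharing a key are one class to lockdep.  Giving the
		 * debug-objects cache its own key means taking its list_lock
		 * while another cache's list_lock is held is no longer seen
		 * as acquiring the same class twice.
		 */
		lockdep_set_class(&c->list_lock,
				  debug_objects ? &debugobj_key : &normal_key);
	}
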
Reported-and-tested-by: Sebastian Siewior <sebastian@breakpoint.cc>
[ fixes to the initial patch ]
Reported-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1311341165.27400.58.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 70a0686a
@@ -622,6 +622,51 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+		int q)
+{
+	struct array_cache **alc;
+	struct kmem_list3 *l3;
+	int r;
+
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	lockdep_set_class(&l3->list_lock, l3_key);
+	alc = l3->alien;
+	/*
+	 * FIXME: This check for BAD_ALIEN_MAGIC
+	 * should go away when common slab code is taught to
+	 * work even without alien caches.
+	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+	 * for alloc_alien_cache,
+	 */
+	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+		return;
+	for_each_node(r) {
+		if (alc[r])
+			lockdep_set_class(&alc[r]->lock, alc_key);
+	}
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	for_each_online_node(node)
+		slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
 static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct array_cache **alc;
 		struct kmem_list3 *l3;
-		int r;
 
 		l3 = s->cs_cachep->nodelists[q];
 		if (!l3 || OFF_SLAB(s->cs_cachep))
 			continue;
-		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-		alc = l3->alien;
-		/*
-		 * FIXME: This check for BAD_ALIEN_MAGIC
-		 * should go away when common slab code is taught to
-		 * work even without alien caches.
-		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-		 * for alloc_alien_cache,
-		 */
-		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-			continue;
-		for_each_node(r) {
-			if (alc[r])
-				lockdep_set_class(&alc[r]->lock,
-						  &on_slab_alc_key);
-		}
+
+		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+				&on_slab_alc_key, q);
 	}
 }
 
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
 static inline void init_lock_keys(void)
 {
 }
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
 #endif
 
 /*
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		spin_unlock_irq(&l3->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
+		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+			slab_set_debugobj_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		goto oops;
 	}
 
+	if (flags & SLAB_DEBUG_OBJECTS) {
+		/*
+		 * Would deadlock through slab_destroy()->call_rcu()->
+		 * debug_object_activate()->kmem_cache_alloc().
+		 */
+		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+		slab_set_debugobj_lock_classes(cachep);
+	}
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops: