Commit aa456c7a authored by Christopher Lameter, committed by Linus Torvalds

slub: remove kmalloc under list_lock from list_slab_objects() V2

list_slab_objects() is called when a slab is destroyed and objects remain in
it, in order to list those leftover objects in the syslog.  This is a pretty
rare event.

In that path we take the list_lock and then call kmalloc() while holding
that lock.

Perform the allocation in free_partial() before the list_lock is taken.
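
The diff below makes that change in free_partial()/list_slab_objects().  As a
rough, hedged illustration of the same pattern outside the kernel (a plain
pthread mutex standing in for list_lock, and made-up names report_leftovers()
and scratch; this is not the kernel code itself):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Called with list_lock held; must not allocate here. */
	static void report_leftovers(unsigned char *scratch, size_t n)
	{
		if (!scratch)
			return;		/* allocation failed: skip the report */
		memset(scratch, 0, n);
		printf("leftover objects: %zu slots scanned\n", n);
	}

	int main(void)
	{
		size_t n = 128;
		/* Allocate BEFORE taking the lock ... */
		unsigned char *scratch = malloc(n);

		pthread_mutex_lock(&list_lock);
		report_leftovers(scratch, n);	/* ... use it under the lock ... */
		pthread_mutex_unlock(&list_lock);

		free(scratch);			/* ... and free it after unlocking. */
		return 0;
	}

The point of the shape is the same as in the patch: the allocation (and the
free) happen outside the lock, and the locked section only uses a buffer that
was handed to it, falling back gracefully if the allocation failed.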

Fixes: bbd7d57b ("slub: Potential stack overflow")
Signed-off-by: Christopher Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Yu Zhao <yuzhao@google.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2002031721250.1668@www.lameter.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d7660ce5
@@ -3766,12 +3766,14 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-						const char *text)
+					      const char *text, unsigned long *map)
 {
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map;
+
+	if (!map)
+		return;
 
 	slab_err(s, page, text, s->name);
 	slab_lock(page);
@@ -3784,8 +3786,6 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			print_tracking(s, p);
 		}
 	}
-	put_map(map);
-
 	slab_unlock(page);
 #endif
 }
@@ -3799,6 +3799,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	LIST_HEAD(discard);
 	struct page *page, *h;
+	unsigned long *map = NULL;
+
+#ifdef CONFIG_SLUB_DEBUG
+	map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
+#endif
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
@@ -3808,11 +3813,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			list_add(&page->slab_list, &discard);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on __kmem_cache_shutdown()");
+			  "Objects remaining in %s on __kmem_cache_shutdown()",
+			  map);
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
 
+#ifdef CONFIG_SLUB_DEBUG
+	bitmap_free(map);
+#endif
+
 	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
 }