Commit 7878c231 authored by Qian Cai, committed by Linus Torvalds

slab: remove /proc/slab_allocators

It turned out that DEBUG_SLAB_LEAK is still broken even after the
recent rescue efforts: when there is a large number of objects, which
is normal for kmemleak_object on a debug kernel,
  # grep kmemleak /proc/slabinfo
  kmemleak_object   2243606 3436210 ...

reading /proc/slab_allocators can easily loop forever while processing
the kmemleak_object cache, because any freeing or allocation of objects
in the meantime triggers a full reprocessing. To make the situation
worse, soft lockups occur easily in this state, and each soft-lockup
report calls printk(), which allocates yet more kmemleak objects,
guaranteeing an infinite loop.
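
For readers unfamiliar with the mechanism, the failure is a livelock:
the reader keeps rescanning until no alloc/free dirtied the cache
during a full pass, but on a debug kernel the pass itself provokes
allocations. A minimal user-space sketch of the pattern (the helper
names are hypothetical; this is not the kernel code):

  #include <stdatomic.h>
  #include <stdbool.h>

  static atomic_bool cache_dirty;	/* any concurrent alloc/free sets this */

  static void scan_all_objects(void)
  {
  	/*
  	 * With millions of objects this pass runs long enough to trip
  	 * the soft-lockup detector; the resulting printk() allocates
  	 * more kmemleak objects, which sets cache_dirty again.
  	 */
  }

  static void read_slab_allocators(void)
  {
  	do {
  		atomic_store(&cache_dirty, false);	/* set_store_user_clean() */
  		scan_all_objects();
  	} while (atomic_load(&cache_dirty));	/* in the kernel: never false */
  }

  int main(void)
  {
  	read_slab_allocators();	/* terminates here; never would in the kernel */
  	return 0;
  }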

Also, nobody seems to have noticed when this interface was completely
broken more than two years ago (see commit fcf88917, "slab: fix a crash
by reading /proc/slab_allocators"), so probably nobody cares about it
anymore, given the decline of SLAB. Just remove it entirely.
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 01be377c
@@ -61,9 +61,6 @@ struct kmem_cache {
 	atomic_t allocmiss;
 	atomic_t freehit;
 	atomic_t freemiss;
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	atomic_t store_user_clean;
-#endif
 
 	/*
 	 * If debugging is enabled, then the allocator can add additional
@@ -542,10 +542,6 @@ config DEBUG_SLAB
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
-config DEBUG_SLAB_LEAK
-	bool "Memory leak debugging"
-	depends on DEBUG_SLAB
-
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
 	depends on SLUB && SLUB_DEBUG
@@ -362,29 +362,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline bool is_store_user_clean(struct kmem_cache *cachep)
-{
-	return atomic_read(&cachep->store_user_clean) == 1;
-}
-
-static inline void set_store_user_clean(struct kmem_cache *cachep)
-{
-	atomic_set(&cachep->store_user_clean, 1);
-}
-
-static inline void set_store_user_dirty(struct kmem_cache *cachep)
-{
-	if (is_store_user_clean(cachep))
-		atomic_set(&cachep->store_user_clean, 0);
-}
-
-#else
-static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -2552,11 +2529,6 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
 	page->active++;
 
-#if DEBUG
-	if (cachep->flags & SLAB_STORE_USER)
-		set_store_user_dirty(cachep);
-#endif
-
 	return objp;
 }
@@ -2762,10 +2734,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
-	if (cachep->flags & SLAB_STORE_USER) {
-		set_store_user_dirty(cachep);
+	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = (void *)caller;
-	}
 
 	objnr = obj_to_index(cachep, page, objp);
@@ -4184,200 +4154,6 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 	return res;
 }
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline int add_caller(unsigned long *n, unsigned long v)
-{
-	unsigned long *p;
-	int l;
-	if (!v)
-		return 1;
-	l = n[1];
-	p = n + 2;
-	while (l) {
-		int i = l/2;
-		unsigned long *q = p + 2 * i;
-		if (*q == v) {
-			q[1]++;
-			return 1;
-		}
-		if (*q > v) {
-			l = i;
-		} else {
-			p = q + 2;
-			l -= i + 1;
-		}
-	}
-	if (++n[1] == n[0])
-		return 0;
-	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
-	p[0] = v;
-	p[1] = 1;
-	return 1;
-}
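
The function removed above is the heart of the accounting. add_caller()
maintains a flat table: n[0] holds the capacity, n[1] the number of
entries, and from n[2] onward sorted (caller address, hit count) pairs;
lookup is a binary search and insertion shifts the tail with memmove().
A stand-alone user-space rendition, runnable as-is (the main() driver
and the addresses it feeds in are hypothetical):

  #include <stdio.h>
  #include <string.h>

  static int add_caller(unsigned long *n, unsigned long v)
  {
  	unsigned long *p = n + 2;	/* first (address, count) pair */
  	unsigned long l = n[1];		/* entries currently in the table */

  	if (!v)
  		return 1;
  	while (l) {			/* binary search over the pairs */
  		unsigned long i = l / 2;
  		unsigned long *q = p + 2 * i;

  		if (*q == v) {		/* known caller: bump its count */
  			q[1]++;
  			return 1;
  		}
  		if (*q > v)
  			l = i;
  		else {
  			p = q + 2;
  			l -= i + 1;
  		}
  	}
  	if (++n[1] == n[0])		/* table full: caller must grow it */
  		return 0;
  	/* shift the tail up and insert the new (address, 1) pair */
  	memmove(p + 2, p,
  		n[1] * 2 * sizeof(unsigned long) - ((char *)p - (char *)n));
  	p[0] = v;
  	p[1] = 1;
  	return 1;
  }

  int main(void)
  {
  	unsigned long table[2 + 2 * 8] = { 8, 0 };	/* capacity 8, empty */

  	add_caller(table, 0xbeef);
  	add_caller(table, 0xcafe);
  	add_caller(table, 0xbeef);
  	printf("%lu entries; first: %#lx seen %lu times\n",
  	       table[1], table[2], table[3]);	/* 2; 0xbeef; 2 */
  	return 0;
  }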
-
-static void handle_slab(unsigned long *n, struct kmem_cache *c,
-			struct page *page)
-{
-	void *p;
-	int i, j;
-	unsigned long v;
-
-	if (n[0] == n[1])
-		return;
-	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-		bool active = true;
-
-		for (j = page->active; j < c->num; j++) {
-			if (get_free_obj(page, j) == i) {
-				active = false;
-				break;
-			}
-		}
-
-		if (!active)
-			continue;
-
-		/*
-		 * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
-		 * mapping is established when actual object allocation and
-		 * we could mistakenly access the unmapped object in the cpu
-		 * cache.
-		 */
-		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
-			continue;
-
-		if (!add_caller(n, v))
-			return;
-	}
-}
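
handle_slab() above classifies objects by elimination: freelist entries
at positions page->active onward hold the indices of the free objects,
so any object index not found there is live and its stored caller is
fed to add_caller(). A toy model of that test (all values made up):

  #include <stdbool.h>
  #include <stdio.h>

  #define NUM	4	/* objects per slab page (hypothetical) */

  int main(void)
  {
  	/* entries from 'active' onward index the free objects */
  	unsigned int freelist[NUM] = { 2, 0, 3, 1 };
  	unsigned int active = 2;	/* page->active: two objects in use */
  	unsigned int i, j;

  	for (i = 0; i < NUM; i++) {
  		bool is_free = false;

  		for (j = active; j < NUM; j++)
  			if (freelist[j] == i)
  				is_free = true;

  		/* prints: 0 active, 1 free, 2 active, 3 free */
  		printf("object %u: %s\n", i, is_free ? "free" : "active");
  	}
  	return 0;
  }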
-
-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-	unsigned long offset, size;
-	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
-
-	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
-		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
-		if (modname[0])
-			seq_printf(m, " [%s]", modname);
-		return;
-	}
-#endif
-	seq_printf(m, "%px", (void *)address);
-}
-
-static int leaks_show(struct seq_file *m, void *p)
-{
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
-					       root_caches_node);
-	struct page *page;
-	struct kmem_cache_node *n;
-	const char *name;
-	unsigned long *x = m->private;
-	int node;
-	int i;
-
-	if (!(cachep->flags & SLAB_STORE_USER))
-		return 0;
-	if (!(cachep->flags & SLAB_RED_ZONE))
-		return 0;
-
-	/*
-	 * Set store_user_clean and start to grab stored user information
-	 * for all objects on this cache. If some alloc/free requests comes
-	 * during the processing, information would be wrong so restart
-	 * whole processing.
-	 */
-	do {
-		drain_cpu_caches(cachep);
-		/*
-		 * drain_cpu_caches() could make kmemleak_object and
-		 * debug_objects_cache dirty, so reset afterwards.
-		 */
-		set_store_user_clean(cachep);
-
-		x[1] = 0;
-
-		for_each_kmem_cache_node(cachep, node, n) {
-
-			check_irq_on();
-			spin_lock_irq(&n->list_lock);
-
-			list_for_each_entry(page, &n->slabs_full, slab_list)
-				handle_slab(x, cachep, page);
-			list_for_each_entry(page, &n->slabs_partial, slab_list)
-				handle_slab(x, cachep, page);
-			spin_unlock_irq(&n->list_lock);
-		}
-	} while (!is_store_user_clean(cachep));
-
-	name = cachep->name;
-	if (x[0] == x[1]) {
-		/* Increase the buffer size */
-		mutex_unlock(&slab_mutex);
-		m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
-				     GFP_KERNEL);
-		if (!m->private) {
-			/* Too bad, we are really out */
-			m->private = x;
-			mutex_lock(&slab_mutex);
-			return -ENOMEM;
-		}
-		*(unsigned long *)m->private = x[0] * 2;
-		kfree(x);
-		mutex_lock(&slab_mutex);
-		/* Now make sure this entry will be retried */
-		m->count = m->size;
-		return 0;
-	}
-	for (i = 0; i < x[1]; i++) {
-		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
-		show_symbol(m, x[2*i+2]);
-		seq_putc(m, '\n');
-	}
-
-	return 0;
-}
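
One subtlety in leaks_show() above: when the caller table fills up
(x[0] == x[1]), it allocates a table of twice the capacity and then
sets m->count = m->size. That works because the seq_file core treats a
record as overflowed exactly when count equals size, throws the partial
output away, and retries the same record with a fresh ->show call, so
leaks_show() runs again with the enlarged table. The test it relies on
is this helper from include/linux/seq_file.h:

  static inline bool seq_has_overflowed(struct seq_file *m)
  {
  	return m->count == m->size;
  }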
-
-static const struct seq_operations slabstats_op = {
-	.start = slab_start,
-	.next = slab_next,
-	.stop = slab_stop,
-	.show = leaks_show,
-};
-
-static int slabstats_open(struct inode *inode, struct file *file)
-{
-	unsigned long *n;
-
-	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
-	if (!n)
-		return -ENOMEM;
-
-	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
-
-	return 0;
-}
-
-static const struct file_operations proc_slabstats_operations = {
-	.open		= slabstats_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release_private,
-};
-#endif
-
-static int __init slab_proc_init(void)
-{
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
-#endif
-	return 0;
-}
-module_init(slab_proc_init);
-
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied
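
For the record, this is what the removed interface looked like from
user space: one line per allocation site, in the format produced by
leaks_show() and show_symbol() above, "<cache>: <live objects>
<caller>+<offset>/<size> [module]". An illustrative session (the
symbols are real allocation sites, the numbers are made up):

  # cat /proc/slab_allocators
  ext4_inode_cache: 574 ext4_alloc_inode+0x1a/0x60
  dentry: 1638 __d_alloc+0x2c/0x1e0
  kmemleak_object: 2243606 create_object+0x3d/0x2e0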