Commit 76c39e4f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
  SLUB: Fix memory hotplug with !NUMA
  slub: Move functions to reduce #ifdefs
  slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
  SLUB: Optimize slab_free() debug check
  slub: Move NUMA-related functions under CONFIG_NUMA
  slub: Add lock release annotation
  slub: Fix signedness warnings
  slub: extract common code to remove objects from partial list without locking
  SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
  slub: reduce differences between SMP and NUMA
  Revert "Slub: UP bandaid"
  percpu: clear memory allocated with the km allocator
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
  SLUB: Fix merged slab cache names
  Slub: UP bandaid
  slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
  slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
  slub: Add dummy functions for the !SLUB_DEBUG case
  ...
parents 1765a1fe 6d4121f6
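
The common thread of the series is one structural change to SLUB's kmalloc bookkeeping: the boot-time caches are no longer embedded in a statically sized array but created like any other cache and reached through an array of pointers. Condensed from the include/linux/slub_def.h hunks that follow (old code on the left of each hunk, new on the right); nothing here beyond what the diff itself shows:

/* before: every slot is a full struct, sized to leave room for DMA variants */
extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
struct kmem_cache *s = &kmalloc_caches[index];

/* after: only the pointers are static; the caches are allocated at boot */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
struct kmem_cache *s = kmalloc_caches[index];
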
...@@ -87,7 +87,7 @@ struct kmem_cache { ...@@ -87,7 +87,7 @@ struct kmem_cache {
unsigned long min_partial; unsigned long min_partial;
const char *name; /* Name (only for display!) */ const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */ struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */ struct kobject kobj; /* For sysfs */
#endif #endif
...@@ -96,11 +96,8 @@ struct kmem_cache { ...@@ -96,11 +96,8 @@ struct kmem_cache {
* Defragmentation by allocating from a remote node. * Defragmentation by allocating from a remote node.
*/ */
int remote_node_defrag_ratio; int remote_node_defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#else
/* Avoid an extra cache line for UP */
struct kmem_cache_node local_node;
#endif #endif
struct kmem_cache_node *node[MAX_NUMNODES];
}; };
/* /*
...@@ -139,19 +136,16 @@ struct kmem_cache { ...@@ -139,19 +136,16 @@ struct kmem_cache {
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA #define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
#else #else
/* Disable DMA functionality */ /* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0 #define SLUB_DMA (__force gfp_t)0
#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif #endif
/* /*
* We keep the general caches in an array of slab caches that are used for * We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations. * 2^x bytes of allocations.
*/ */
extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES]; extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
/* /*
* Sorry that the following has to be that ugly but some versions of GCC * Sorry that the following has to be that ugly but some versions of GCC
...@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) ...@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
if (index == 0) if (index == 0)
return NULL; return NULL;
return &kmalloc_caches[index]; return kmalloc_caches[index];
} }
void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
......
...@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON ...@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS config SLUB_STATS
default n default n
bool "Enable SLUB performance statistics" bool "Enable SLUB performance statistics"
depends on SLUB && SLUB_DEBUG && SYSFS depends on SLUB && SYSFS
help help
SLUB statistics are useful to debug SLUBs allocation behavior in SLUB statistics are useful to debug SLUBs allocation behavior in
order find ways to optimize the allocator. This should never be order find ways to optimize the allocator. This should never be
......
...@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) ...@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else { } else {
unsigned int order = get_order(size); unsigned int order = get_order(size);
ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); if (likely(order))
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
if (ret) { if (ret) {
struct page *page; struct page *page;
page = virt_to_page(ret); page = virt_to_page(ret);
......
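
The slob.c change above is a small cleanup: __GFP_COMP asks the page allocator to link a multi-page allocation into a compound page, which has no effect for a single page. Annotated restatement of the hunk (no new code, only explanatory comments):

unsigned int order = get_order(size);

if (likely(order))              /* allocation spans more than one page */
        gfp |= __GFP_COMP;      /* keep the pages together as one compound page */
ret = slob_new_pages(gfp, order, node);
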
...@@ -168,7 +168,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s) ...@@ -168,7 +168,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
/* Internal SLUB flags */ /* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000UL /* Poison object */ #define __OBJECT_POISON 0x80000000UL /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000UL /* Not yet visible via sysfs */
static int kmem_size = sizeof(struct kmem_cache); static int kmem_size = sizeof(struct kmem_cache);
...@@ -178,7 +177,7 @@ static struct notifier_block slab_notifier; ...@@ -178,7 +177,7 @@ static struct notifier_block slab_notifier;
static enum { static enum {
DOWN, /* No slab functionality available */ DOWN, /* No slab functionality available */
PARTIAL, /* kmem_cache_open() works but kmalloc does not */ PARTIAL, /* Kmem_cache_node works */
UP, /* Everything works but does not show up in sysfs */ UP, /* Everything works but does not show up in sysfs */
SYSFS /* Sysfs up */ SYSFS /* Sysfs up */
} slab_state = DOWN; } slab_state = DOWN;
...@@ -199,7 +198,7 @@ struct track { ...@@ -199,7 +198,7 @@ struct track {
enum track_item { TRACK_ALLOC, TRACK_FREE }; enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *); static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *); static void sysfs_slab_remove(struct kmem_cache *);
...@@ -210,6 +209,7 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) ...@@ -210,6 +209,7 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; } { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) static inline void sysfs_slab_remove(struct kmem_cache *s)
{ {
kfree(s->name);
kfree(s); kfree(s);
} }
...@@ -233,11 +233,7 @@ int slab_is_available(void) ...@@ -233,11 +233,7 @@ int slab_is_available(void)
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{ {
#ifdef CONFIG_NUMA
return s->node[node]; return s->node[node];
#else
return &s->local_node;
#endif
} }
/* Verify that a pointer has an address that is valid within a slab page */ /* Verify that a pointer has an address that is valid within a slab page */
...@@ -494,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) ...@@ -494,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
dump_stack(); dump_stack();
} }
static void init_object(struct kmem_cache *s, void *object, int active) static void init_object(struct kmem_cache *s, void *object, u8 val)
{ {
u8 *p = object; u8 *p = object;
...@@ -504,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active) ...@@ -504,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
} }
if (s->flags & SLAB_RED_ZONE) if (s->flags & SLAB_RED_ZONE)
memset(p + s->objsize, memset(p + s->objsize, val, s->inuse - s->objsize);
active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
s->inuse - s->objsize);
} }
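
Several hunks in this file (init_object() above, check_object() just below, and their callers) replace the old int "active" flag with the redzone byte value itself, so the debug helpers no longer translate a boolean into SLUB_RED_ACTIVE/SLUB_RED_INACTIVE. Call-site pattern, condensed from the alloc/free debug hunks further down:

init_object(s, object, SLUB_RED_INACTIVE);      /* object going onto a freelist */
init_object(s, object, SLUB_RED_ACTIVE);        /* object handed out to a caller */
check_object(s, page, object, SLUB_RED_ACTIVE); /* verify redzone before freeing */
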
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
...@@ -641,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page) ...@@ -641,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
} }
static int check_object(struct kmem_cache *s, struct page *page, static int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) void *object, u8 val)
{ {
u8 *p = object; u8 *p = object;
u8 *endobject = object + s->objsize; u8 *endobject = object + s->objsize;
if (s->flags & SLAB_RED_ZONE) { if (s->flags & SLAB_RED_ZONE) {
unsigned int red =
active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
if (!check_bytes_and_report(s, page, object, "Redzone", if (!check_bytes_and_report(s, page, object, "Redzone",
endobject, red, s->inuse - s->objsize)) endobject, val, s->inuse - s->objsize))
return 0; return 0;
} else { } else {
if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
...@@ -661,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page, ...@@ -661,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
} }
if (s->flags & SLAB_POISON) { if (s->flags & SLAB_POISON) {
if (!active && (s->flags & __OBJECT_POISON) && if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p, (!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->objsize - 1) || POISON_FREE, s->objsize - 1) ||
!check_bytes_and_report(s, page, p, "Poison", !check_bytes_and_report(s, page, p, "Poison",
...@@ -673,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page, ...@@ -673,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
check_pad_bytes(s, page, p); check_pad_bytes(s, page, p);
} }
if (!s->offset && active) if (!s->offset && val == SLUB_RED_ACTIVE)
/* /*
* Object and freepointer overlap. Cannot check * Object and freepointer overlap. Cannot check
* freepointer while object is allocated. * freepointer while object is allocated.
...@@ -791,6 +782,39 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, ...@@ -791,6 +782,39 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
} }
} }
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
might_sleep_if(flags & __GFP_WAIT);
return should_failslab(s->objsize, flags, s->flags);
}
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
flags &= gfp_allowed_mask;
kmemcheck_slab_alloc(s, flags, object, s->objsize);
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}
static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
kmemleak_free_recursive(x, s->flags);
}
static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
{
kmemcheck_slab_free(s, object, s->objsize);
debug_check_no_locks_freed(object, s->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
}
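
The four hooks above gather the kmemcheck, kmemleak, lockdep and failslab calls that used to be open-coded in slab_alloc() and slab_free(); with debugging disabled they compile away. How the fast paths use them, condensed from the slab_alloc()/slab_free() hunks further down (simplified, fastpath details omitted):

/* allocation side */
if (slab_pre_alloc_hook(s, gfpflags))        /* lockdep, might_sleep, failslab */
        return NULL;
/* ... per-cpu freelist or __slab_alloc() ... */
slab_post_alloc_hook(s, gfpflags, object);   /* kmemcheck + kmemleak */

/* free side */
slab_free_hook(s, x);                        /* kmemleak, before irqs are disabled */
local_irq_save(flags);
slab_free_hook_irq(s, x);                    /* kmemcheck, lockdep, debugobjects */
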
/* /*
* Tracking of fully allocated slabs for debugging purposes. * Tracking of fully allocated slabs for debugging purposes.
*/ */
...@@ -838,7 +862,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) ...@@ -838,7 +862,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
* dilemma by deferring the increment of the count during * dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc). * bootstrap (see early_kmem_cache_node_alloc).
*/ */
if (!NUMA_BUILD || n) { if (n) {
atomic_long_inc(&n->nr_slabs); atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects); atomic_long_add(objects, &n->total_objects);
} }
...@@ -858,11 +882,11 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, ...@@ -858,11 +882,11 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
return; return;
init_object(s, object, 0); init_object(s, object, SLUB_RED_INACTIVE);
init_tracking(s, object); init_tracking(s, object);
} }
static int alloc_debug_processing(struct kmem_cache *s, struct page *page, static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
void *object, unsigned long addr) void *object, unsigned long addr)
{ {
if (!check_slab(s, page)) if (!check_slab(s, page))
...@@ -878,14 +902,14 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page, ...@@ -878,14 +902,14 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
goto bad; goto bad;
} }
if (!check_object(s, page, object, 0)) if (!check_object(s, page, object, SLUB_RED_INACTIVE))
goto bad; goto bad;
/* Success perform special debug activities for allocs */ /* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER) if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_ALLOC, addr); set_track(s, object, TRACK_ALLOC, addr);
trace(s, page, object, 1); trace(s, page, object, 1);
init_object(s, object, 1); init_object(s, object, SLUB_RED_ACTIVE);
return 1; return 1;
bad: bad:
...@@ -902,8 +926,8 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page, ...@@ -902,8 +926,8 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
return 0; return 0;
} }
static int free_debug_processing(struct kmem_cache *s, struct page *page, static noinline int free_debug_processing(struct kmem_cache *s,
void *object, unsigned long addr) struct page *page, void *object, unsigned long addr)
{ {
if (!check_slab(s, page)) if (!check_slab(s, page))
goto fail; goto fail;
...@@ -918,7 +942,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page, ...@@ -918,7 +942,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
goto fail; goto fail;
} }
if (!check_object(s, page, object, 1)) if (!check_object(s, page, object, SLUB_RED_ACTIVE))
return 0; return 0;
if (unlikely(s != page->slab)) { if (unlikely(s != page->slab)) {
...@@ -942,7 +966,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page, ...@@ -942,7 +966,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
if (s->flags & SLAB_STORE_USER) if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr); set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0); trace(s, page, object, 0);
init_object(s, object, 0); init_object(s, object, SLUB_RED_INACTIVE);
return 1; return 1;
fail: fail:
...@@ -1046,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s, ...@@ -1046,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s,
static inline int slab_pad_check(struct kmem_cache *s, struct page *page) static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; } { return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page, static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; } void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {} static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize, static inline unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name, unsigned long flags, const char *name,
...@@ -1066,7 +1090,19 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, ...@@ -1066,7 +1090,19 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {} int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node, static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {} int objects) {}
#endif
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
void *object) {}
static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
static inline void slab_free_hook_irq(struct kmem_cache *s,
void *object) {}
#endif /* CONFIG_SLUB_DEBUG */
/* /*
* Slab allocation and freeing * Slab allocation and freeing
...@@ -1194,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) ...@@ -1194,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
slab_pad_check(s, page); slab_pad_check(s, page);
for_each_object(p, s, page_address(page), for_each_object(p, s, page_address(page),
page->objects) page->objects)
check_object(s, page, p, 0); check_object(s, page, p, SLUB_RED_INACTIVE);
} }
kmemcheck_free_shadow(page, compound_order(page)); kmemcheck_free_shadow(page, compound_order(page));
...@@ -1274,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n, ...@@ -1274,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n,
spin_unlock(&n->list_lock); spin_unlock(&n->list_lock);
} }
static inline void __remove_partial(struct kmem_cache_node *n,
struct page *page)
{
list_del(&page->lru);
n->nr_partial--;
}
static void remove_partial(struct kmem_cache *s, struct page *page) static void remove_partial(struct kmem_cache *s, struct page *page)
{ {
struct kmem_cache_node *n = get_node(s, page_to_nid(page)); struct kmem_cache_node *n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock); spin_lock(&n->list_lock);
list_del(&page->lru); __remove_partial(n, page);
n->nr_partial--;
spin_unlock(&n->list_lock); spin_unlock(&n->list_lock);
} }
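
__remove_partial() above factors out the unlink-and-count step so that callers which already hold n->list_lock stop duplicating it; remove_partial() keeps the locking. The same helper replaces open-coded copies in lock_and_freeze_slab(), free_partial() and kmem_cache_shrink() later in this file. Annotated restatement with the locking contract spelled out:

static inline void __remove_partial(struct kmem_cache_node *n, struct page *page)
{
        list_del(&page->lru);   /* unlink from n->partial */
        n->nr_partial--;        /* caller holds n->list_lock */
}
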
...@@ -1293,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n, ...@@ -1293,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
struct page *page) struct page *page)
{ {
if (slab_trylock(page)) { if (slab_trylock(page)) {
list_del(&page->lru); __remove_partial(n, page);
n->nr_partial--;
__SetPageSlubFrozen(page); __SetPageSlubFrozen(page);
return 1; return 1;
} }
...@@ -1405,6 +1446,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) ...@@ -1405,6 +1446,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
* On exit the slab lock will have been dropped. * On exit the slab lock will have been dropped.
*/ */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
__releases(bitlock)
{ {
struct kmem_cache_node *n = get_node(s, page_to_nid(page)); struct kmem_cache_node *n = get_node(s, page_to_nid(page));
...@@ -1447,6 +1489,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) ...@@ -1447,6 +1489,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
* Remove the cpu slab * Remove the cpu slab
*/ */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
__releases(bitlock)
{ {
struct page *page = c->page; struct page *page = c->page;
int tail = 1; int tail = 1;
...@@ -1647,6 +1690,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -1647,6 +1690,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto load_freelist; goto load_freelist;
} }
gfpflags &= gfp_allowed_mask;
if (gfpflags & __GFP_WAIT) if (gfpflags & __GFP_WAIT)
local_irq_enable(); local_irq_enable();
...@@ -1674,7 +1718,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -1674,7 +1718,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c->page->inuse++; c->page->inuse++;
c->page->freelist = get_freepointer(s, object); c->page->freelist = get_freepointer(s, object);
c->node = -1; c->node = NUMA_NO_NODE;
goto unlock_out; goto unlock_out;
} }
...@@ -1695,12 +1739,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, ...@@ -1695,12 +1739,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
struct kmem_cache_cpu *c; struct kmem_cache_cpu *c;
unsigned long flags; unsigned long flags;
gfpflags &= gfp_allowed_mask; if (slab_pre_alloc_hook(s, gfpflags))
lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
if (should_failslab(s->objsize, gfpflags, s->flags))
return NULL; return NULL;
local_irq_save(flags); local_irq_save(flags);
...@@ -1719,8 +1758,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, ...@@ -1719,8 +1758,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
if (unlikely(gfpflags & __GFP_ZERO) && object) if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->objsize); memset(object, 0, s->objsize);
kmemcheck_slab_alloc(s, gfpflags, object, s->objsize); slab_post_alloc_hook(s, gfpflags, object);
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
return object; return object;
} }
...@@ -1754,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) ...@@ -1754,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
return ret; return ret;
} }
EXPORT_SYMBOL(kmem_cache_alloc_node); EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
...@@ -1765,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, ...@@ -1765,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
} }
EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
#endif #endif
#endif
/* /*
* Slow patch handling. This may still be called frequently since objects * Slow patch handling. This may still be called frequently since objects
...@@ -1850,14 +1888,14 @@ static __always_inline void slab_free(struct kmem_cache *s, ...@@ -1850,14 +1888,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c; struct kmem_cache_cpu *c;
unsigned long flags; unsigned long flags;
kmemleak_free_recursive(x, s->flags); slab_free_hook(s, x);
local_irq_save(flags); local_irq_save(flags);
c = __this_cpu_ptr(s->cpu_slab); c = __this_cpu_ptr(s->cpu_slab);
kmemcheck_slab_free(s, object, s->objsize);
debug_check_no_locks_freed(object, s->objsize); slab_free_hook_irq(s, x);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize); if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
if (likely(page == c->page && c->node >= 0)) {
set_freepointer(s, object, c->freelist); set_freepointer(s, object, c->freelist);
c->freelist = object; c->freelist = object;
stat(s, FREE_FASTPATH); stat(s, FREE_FASTPATH);
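
c->node now uses the named constant NUMA_NO_NODE (-1) instead of the bare -1 / ">= 0" convention. The tests are equivalent, but the constant makes the trick behind "SLUB: Optimize slab_free() debug check" in the commit list easier to see: the debug allocation path above sets c->node = NUMA_NO_NODE precisely so that later frees of that cpu slab miss the fastpath and run free_debug_processing(). Condensed from the two hunks above:

c->node = NUMA_NO_NODE;                 /* debug alloc: disable the free fastpath */

if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
        /* fastpath free, no debug checks */
}
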
...@@ -2062,26 +2100,18 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) ...@@ -2062,26 +2100,18 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
#endif #endif
} }
static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]); static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{ {
if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches) BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
/* SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
* Boot time creation of the kmalloc array. Use static per cpu data
* since the per cpu allocator is not available yet.
*/
s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
else
s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
if (!s->cpu_slab) s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
return 0;
return 1; return s->cpu_slab != NULL;
} }
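
The static kmalloc_percpu[] bootstrap area removed above is no longer needed because the percpu allocator (per the percpu commits in this series) now works on UP and early enough in boot. Annotated version of the new helper, with the sizing assumption spelled out:

static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
        /* The early per-cpu reserve must hold one kmem_cache_cpu for each
         * boot-time kmalloc cache, since these are set up before the full
         * per-cpu allocator state exists. */
        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
                        SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));

        s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);

        return s->cpu_slab != NULL;
}
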
#ifdef CONFIG_NUMA static struct kmem_cache *kmem_cache_node;
/* /*
* No kmalloc_node yet so do it by hand. We know that this is the first * No kmalloc_node yet so do it by hand. We know that this is the first
* slab on the node for this slabcache. There are no concurrent accesses * slab on the node for this slabcache. There are no concurrent accesses
...@@ -2091,15 +2121,15 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) ...@@ -2091,15 +2121,15 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
* when allocating for the kmalloc_node_cache. This is used for bootstrapping * when allocating for the kmalloc_node_cache. This is used for bootstrapping
* memory on a fresh node that has no slab structures yet. * memory on a fresh node that has no slab structures yet.
*/ */
static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) static void early_kmem_cache_node_alloc(int node)
{ {
struct page *page; struct page *page;
struct kmem_cache_node *n; struct kmem_cache_node *n;
unsigned long flags; unsigned long flags;
BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
page = new_slab(kmalloc_caches, gfpflags, node); page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
BUG_ON(!page); BUG_ON(!page);
if (page_to_nid(page) != node) { if (page_to_nid(page) != node) {
...@@ -2111,15 +2141,15 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) ...@@ -2111,15 +2141,15 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
n = page->freelist; n = page->freelist;
BUG_ON(!n); BUG_ON(!n);
page->freelist = get_freepointer(kmalloc_caches, n); page->freelist = get_freepointer(kmem_cache_node, n);
page->inuse++; page->inuse++;
kmalloc_caches->node[node] = n; kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SLUB_DEBUG
init_object(kmalloc_caches, n, 1); init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmalloc_caches, n); init_tracking(kmem_cache_node, n);
#endif #endif
init_kmem_cache_node(n, kmalloc_caches); init_kmem_cache_node(n, kmem_cache_node);
inc_slabs_node(kmalloc_caches, node, page->objects); inc_slabs_node(kmem_cache_node, node, page->objects);
/* /*
* lockdep requires consistent irq usage for each lock * lockdep requires consistent irq usage for each lock
...@@ -2137,13 +2167,15 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) ...@@ -2137,13 +2167,15 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
for_each_node_state(node, N_NORMAL_MEMORY) { for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = s->node[node]; struct kmem_cache_node *n = s->node[node];
if (n) if (n)
kmem_cache_free(kmalloc_caches, n); kmem_cache_free(kmem_cache_node, n);
s->node[node] = NULL; s->node[node] = NULL;
} }
} }
static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) static int init_kmem_cache_nodes(struct kmem_cache *s)
{ {
int node; int node;
...@@ -2151,11 +2183,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) ...@@ -2151,11 +2183,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
struct kmem_cache_node *n; struct kmem_cache_node *n;
if (slab_state == DOWN) { if (slab_state == DOWN) {
early_kmem_cache_node_alloc(gfpflags, node); early_kmem_cache_node_alloc(node);
continue; continue;
} }
n = kmem_cache_alloc_node(kmalloc_caches, n = kmem_cache_alloc_node(kmem_cache_node,
gfpflags, node); GFP_KERNEL, node);
if (!n) { if (!n) {
free_kmem_cache_nodes(s); free_kmem_cache_nodes(s);
...@@ -2167,17 +2199,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) ...@@ -2167,17 +2199,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
} }
return 1; return 1;
} }
#else
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
}
static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
init_kmem_cache_node(&s->local_node, s);
return 1;
}
#endif
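
With node[] now unconditional in struct kmem_cache, the !CONFIG_NUMA variants removed above go away, and per-node structures always come from the dedicated kmem_cache_node boot cache instead of kmalloc_caches[0]. The gfpflags parameter is dropped because the boot path uses GFP_NOWAIT internally and everything later uses GFP_KERNEL; condensed from the init_kmem_cache_nodes() hunk above:

if (slab_state == DOWN) {
        early_kmem_cache_node_alloc(node);      /* hand-build the very first slab */
        continue;
}
n = kmem_cache_alloc_node(kmem_cache_node, GFP_KERNEL, node);
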
static void set_min_partial(struct kmem_cache *s, unsigned long min) static void set_min_partial(struct kmem_cache *s, unsigned long min)
{ {
...@@ -2312,7 +2333,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) ...@@ -2312,7 +2333,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
} }
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, static int kmem_cache_open(struct kmem_cache *s,
const char *name, size_t size, const char *name, size_t size,
size_t align, unsigned long flags, size_t align, unsigned long flags,
void (*ctor)(void *)) void (*ctor)(void *))
...@@ -2348,10 +2369,10 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, ...@@ -2348,10 +2369,10 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000; s->remote_node_defrag_ratio = 1000;
#endif #endif
if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) if (!init_kmem_cache_nodes(s))
goto error; goto error;
if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) if (alloc_kmem_cache_cpus(s))
return 1; return 1;
free_kmem_cache_nodes(s); free_kmem_cache_nodes(s);
...@@ -2414,9 +2435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, ...@@ -2414,9 +2435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page); void *addr = page_address(page);
void *p; void *p;
long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long), unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
GFP_ATOMIC); sizeof(long), GFP_ATOMIC);
if (!map) if (!map)
return; return;
slab_err(s, page, "%s", text); slab_err(s, page, "%s", text);
...@@ -2448,9 +2468,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) ...@@ -2448,9 +2468,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
spin_lock_irqsave(&n->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) { list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) { if (!page->inuse) {
list_del(&page->lru); __remove_partial(n, page);
discard_slab(s, page); discard_slab(s, page);
n->nr_partial--;
} else { } else {
list_slab_objects(s, page, list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()"); "Objects remaining on kmem_cache_close()");
...@@ -2507,9 +2526,15 @@ EXPORT_SYMBOL(kmem_cache_destroy); ...@@ -2507,9 +2526,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem * Kmalloc subsystem
*******************************************************************/ *******************************************************************/
struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned; struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches); EXPORT_SYMBOL(kmalloc_caches);
static struct kmem_cache *kmem_cache;
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
static int __init setup_slub_min_order(char *str) static int __init setup_slub_min_order(char *str)
{ {
get_option(&str, &slub_min_order); get_option(&str, &slub_min_order);
...@@ -2546,116 +2571,29 @@ static int __init setup_slub_nomerge(char *str) ...@@ -2546,116 +2571,29 @@ static int __init setup_slub_nomerge(char *str)
__setup("slub_nomerge", setup_slub_nomerge); __setup("slub_nomerge", setup_slub_nomerge);
static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, static struct kmem_cache *__init create_kmalloc_cache(const char *name,
const char *name, int size, gfp_t gfp_flags) int size, unsigned int flags)
{ {
unsigned int flags = 0; struct kmem_cache *s;
if (gfp_flags & SLUB_DMA) s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
flags = SLAB_CACHE_DMA;
/* /*
* This function is called with IRQs disabled during early-boot on * This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slub_lock here. * single CPU so there's no need to take slub_lock here.
*/ */
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL)) flags, NULL))
goto panic; goto panic;
list_add(&s->list, &slab_caches); list_add(&s->list, &slab_caches);
if (sysfs_slab_add(s))
goto panic;
return s; return s;
panic: panic:
panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
return NULL;
} }
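
create_kmalloc_cache() now allocates its struct kmem_cache from the kmem_cache cache and returns the pointer instead of filling a slot handed in from a static array; the sysfs_slab_add() call is dropped here because slab_sysfs_init() registers every cache already on slab_caches later in boot. Usage, condensed from the kmem_cache_init() hunks further down:

kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
        kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
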
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
struct kmem_cache *s;
down_write(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
if (s->flags & __SYSFS_ADD_DEFERRED) {
s->flags &= ~__SYSFS_ADD_DEFERRED;
sysfs_slab_add(s);
}
}
up_write(&slub_lock);
}
static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
struct kmem_cache *s;
char *text;
size_t realsize;
unsigned long slabflags;
int i;
s = kmalloc_caches_dma[index];
if (s)
return s;
/* Dynamically create dma cache */
if (flags & __GFP_WAIT)
down_write(&slub_lock);
else {
if (!down_write_trylock(&slub_lock))
goto out;
}
if (kmalloc_caches_dma[index])
goto unlock_out;
realsize = kmalloc_caches[index].objsize;
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize);
s = NULL;
for (i = 0; i < KMALLOC_CACHES; i++)
if (!kmalloc_caches[i].size)
break;
BUG_ON(i >= KMALLOC_CACHES);
s = kmalloc_caches + i;
/*
* Must defer sysfs creation to a workqueue because we don't know
* what context we are called from. Before sysfs comes up, we don't
* need to do anything because our sysfs initcall will start by
* adding all existing slabs to sysfs.
*/
slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
if (slab_state >= SYSFS)
slabflags |= __SYSFS_ADD_DEFERRED;
if (!text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
s->size = 0;
kfree(text);
goto unlock_out;
}
list_add(&s->list, &slab_caches);
kmalloc_caches_dma[index] = s;
if (slab_state >= SYSFS)
schedule_work(&sysfs_add_work);
unlock_out:
up_write(&slub_lock);
out:
return kmalloc_caches_dma[index];
}
#endif
/* /*
* Conversion table for small slabs sizes / 8 to the index in the * Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power * kmalloc array. This is necessary for slabs < 192 since we have non power
...@@ -2708,10 +2646,10 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) ...@@ -2708,10 +2646,10 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
if (unlikely((flags & SLUB_DMA))) if (unlikely((flags & SLUB_DMA)))
return dma_kmalloc_cache(index, flags); return kmalloc_dma_caches[index];
#endif #endif
return &kmalloc_caches[index]; return kmalloc_caches[index];
} }
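
With get_slab() above reduced to a plain array lookup, the lazy dma_kmalloc_cache() machinery (and its deferred-sysfs workqueue) removed earlier in this diff is replaced by creating every DMA cache once at boot; condensed from the kmem_cache_init() hunk further down:

for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
        struct kmem_cache *s = kmalloc_caches[i];

        if (s && s->size) {
                char *name = kasprintf(GFP_NOWAIT,
                                "dma-kmalloc-%d", s->objsize);

                BUG_ON(!name);
                kmalloc_dma_caches[i] = create_kmalloc_cache(name,
                                s->objsize, SLAB_CACHE_DMA);
        }
}
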
void *__kmalloc(size_t size, gfp_t flags) void *__kmalloc(size_t size, gfp_t flags)
...@@ -2735,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags) ...@@ -2735,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags)
} }
EXPORT_SYMBOL(__kmalloc); EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_NUMA
static void *kmalloc_large_node(size_t size, gfp_t flags, int node) static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{ {
struct page *page; struct page *page;
...@@ -2749,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) ...@@ -2749,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
return ptr; return ptr;
} }
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{ {
struct kmem_cache *s; struct kmem_cache *s;
...@@ -2889,8 +2827,7 @@ int kmem_cache_shrink(struct kmem_cache *s) ...@@ -2889,8 +2827,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
* may have freed the last object and be * may have freed the last object and be
* waiting to release the slab. * waiting to release the slab.
*/ */
list_del(&page->lru); __remove_partial(n, page);
n->nr_partial--;
slab_unlock(page); slab_unlock(page);
discard_slab(s, page); discard_slab(s, page);
} else { } else {
...@@ -2914,7 +2851,7 @@ int kmem_cache_shrink(struct kmem_cache *s) ...@@ -2914,7 +2851,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
} }
EXPORT_SYMBOL(kmem_cache_shrink); EXPORT_SYMBOL(kmem_cache_shrink);
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) #if defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback(void *arg) static int slab_mem_going_offline_callback(void *arg)
{ {
struct kmem_cache *s; struct kmem_cache *s;
...@@ -2956,7 +2893,7 @@ static void slab_mem_offline_callback(void *arg) ...@@ -2956,7 +2893,7 @@ static void slab_mem_offline_callback(void *arg)
BUG_ON(slabs_node(s, offline_node)); BUG_ON(slabs_node(s, offline_node));
s->node[offline_node] = NULL; s->node[offline_node] = NULL;
kmem_cache_free(kmalloc_caches, n); kmem_cache_free(kmem_cache_node, n);
} }
} }
up_read(&slub_lock); up_read(&slub_lock);
...@@ -2989,7 +2926,7 @@ static int slab_mem_going_online_callback(void *arg) ...@@ -2989,7 +2926,7 @@ static int slab_mem_going_online_callback(void *arg)
* since memory is not yet available from the node that * since memory is not yet available from the node that
* is brought up. * is brought up.
*/ */
n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
if (!n) { if (!n) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
...@@ -3035,46 +2972,92 @@ static int slab_memory_callback(struct notifier_block *self, ...@@ -3035,46 +2972,92 @@ static int slab_memory_callback(struct notifier_block *self,
* Basic setup of slabs * Basic setup of slabs
*******************************************************************/ *******************************************************************/
/*
* Used for early kmem_cache structures that were allocated using
* the page allocator
*/
static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
{
int node;
list_add(&s->list, &slab_caches);
s->refcount = -1;
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
struct page *p;
if (n) {
list_for_each_entry(p, &n->partial, lru)
p->slab = s;
#ifdef CONFIG_SLAB_DEBUG
list_for_each_entry(p, &n->full, lru)
p->slab = s;
#endif
}
}
}
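
kmem_cache_bootstrap_fixup() is the last step of the bootstrap dance in kmem_cache_init() below: the allocator has to create the caches it needs in order to create caches. Roughly: (1) carve two temporary kmem_cache structures out of raw pages, (2) open kmem_cache_node first so per-node structures can be allocated (slab_state == DOWN routes through early_kmem_cache_node_alloc()), (3) open kmem_cache itself, then allocate the final copies of both structures from it and memcpy the temporaries over, (4) repoint page->slab in the slabs created while the temporaries were live and return the raw pages. Condensed from the hunk below:

order = get_order(2 * kmalloc_size);
kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
kmem_cache_node = (void *)kmem_cache + kmalloc_size;

kmem_cache_open(kmem_cache_node, "kmem_cache_node",
        sizeof(struct kmem_cache_node),
        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
slab_state = PARTIAL;

temp_kmem_cache = kmem_cache;
kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);  /* now self-hosted */
memcpy(kmem_cache, temp_kmem_cache, kmem_size);

temp_kmem_cache_node = kmem_cache_node;
kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);

kmem_cache_bootstrap_fixup(kmem_cache_node);
kmem_cache_bootstrap_fixup(kmem_cache);
free_pages((unsigned long)temp_kmem_cache, order);
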
void __init kmem_cache_init(void) void __init kmem_cache_init(void)
{ {
int i; int i;
int caches = 0; int caches = 0;
struct kmem_cache *temp_kmem_cache;
int order;
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *);
/* Allocate two kmem_caches from the page allocator */
kmalloc_size = ALIGN(kmem_size, cache_line_size());
order = get_order(2 * kmalloc_size);
kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
#ifdef CONFIG_NUMA
/* /*
* Must first have the slab cache available for the allocations of the * Must first have the slab cache available for the allocations of the
* struct kmem_cache_node's. There is special bootstrap code in * struct kmem_cache_node's. There is special bootstrap code in
* kmem_cache_open for slab_state == DOWN. * kmem_cache_open for slab_state == DOWN.
*/ */
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", kmem_cache_node = (void *)kmem_cache + kmalloc_size;
sizeof(struct kmem_cache_node), GFP_NOWAIT);
kmalloc_caches[0].refcount = -1; kmem_cache_open(kmem_cache_node, "kmem_cache_node",
caches++; sizeof(struct kmem_cache_node),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif
/* Able to allocate the per node structures */ /* Able to allocate the per node structures */
slab_state = PARTIAL; slab_state = PARTIAL;
/* Caches that are not of the two-to-the-power-of size */ temp_kmem_cache = kmem_cache;
if (KMALLOC_MIN_SIZE <= 32) { kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
create_kmalloc_cache(&kmalloc_caches[1], 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
"kmalloc-96", 96, GFP_NOWAIT); kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
caches++; memcpy(kmem_cache, temp_kmem_cache, kmem_size);
}
if (KMALLOC_MIN_SIZE <= 64) { /*
create_kmalloc_cache(&kmalloc_caches[2], * Allocate kmem_cache_node properly from the kmem_cache slab.
"kmalloc-192", 192, GFP_NOWAIT); * kmem_cache_node is separately allocated so no need to
caches++; * update any list pointers.
} */
temp_kmem_cache_node = kmem_cache_node;
kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
kmem_cache_bootstrap_fixup(kmem_cache_node);
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_NOWAIT);
caches++; caches++;
} kmem_cache_bootstrap_fixup(kmem_cache);
caches++;
/* Free temporary boot structure */
free_pages((unsigned long)temp_kmem_cache, order);
/* Now we can use the kmem_cache to allocate kmalloc slabs */
/* /*
* Patch up the size_index table if we have strange large alignment * Patch up the size_index table if we have strange large alignment
...@@ -3114,26 +3097,60 @@ void __init kmem_cache_init(void) ...@@ -3114,26 +3097,60 @@ void __init kmem_cache_init(void)
size_index[size_index_elem(i)] = 8; size_index[size_index_elem(i)] = 8;
} }
/* Caches that are not of the two-to-the-power-of size */
if (KMALLOC_MIN_SIZE <= 32) {
kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
caches++;
}
if (KMALLOC_MIN_SIZE <= 64) {
kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
caches++;
}
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
caches++;
}
slab_state = UP; slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */ /* Provide the correct kmalloc names now that the caches are up */
if (KMALLOC_MIN_SIZE <= 32) {
kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
BUG_ON(!kmalloc_caches[1]->name);
}
if (KMALLOC_MIN_SIZE <= 64) {
kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
BUG_ON(!kmalloc_caches[2]->name);
}
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
BUG_ON(!s); BUG_ON(!s);
kmalloc_caches[i].name = s; kmalloc_caches[i]->name = s;
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier); register_cpu_notifier(&slab_notifier);
#endif #endif
#ifdef CONFIG_NUMA
kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *);
#else
kmem_size = sizeof(struct kmem_cache);
#endif
#ifdef CONFIG_ZONE_DMA
for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
struct kmem_cache *s = kmalloc_caches[i];
if (s && s->size) {
char *name = kasprintf(GFP_NOWAIT,
"dma-kmalloc-%d", s->objsize);
BUG_ON(!name);
kmalloc_dma_caches[i] = create_kmalloc_cache(name,
s->objsize, SLAB_CACHE_DMA);
}
}
#endif
printk(KERN_INFO printk(KERN_INFO
"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
" CPUs=%d, Nodes=%d\n", " CPUs=%d, Nodes=%d\n",
...@@ -3211,6 +3228,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, ...@@ -3211,6 +3228,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *)) size_t align, unsigned long flags, void (*ctor)(void *))
{ {
struct kmem_cache *s; struct kmem_cache *s;
char *n;
if (WARN_ON(!name)) if (WARN_ON(!name))
return NULL; return NULL;
...@@ -3234,19 +3252,25 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, ...@@ -3234,19 +3252,25 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
return s; return s;
} }
n = kstrdup(name, GFP_KERNEL);
if (!n)
goto err;
s = kmalloc(kmem_size, GFP_KERNEL); s = kmalloc(kmem_size, GFP_KERNEL);
if (s) { if (s) {
if (kmem_cache_open(s, GFP_KERNEL, name, if (kmem_cache_open(s, n,
size, align, flags, ctor)) { size, align, flags, ctor)) {
list_add(&s->list, &slab_caches); list_add(&s->list, &slab_caches);
if (sysfs_slab_add(s)) { if (sysfs_slab_add(s)) {
list_del(&s->list); list_del(&s->list);
kfree(n);
kfree(s); kfree(s);
goto err; goto err;
} }
up_write(&slub_lock); up_write(&slub_lock);
return s; return s;
} }
kfree(n);
kfree(s); kfree(s);
} }
up_write(&slub_lock); up_write(&slub_lock);
...@@ -3318,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) ...@@ -3318,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
return ret; return ret;
} }
#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, unsigned long caller) int node, unsigned long caller)
{ {
...@@ -3346,8 +3371,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, ...@@ -3346,8 +3371,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return ret; return ret;
} }
#endif
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SYSFS
static int count_inuse(struct page *page) static int count_inuse(struct page *page)
{ {
return page->inuse; return page->inuse;
...@@ -3357,7 +3383,9 @@ static int count_total(struct page *page) ...@@ -3357,7 +3383,9 @@ static int count_total(struct page *page)
{ {
return page->objects; return page->objects;
} }
#endif
#ifdef CONFIG_SLUB_DEBUG
static int validate_slab(struct kmem_cache *s, struct page *page, static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map) unsigned long *map)
{ {
...@@ -3448,65 +3476,6 @@ static long validate_slab_cache(struct kmem_cache *s) ...@@ -3448,65 +3476,6 @@ static long validate_slab_cache(struct kmem_cache *s)
kfree(map); kfree(map);
return count; return count;
} }
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
u8 *p;
printk(KERN_ERR "SLUB resiliency testing\n");
printk(KERN_ERR "-----------------------\n");
printk(KERN_ERR "A. Corruption after allocation\n");
p = kzalloc(16, GFP_KERNEL);
p[16] = 0x12;
printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
" 0x12->0x%p\n\n", p + 16);
validate_slab_cache(kmalloc_caches + 4);
/* Hmmm... The next two are dangerous */
p = kzalloc(32, GFP_KERNEL);
p[32 + sizeof(void *)] = 0x34;
printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
" 0x34 -> -0x%p\n", p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 5);
p = kzalloc(64, GFP_KERNEL);
p += 64 + (get_cycles() & 0xff) * sizeof(void *);
*p = 0x56;
printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 6);
printk(KERN_ERR "\nB. Corruption after free\n");
p = kzalloc(128, GFP_KERNEL);
kfree(p);
*p = 0x78;
printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches + 7);
p = kzalloc(256, GFP_KERNEL);
kfree(p);
p[50] = 0x9a;
printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
p);
validate_slab_cache(kmalloc_caches + 8);
p = kzalloc(512, GFP_KERNEL);
kfree(p);
p[512] = 0xab;
printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches + 9);
}
#else
static void resiliency_test(void) {};
#endif
/* /*
* Generate lists of code addresses where slabcache objects are allocated * Generate lists of code addresses where slabcache objects are allocated
* and freed. * and freed.
...@@ -3635,7 +3604,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, ...@@ -3635,7 +3604,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
static void process_slab(struct loc_track *t, struct kmem_cache *s, static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc, struct page *page, enum track_item alloc,
long *map) unsigned long *map)
{ {
void *addr = page_address(page); void *addr = page_address(page);
void *p; void *p;
...@@ -3735,7 +3704,71 @@ static int list_locations(struct kmem_cache *s, char *buf, ...@@ -3735,7 +3704,71 @@ static int list_locations(struct kmem_cache *s, char *buf,
len += sprintf(buf, "No data\n"); len += sprintf(buf, "No data\n");
return len; return len;
} }
#endif
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
u8 *p;
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
printk(KERN_ERR "SLUB resiliency testing\n");
printk(KERN_ERR "-----------------------\n");
printk(KERN_ERR "A. Corruption after allocation\n");
p = kzalloc(16, GFP_KERNEL);
p[16] = 0x12;
printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
" 0x12->0x%p\n\n", p + 16);
validate_slab_cache(kmalloc_caches[4]);
/* Hmmm... The next two are dangerous */
p = kzalloc(32, GFP_KERNEL);
p[32 + sizeof(void *)] = 0x34;
printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
" 0x34 -> -0x%p\n", p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches[5]);
p = kzalloc(64, GFP_KERNEL);
p += 64 + (get_cycles() & 0xff) * sizeof(void *);
*p = 0x56;
printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches[6]);
printk(KERN_ERR "\nB. Corruption after free\n");
p = kzalloc(128, GFP_KERNEL);
kfree(p);
*p = 0x78;
printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches[7]);
p = kzalloc(256, GFP_KERNEL);
kfree(p);
p[50] = 0x9a;
printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
p);
validate_slab_cache(kmalloc_caches[8]);
p = kzalloc(512, GFP_KERNEL);
kfree(p);
p[512] = 0xab;
printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches[9]);
}
#else
#ifdef CONFIG_SYSFS
static void resiliency_test(void) {};
#endif
#endif
#ifdef CONFIG_SYSFS
enum slab_stat_type { enum slab_stat_type {
SL_ALL, /* All slabs */ SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */ SL_PARTIAL, /* Only partially allocated slabs */
...@@ -3788,6 +3821,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s, ...@@ -3788,6 +3821,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
} }
} }
down_read(&slub_lock);
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) { if (flags & SO_ALL) {
for_each_node_state(node, N_NORMAL_MEMORY) { for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node); struct kmem_cache_node *n = get_node(s, node);
...@@ -3804,7 +3839,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s, ...@@ -3804,7 +3839,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
nodes[node] += x; nodes[node] += x;
} }
} else if (flags & SO_PARTIAL) { } else
#endif
if (flags & SO_PARTIAL) {
for_each_node_state(node, N_NORMAL_MEMORY) { for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node); struct kmem_cache_node *n = get_node(s, node);
...@@ -3829,6 +3866,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, ...@@ -3829,6 +3866,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
return x + sprintf(buf + x, "\n"); return x + sprintf(buf + x, "\n");
} }
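
This reorganisation is what "slub: Enable sysfs support for !CONFIG_SLUB_DEBUG" in the commit list amounts to: sysfs files that only need the always-present counters stay available under CONFIG_SYSFS, while the walk over all slabs depends on the nr_slabs/total_objects bookkeeping that exists only with CONFIG_SLUB_DEBUG. Shape of the guard added to show_slab_objects() above (sketch, loop bodies elided):

#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
        /* walks n->nr_slabs / n->total_objects, kept only with SLUB_DEBUG */
} else
#endif
if (flags & SO_PARTIAL) {
        /* n->nr_partial is maintained unconditionally */
}
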
#ifdef CONFIG_SLUB_DEBUG
static int any_slab_objects(struct kmem_cache *s) static int any_slab_objects(struct kmem_cache *s)
{ {
int node; int node;
...@@ -3844,6 +3882,7 @@ static int any_slab_objects(struct kmem_cache *s) ...@@ -3844,6 +3882,7 @@ static int any_slab_objects(struct kmem_cache *s)
} }
return 0; return 0;
} }
#endif
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj); #define to_slab(n) container_of(n, struct kmem_cache, kobj);
...@@ -3945,12 +3984,6 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf) ...@@ -3945,12 +3984,6 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
} }
SLAB_ATTR_RO(aliases); SLAB_ATTR_RO(aliases);
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);
static ssize_t partial_show(struct kmem_cache *s, char *buf) static ssize_t partial_show(struct kmem_cache *s, char *buf)
{ {
return show_slab_objects(s, buf, SO_PARTIAL); return show_slab_objects(s, buf, SO_PARTIAL);
...@@ -3975,93 +4008,83 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) ...@@ -3975,93 +4008,83 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
} }
SLAB_ATTR_RO(objects_partial); SLAB_ATTR_RO(objects_partial);
static ssize_t total_objects_show(struct kmem_cache *s, char *buf) static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
} }
static ssize_t sanity_checks_store(struct kmem_cache *s, static ssize_t reclaim_account_store(struct kmem_cache *s,
const char *buf, size_t length) const char *buf, size_t length)
{ {
s->flags &= ~SLAB_DEBUG_FREE; s->flags &= ~SLAB_RECLAIM_ACCOUNT;
if (buf[0] == '1') if (buf[0] == '1')
s->flags |= SLAB_DEBUG_FREE; s->flags |= SLAB_RECLAIM_ACCOUNT;
return length; return length;
} }
SLAB_ATTR(sanity_checks); SLAB_ATTR(reclaim_account);
static ssize_t trace_show(struct kmem_cache *s, char *buf) static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
} }
SLAB_ATTR_RO(hwcache_align);
static ssize_t trace_store(struct kmem_cache *s, const char *buf, #ifdef CONFIG_ZONE_DMA
size_t length) static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{ {
s->flags &= ~SLAB_TRACE; return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
if (buf[0] == '1')
s->flags |= SLAB_TRACE;
return length;
} }
SLAB_ATTR(trace); SLAB_ATTR_RO(cache_dma);
#endif
#ifdef CONFIG_FAILSLAB static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
} }
SLAB_ATTR_RO(destroy_by_rcu);
static ssize_t failslab_store(struct kmem_cache *s, const char *buf, #ifdef CONFIG_SLUB_DEBUG
size_t length) static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{ {
s->flags &= ~SLAB_FAILSLAB; return show_slab_objects(s, buf, SO_ALL);
if (buf[0] == '1')
s->flags |= SLAB_FAILSLAB;
return length;
} }
SLAB_ATTR(failslab); SLAB_ATTR_RO(slabs);
#endif
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
} }
SLAB_ATTR_RO(total_objects);
static ssize_t reclaim_account_store(struct kmem_cache *s, static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
const char *buf, size_t length)
{ {
s->flags &= ~SLAB_RECLAIM_ACCOUNT; return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
if (buf[0] == '1')
s->flags |= SLAB_RECLAIM_ACCOUNT;
return length;
} }
SLAB_ATTR(reclaim_account);
static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) static ssize_t sanity_checks_store(struct kmem_cache *s,
const char *buf, size_t length)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); s->flags &= ~SLAB_DEBUG_FREE;
if (buf[0] == '1')
s->flags |= SLAB_DEBUG_FREE;
return length;
} }
SLAB_ATTR_RO(hwcache_align); SLAB_ATTR(sanity_checks);
#ifdef CONFIG_ZONE_DMA static ssize_t trace_show(struct kmem_cache *s, char *buf)
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
} }
SLAB_ATTR_RO(cache_dma);
#endif
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) static ssize_t trace_store(struct kmem_cache *s, const char *buf,
size_t length)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); s->flags &= ~SLAB_TRACE;
if (buf[0] == '1')
s->flags |= SLAB_TRACE;
return length;
} }
SLAB_ATTR_RO(destroy_by_rcu); SLAB_ATTR(trace);
static ssize_t red_zone_show(struct kmem_cache *s, char *buf) static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{ {
...@@ -4139,6 +4162,40 @@ static ssize_t validate_store(struct kmem_cache *s, ...@@ -4139,6 +4162,40 @@ static ssize_t validate_store(struct kmem_cache *s,
} }
SLAB_ATTR(validate); SLAB_ATTR(validate);
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);
static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
}
static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
size_t length)
{
s->flags &= ~SLAB_FAILSLAB;
if (buf[0] == '1')
s->flags |= SLAB_FAILSLAB;
return length;
}
SLAB_ATTR(failslab);
#endif
static ssize_t shrink_show(struct kmem_cache *s, char *buf) static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{ {
return 0; return 0;
...@@ -4158,22 +4215,6 @@ static ssize_t shrink_store(struct kmem_cache *s, ...@@ -4158,22 +4215,6 @@ static ssize_t shrink_store(struct kmem_cache *s,
} }
SLAB_ATTR(shrink); SLAB_ATTR(shrink);
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);
static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{ {
...@@ -4279,25 +4320,27 @@ static struct attribute *slab_attrs[] = { ...@@ -4279,25 +4320,27 @@ static struct attribute *slab_attrs[] = {
&min_partial_attr.attr, &min_partial_attr.attr,
&objects_attr.attr, &objects_attr.attr,
&objects_partial_attr.attr, &objects_partial_attr.attr,
&total_objects_attr.attr,
&slabs_attr.attr,
&partial_attr.attr, &partial_attr.attr,
&cpu_slabs_attr.attr, &cpu_slabs_attr.attr,
&ctor_attr.attr, &ctor_attr.attr,
&aliases_attr.attr, &aliases_attr.attr,
&align_attr.attr, &align_attr.attr,
&sanity_checks_attr.attr,
&trace_attr.attr,
&hwcache_align_attr.attr, &hwcache_align_attr.attr,
&reclaim_account_attr.attr, &reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr, &destroy_by_rcu_attr.attr,
&shrink_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
&slabs_attr.attr,
&sanity_checks_attr.attr,
&trace_attr.attr,
&red_zone_attr.attr, &red_zone_attr.attr,
&poison_attr.attr, &poison_attr.attr,
&store_user_attr.attr, &store_user_attr.attr,
&validate_attr.attr, &validate_attr.attr,
&shrink_attr.attr,
&alloc_calls_attr.attr, &alloc_calls_attr.attr,
&free_calls_attr.attr, &free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr, &cache_dma_attr.attr,
#endif #endif
...@@ -4377,6 +4420,7 @@ static void kmem_cache_release(struct kobject *kobj) ...@@ -4377,6 +4420,7 @@ static void kmem_cache_release(struct kobject *kobj)
{ {
struct kmem_cache *s = to_slab(kobj); struct kmem_cache *s = to_slab(kobj);
kfree(s->name);
kfree(s); kfree(s);
} }
...@@ -4579,7 +4623,7 @@ static int __init slab_sysfs_init(void) ...@@ -4579,7 +4623,7 @@ static int __init slab_sysfs_init(void)
} }
__initcall(slab_sysfs_init); __initcall(slab_sysfs_init);
#endif #endif /* CONFIG_SYSFS */
/* /*
* The /proc/slabinfo ABI * The /proc/slabinfo ABI
......