Commit b56efcf0 authored by Eric Dumazet, committed by Pekka Enberg

slab: shrink sizeof(struct kmem_cache)

Reduce high-order allocations for some setups.
(With NR_CPUS=4096 we needed 64KB per kmem_cache struct.)

We now allocate exactly the needed size, using nr_cpu_ids and nr_node_ids.

This also makes the code a bit smaller on x86_64, since some field offsets
now fit below the 127-byte limit for short (8-bit) displacements:

Before patch:
# size mm/slab.o
   text    data     bss     dec     hex filename
  22605  361665      32  384302   5dd2e mm/slab.o

After patch:
# size mm/slab.o
   text    data     bss     dec     hex filename
  22349  353473    8224  384046   5dc2e mm/slab.o

CC: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent c225150b
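
The idea, as a minimal userspace sketch (illustrative only: struct cache, struct list3 and cache_alloc() below are hypothetical stand-ins, not the kernel's kmem_cache/kmem_list3 code; note the kernel keeps array[NR_CPUS] rather than a flexible array so that the statically defined cache_cache still has full storage):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list3;			/* stand-in for struct kmem_list3 (per-node data) */
struct array_cache;		/* stand-in for the per-cpu array cache */

struct cache {
	unsigned int batchcount;	/* fixed-size fields first ... */
	unsigned int limit;
	struct list3 **nodelists;	/* will point just past array[] */
	struct array_cache *array[];	/* trailing array, sized at run time */
};

static struct cache *cache_alloc(unsigned int cpus, unsigned int nodes)
{
	/* exact size: fixed part + one pointer per present CPU and node,
	 * instead of reserving NR_CPUS + MAX_NUMNODES slots at compile time */
	size_t sz = offsetof(struct cache, array)
		    + cpus * sizeof(struct array_cache *)
		    + nodes * sizeof(struct list3 *);
	struct cache *c = calloc(1, sz);

	if (c)		/* the per-node pointers live right after array[cpus] */
		c->nodelists = (struct list3 **)&c->array[cpus];
	return c;
}

int main(void)
{
	struct cache *c = cache_alloc(4, 1);	/* e.g. 4 CPUs, 1 NUMA node */

	if (!c)
		return 1;
	printf("trailing pointer storage: %zu bytes instead of %zu\n",
	       (4 + 1) * sizeof(void *),	/* what this box needs */
	       4096 * sizeof(void *));		/* array[NR_CPUS] with NR_CPUS=4096 */
	free(c);
	return 0;
}

A dynamically created cache is thus allocated with just enough trailing slots for the CPUs and NUMA nodes actually present, and nodelists simply points at the memory immediately after the last per-cpu slot, which is what the offsetof(struct kmem_cache, array[nr_cpu_ids]) expression in the patch computes.
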
@@ -24,21 +24,19 @@
  */
 struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
+/* 1) Cache tunables. Protected by cache_chain_mutex */
 	unsigned int batchcount;
 	unsigned int limit;
 	unsigned int shared;
 	unsigned int buffer_size;
 	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
 	unsigned int flags;		/* constant flags */
 	unsigned int num;		/* # of objs per slab */
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
 	/* order of pgs per slab (2^n) */
 	unsigned int gfporder;
@@ -54,11 +52,11 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor)(void *obj);
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
 	const char *name;
 	struct list_head next;
-/* 6) statistics */
+/* 5) statistics */
 #ifdef CONFIG_DEBUG_SLAB
 	unsigned long num_active;
 	unsigned long num_allocations;
@@ -85,16 +83,18 @@ struct kmem_cache {
 	int obj_size;
 #endif /* CONFIG_DEBUG_SLAB */
+/* 6) per-cpu/per-node data, touched during every alloc/free */
 	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * We put array[] at the end of kmem_cache, because we want to size
+	 * this array to nr_cpu_ids slots instead of NR_CPUS
 	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
+	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of cpus.
 	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	struct kmem_list3 **nodelists;
+	struct array_cache *array[NR_CPUS];
 	/*
-	 * Do not add fields after nodelists[]
+	 * Do not add fields after array[]
 	 */
 };
...
@@ -574,7 +574,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 /* internal cache of cache description objs */
+static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache cache_cache = {
+	.nodelists = cache_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1492,10 +1494,9 @@ void __init kmem_cache_init(void)
 		cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 	/*
-	 * struct kmem_cache size depends on nr_node_ids, which
-	 * can be less than MAX_NUMNODES.
+	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
+	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
 #if DEBUG
 	cache_cache.obj_size = cache_cache.buffer_size;
@@ -2308,6 +2309,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep)
 		goto oops;
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 #if DEBUG
 	cachep->obj_size = size;
...
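
Two notes on the numbers above: assuming 8-byte pointers, the embedded array[NR_CPUS] alone cost 4096 * 8 = 32KB in every struct kmem_cache on an NR_CPUS=4096 build, whereas a 4-CPU, single-node machine now needs only (4 + 1) * 8 = 40 bytes of trailing pointer storage per cache. The bss growth in the size output (32 -> 8224 bytes) is expected, since bss now holds the static cache_cache_nodelists[MAX_NUMNODES] array that cache_cache needs because, being statically defined, it cannot size its own tail at run time.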