Commit b28a02de authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: fix code formatting

The slab allocator code is inconsistent in coding style and messy. For this
patch, I ran Lindent on mm/slab.c and fixed up the goofs by hand.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4d268eba
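For reference, the workflow the message describes can be sketched as below. This is a minimal, hypothetical reconstruction, not part of the patch itself; it assumes a kernel tree that ships the standard scripts/Lindent wrapper (a thin shell script that runs GNU indent with the kernel's K&R options, approximately -npro -kr -i8 -ts8 -sob -l80 -ss -ncs):

    # Reformat the slab allocator in place with the kernel's indent wrapper
    # (assumes GNU indent is installed; Lindent rewrites the file directly).
    ./scripts/Lindent mm/slab.c

    # The result is then reviewed and the remaining goofs fixed by hand,
    # per the commit message above. Note that some indent choices survive,
    # e.g. the space it inserts in function-pointer declarations such as
    # "void (*ctor) (void *, ...)", visible in the hunks below.
    git diff mm/slab.c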
@@ -130,7 +130,6 @@
#define FORCED_DEBUG 0
#endif
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
@@ -279,7 +278,7 @@ struct array_cache {
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
struct array_cache cache;
void * entries[BOOT_CPUCACHE_ENTRIES];
void *entries[BOOT_CPUCACHE_ENTRIES];
};
/*
@@ -396,10 +395,10 @@ struct kmem_cache {
unsigned int dflags; /* dynamic flags */
/* constructor func */
void (*ctor)(void *, kmem_cache_t *, unsigned long);
void (*ctor) (void *, kmem_cache_t *, unsigned long);
/* de-constructor func */
void (*dtor)(void *, kmem_cache_t *, unsigned long);
void (*dtor) (void *, kmem_cache_t *, unsigned long);
/* 4) cache creation/removal */
const char *name;
@@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
return (unsigned long *)(objp + cachep->objsize -
2 * BYTES_PER_WORD);
return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
}
static void **dbg_userword(kmem_cache_t *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
}
#else
@@ -607,14 +607,14 @@ struct cache_names {
static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
{ NULL, }
{NULL,}
#undef CACHE
};
static struct arraycache_init initarray_cache __initdata =
{ { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
{ { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
static kmem_cache_t cache_cache = {
@@ -655,9 +655,9 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
static void enable_cpucache (kmem_cache_t *cachep);
static void cache_reap (void *unused);
static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
static void enable_cpucache(kmem_cache_t *cachep);
static void cache_reap(void *unused);
static int __node_shrink(kmem_cache_t *cachep, int node);
static inline struct array_cache *ac_data(kmem_cache_t *cachep)
@@ -700,7 +700,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
int flags, size_t *left_over, unsigned int *num)
{
int i;
size_t wastage = PAGE_SIZE<<gfporder;
size_t wastage = PAGE_SIZE << gfporder;
size_t extra = 0;
size_t base = 0;
@@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
extra = sizeof(kmem_bufctl_t);
}
i = 0;
while (i*size + ALIGN(base+i*extra, align) <= wastage)
while (i * size + ALIGN(base + i * extra, align) <= wastage)
i++;
if (i > 0)
i--;
@@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
i = SLAB_LIMIT;
*num = i;
wastage -= i*size;
wastage -= ALIGN(base+i*extra, align);
wastage -= i * size;
wastage -= ALIGN(base + i * extra, align);
*left_over = wastage;
}
@@ -757,7 +757,7 @@ static void __devinit start_cpu_timer(int cpu)
static struct array_cache *alloc_arraycache(int node, int entries,
int batchcount)
{
int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *nc = NULL;
nc = kmalloc_node(memsize, GFP_KERNEL, node);
@@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
struct array_cache **ac_ptr;
int memsize = sizeof(void*)*MAX_NUMNODES;
int memsize = sizeof(void *) * MAX_NUMNODES;
int i;
if (limit > 1)
@@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
}
ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
if (!ac_ptr[i]) {
for (i--; i <=0; i--)
for (i--; i <= 0; i--)
kfree(ac_ptr[i]);
kfree(ac_ptr);
return NULL;
@@ -812,7 +812,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
kfree(ac_ptr);
}
static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
static inline void __drain_alien_cache(kmem_cache_t *cachep,
struct array_cache *ac, int node)
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
{
int i=0;
int i = 0;
struct array_cache *ac;
unsigned long flags;
@@ -849,7 +850,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
kmem_cache_t* cachep;
kmem_cache_t *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
int memsize = sizeof(struct kmem_list3);
@@ -875,7 +876,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3;
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cachep->nodelists[node] = l3;
}
@@ -900,7 +901,8 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
BUG_ON(!l3);
if (!l3->shared) {
if (!(nc = alloc_arraycache(node,
cachep->shared*cachep->batchcount,
cachep->shared *
cachep->batchcount,
0xbaadf00d)))
goto bad;
@@ -966,7 +968,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
} else {
spin_unlock(&l3->list_lock);
}
unlock_cache:
unlock_cache:
spin_unlock_irq(&cachep->spinlock);
kfree(nc);
}
@@ -975,7 +977,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
#endif
}
return NOTIFY_OK;
bad:
bad:
up(&cache_chain_sem);
return NOTIFY_BAD;
}
@@ -985,8 +987,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
int nodeid)
static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
{
struct kmem_list3 *ptr;
@@ -1059,9 +1060,9 @@ void __init kmem_cache_init(void)
if (!cache_cache.num)
BUG();
cache_cache.colour = left_over/cache_cache.colour_off;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.colour_next = 0;
cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
@@ -1074,14 +1075,18 @@ void __init kmem_cache_init(void)
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_PANIC), NULL, NULL);
if (INDEX_AC != INDEX_L3)
sizes[INDEX_L3].cs_cachep =
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
NULL);
while (sizes->cs_size != ULONG_MAX) {
/*
@@ -1091,28 +1096,34 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
if(!sizes->cs_cachep)
if (!sizes->cs_cachep)
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS
| SLAB_PANIC),
NULL, NULL);
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
offslab_limit = sizes->cs_size-sizeof(struct slab);
offslab_limit = sizes->cs_size - sizeof(struct slab);
offslab_limit /= sizeof(kmem_bufctl_t);
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
NULL, NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_CACHE_DMA |
SLAB_PANIC), NULL,
NULL);
sizes++;
names++;
}
/* 4) Replace the bootstrap head arrays */
{
void * ptr;
void *ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
@@ -1143,11 +1154,11 @@ void __init kmem_cache_init(void)
for_each_online_node(node) {
init_list(malloc_sizes[INDEX_AC].cs_cachep,
&initkmem_list3[SIZE_AC+node], node);
&initkmem_list3[SIZE_AC + node], node);
if (INDEX_AC != INDEX_L3) {
init_list(malloc_sizes[INDEX_L3].cs_cachep,
&initkmem_list3[SIZE_L3+node],
&initkmem_list3[SIZE_L3 + node],
node);
}
}
@@ -1226,7 +1237,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
*/
static void kmem_freepages(kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
@@ -1240,12 +1251,12 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
}
static void kmem_rcu_free(struct rcu_head *head)
{
struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
kmem_cache_t *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
@@ -1261,15 +1272,15 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
{
int size = obj_reallen(cachep);
addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
if (size < 5*sizeof(unsigned long))
if (size < 5 * sizeof(unsigned long))
return;
*addr++=0x12345678;
*addr++=caller;
*addr++=smp_processor_id();
size -= 3*sizeof(unsigned long);
*addr++ = 0x12345678;
*addr++ = caller;
*addr++ = smp_processor_id();
size -= 3 * sizeof(unsigned long);
{
unsigned long *sptr = &caller;
unsigned long svalue;
@@ -1277,7 +1288,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
while (!kstack_end(sptr)) {
svalue = *sptr++;
if (kernel_text_address(svalue)) {
*addr++=svalue;
*addr++ = svalue;
size -= sizeof(unsigned long);
if (size <= sizeof(unsigned long))
break;
@@ -1285,25 +1296,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
}
}
*addr++=0x87654321;
*addr++ = 0x87654321;
}
#endif
static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
{
int size = obj_reallen(cachep);
addr = &((char*)addr)[obj_dbghead(cachep)];
addr = &((char *)addr)[obj_dbghead(cachep)];
memset(addr, val, size);
*(unsigned char *)(addr+size-1) = POISON_END;
*(unsigned char *)(addr + size - 1) = POISON_END;
}
static void dump_line(char *data, int offset, int limit)
{
int i;
printk(KERN_ERR "%03x:", offset);
for (i=0;i<limit;i++) {
printk(" %02x", (unsigned char)data[offset+i]);
for (i = 0; i < limit; i++) {
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
}
@@ -1329,13 +1340,13 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
realobj = (char*)objp+obj_dbghead(cachep);
realobj = (char *)objp + obj_dbghead(cachep);
size = obj_reallen(cachep);
for (i=0; i<size && lines;i+=16, lines--) {
for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
if (i+limit > size)
limit = size-i;
if (i + limit > size)
limit = size - i;
dump_line(realobj, i, limit);
}
}
@@ -1346,27 +1357,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
int size, i;
int lines = 0;
realobj = (char*)objp+obj_dbghead(cachep);
realobj = (char *)objp + obj_dbghead(cachep);
size = obj_reallen(cachep);
for (i=0;i<size;i++) {
for (i = 0; i < size; i++) {
char exp = POISON_FREE;
if (i == size-1)
if (i == size - 1)
exp = POISON_END;
if (realobj[i] != exp) {
int limit;
/* Mismatch ! */
/* Print header */
if (lines == 0) {
printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
printk(KERN_ERR
"Slab corruption: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 0);
}
/* Hexdump the affected line */
i = (i/16)*16;
i = (i / 16) * 16;
limit = 16;
if (i+limit > size)
limit = size-i;
if (i + limit > size)
limit = size - i;
dump_line(realobj, i, limit);
i += 16;
lines++;
@@ -1382,17 +1394,17 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
struct slab *slabp = page_get_slab(virt_to_page(objp));
int objnr;
objnr = (objp-slabp->s_mem)/cachep->objsize;
objnr = (objp - slabp->s_mem) / cachep->objsize;
if (objnr) {
objp = slabp->s_mem+(objnr-1)*cachep->objsize;
realobj = (char*)objp+obj_dbghead(cachep);
objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
realobj = (char *)objp + obj_dbghead(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr+1 < cachep->num) {
objp = slabp->s_mem+(objnr+1)*cachep->objsize;
realobj = (char*)objp+obj_dbghead(cachep);
if (objnr + 1 < cachep->num) {
objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
realobj = (char *)objp + obj_dbghead(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
@@ -1405,7 +1417,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
* Before calling the slab must have been unlinked from the cache.
* The cache-lock is not held/needed.
*/
static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
{
void *addr = slabp->s_mem - slabp->colouroff;
@@ -1416,8 +1428,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
if ((cachep->objsize % PAGE_SIZE) == 0
&& OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE,
1);
else
check_poison_obj(cachep, objp);
#else
@@ -1433,14 +1448,14 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
"was overwritten");
}
if (cachep->dtor && !(cachep->flags & SLAB_POISON))
(cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
(cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
}
#else
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
void* objp = slabp->s_mem+cachep->objsize*i;
(cachep->dtor)(objp, cachep, 0);
void *objp = slabp->s_mem + cachep->objsize * i;
(cachep->dtor) (objp, cachep, 0);
}
}
#endif
@@ -1448,7 +1463,7 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
slab_rcu = (struct slab_rcu *) slabp;
slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->cachep = cachep;
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
@@ -1466,10 +1481,10 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
int node;
for_each_online_node(node) {
cachep->nodelists[node] = &initkmem_list3[index+node];
cachep->nodelists[node] = &initkmem_list3[index + node];
cachep->nodelists[node]->next_reap = jiffies +
REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3;
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
}
}
@@ -1486,7 +1501,7 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
{
size_t left_over = 0;
for ( ; ; cachep->gfporder++) {
for (;; cachep->gfporder++) {
unsigned int num;
size_t remainder;
@@ -1568,8 +1583,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if ((!name) ||
in_interrupt() ||
(size < BYTES_PER_WORD) ||
(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
(dtor && !ctor)) {
(size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
printk(KERN_ERR "%s: Early error in slab %s\n",
__FUNCTION__, name);
BUG();
@@ -1597,7 +1611,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
continue;
}
if (!strcmp(pc->name,name)) {
if (!strcmp(pc->name, name)) {
printk("kmem_cache_create: duplicate cache %s\n", name);
dump_stack();
goto oops;
@@ -1612,7 +1626,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
"requested - %s\n", __FUNCTION__, name);
flags &= ~SLAB_DEBUG_INITIAL;
}
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
@@ -1620,8 +1633,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
if ((size < 4096
|| fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
#endif
@@ -1642,9 +1656,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
if (size & (BYTES_PER_WORD-1)) {
size += (BYTES_PER_WORD-1);
size &= ~(BYTES_PER_WORD-1);
if (size & (BYTES_PER_WORD - 1)) {
size += (BYTES_PER_WORD - 1);
size &= ~(BYTES_PER_WORD - 1);
}
/* calculate out the final buffer alignment: */
@@ -1655,7 +1669,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* objects into one cacheline.
*/
ralign = cache_line_size();
while (size <= ralign/2)
while (size <= ralign / 2)
ralign /= 2;
} else {
ralign = BYTES_PER_WORD;
@@ -1664,13 +1678,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (ralign < ARCH_SLAB_MINALIGN) {
ralign = ARCH_SLAB_MINALIGN;
if (ralign > BYTES_PER_WORD)
flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 3) caller mandated alignment: disables debug if necessary */
if (ralign < align) {
ralign = align;
if (ralign > BYTES_PER_WORD)
flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
@@ -1692,7 +1706,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* add space for red zone words */
cachep->dbghead += BYTES_PER_WORD;
size += 2*BYTES_PER_WORD;
size += 2 * BYTES_PER_WORD;
}
if (flags & SLAB_STORE_USER) {
/* user store requires word alignment and
@@ -1703,7 +1717,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
&& cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
cachep->dbghead += PAGE_SIZE - size;
size = PAGE_SIZE;
}
@@ -1711,7 +1726,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
#endif
/* Determine if the slab management is 'on' or 'off' slab. */
if (size >= (PAGE_SIZE>>3))
if (size >= (PAGE_SIZE >> 3))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -1738,7 +1753,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep = NULL;
goto oops;
}
slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
/*
@@ -1752,14 +1767,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
slab_size =
cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
}
cachep->colour_off = cache_line_size();
/* Offset must be a multiple of the alignment. */
if (cachep->colour_off < align)
cachep->colour_off = align;
cachep->colour = left_over/cachep->colour_off;
cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
@@ -1800,8 +1816,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
g_cpucache_up = PARTIAL_AC;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init),
GFP_KERNEL);
kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
if (g_cpucache_up == PARTIAL_AC) {
set_up_list3s(cachep, SIZE_L3);
@@ -1811,16 +1826,18 @@ kmem_cache_create (const char *name, size_t size, size_t align,
for_each_online_node(node) {
cachep->nodelists[node] =
kmalloc_node(sizeof(struct kmem_list3),
kmalloc_node(sizeof
(struct kmem_list3),
GFP_KERNEL, node);
BUG_ON(!cachep->nodelists[node]);
kmem_list3_init(cachep->nodelists[node]);
kmem_list3_init(cachep->
nodelists[node]);
}
}
}
cachep->nodelists[numa_node_id()]->next_reap =
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3;
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
BUG_ON(!ac_data(cachep));
ac_data(cachep)->avail = 0;
@@ -1834,7 +1851,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
unlock_cpu_hotplug();
oops:
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
@@ -1880,7 +1897,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
/*
* Waits for all CPUs to execute func().
*/
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
{
check_irq_on();
preempt_disable();
@@ -1895,12 +1912,12 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
preempt_enable();
}
static void drain_array_locked(kmem_cache_t* cachep,
struct array_cache *ac, int force, int node);
static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
int force, int node);
static void do_drain(void *arg)
{
kmem_cache_t *cachep = (kmem_cache_t*)arg;
kmem_cache_t *cachep = (kmem_cache_t *) arg;
struct array_cache *ac;
int node = numa_node_id();
@@ -1958,8 +1975,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
slab_destroy(cachep, slabp);
spin_lock_irq(&l3->list_lock);
}
ret = !list_empty(&l3->slabs_full) ||
!list_empty(&l3->slabs_partial);
ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
return ret;
}
@@ -2015,7 +2031,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* The caller must guarantee that noone will allocate memory from the cache
* during the kmem_cache_destroy().
*/
int kmem_cache_destroy(kmem_cache_t * cachep)
int kmem_cache_destroy(kmem_cache_t *cachep)
{
int i;
struct kmem_list3 *l3;
@@ -2037,7 +2053,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
down(&cache_chain_sem);
list_add(&cachep->next,&cache_chain);
list_add(&cachep->next, &cache_chain);
up(&cache_chain_sem);
unlock_cpu_hotplug();
return 1;
@@ -2066,7 +2082,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
int colour_off, gfp_t local_flags)
{
struct slab *slabp;
@@ -2077,19 +2093,19 @@ static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
if (!slabp)
return NULL;
} else {
slabp = objp+colour_off;
slabp = objp + colour_off;
colour_off += cachep->slab_size;
}
slabp->inuse = 0;
slabp->colouroff = colour_off;
slabp->s_mem = objp+colour_off;
slabp->s_mem = objp + colour_off;
return slabp;
}
static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
return (kmem_bufctl_t *)(slabp+1);
return (kmem_bufctl_t *) (slabp + 1);
}
static void cache_init_objs(kmem_cache_t *cachep,
@@ -2098,7 +2114,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
int i;
for (i = 0; i < cachep->num; i++) {
void *objp = slabp->s_mem+cachep->objsize*i;
void *objp = slabp->s_mem + cachep->objsize * i;
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2116,7 +2132,8 @@ static void cache_init_objs(kmem_cache_t *cachep,
* Otherwise, deadlock. They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
cachep->ctor(objp + obj_dbghead(cachep), cachep,
ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2126,15 +2143,17 @@ static void cache_init_objs(kmem_cache_t *cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
&& cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
#endif
slab_bufctl(slabp)[i] = i+1;
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[i-1] = BUFCTL_END;
slab_bufctl(slabp)[i - 1] = BUFCTL_END;
slabp->free = 0;
}
@@ -2180,7 +2199,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/* Be lazy and only check for valid flags here,
* keeping it out of the critical path in kmem_cache_alloc().
*/
if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
BUG();
if (flags & SLAB_NO_GROW)
return 0;
@@ -2246,9 +2265,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock);
return 1;
opps1:
opps1:
kmem_freepages(cachep, objp);
failed:
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
@@ -2273,7 +2292,8 @@ static void kfree_debugcheck(const void *objp)
}
page = virt_to_page(objp);
if (!PageSlab(page)) {
printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
(unsigned long)objp);
BUG();
}
}
@@ -2290,20 +2310,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
page = virt_to_page(objp);
if (page_get_cache(page) != cachep) {
printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
page_get_cache(page),cachep);
printk(KERN_ERR
"mismatch in kmem_cache_free: expected cache %p, got %p\n",
page_get_cache(page), cachep);
printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
page_get_cache(page)->name);
WARN_ON(1);
}
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
slab_error(cachep, "double free, or memory outside"
if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
|| *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
slab_error(cachep,
"double free, or memory outside"
" object was overwritten");
printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
@@ -2311,30 +2337,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
objnr = (objp-slabp->s_mem)/cachep->objsize;
objnr = (objp - slabp->s_mem) / cachep->objsize;
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
*/
cachep->ctor(objp+obj_dbghead(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
cachep->ctor(objp + obj_dbghead(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
}
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*/
cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
}
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
@@ -2357,13 +2384,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
bad:
printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
bad:
printk(KERN_ERR
"slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
if ((i%16)==0)
for (i = 0;
i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
if ((i % 16) == 0)
printk("\n%03x:", i);
printk(" %02x", ((unsigned char*)slabp)[i]);
printk(" %02x", ((unsigned char *)slabp)[i]);
}
printk("\n");
BUG();
@@ -2383,7 +2413,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
check_irq_off();
ac = ac_data(cachep);
retry:
retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/* if there was little recent activity on this
@@ -2406,7 +2436,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
ac->avail = batchcount;
memcpy(ac->entry,
&(shared_array->entry[shared_array->avail]),
sizeof(void*)*batchcount);
sizeof(void *) * batchcount);
shared_array->touched = 1;
goto alloc_done;
}
@@ -2434,7 +2464,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
/* get obj pointer */
ac->entry[ac->avail++] = slabp->s_mem +
slabp->free*cachep->objsize;
slabp->free * cachep->objsize;
slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free];
@@ -2454,9 +2484,9 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
list_add(&slabp->list, &l3->slabs_partial);
}
must_grow:
must_grow:
l3->free_objects -= ac->avail;
alloc_done:
alloc_done:
spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
@@ -2485,16 +2515,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
}
#if DEBUG
static void *
cache_alloc_debugcheck_after(kmem_cache_t *cachep,
gfp_t flags, void *objp, void *caller)
static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
void *objp, void *caller)
{
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -2506,11 +2536,15 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep, "double free, or memory outside"
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
|| *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep,
"double free, or memory outside"
" object was overwritten");
printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
@@ -2532,7 +2566,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
void* objp;
void *objp;
struct array_cache *ac;
check_irq_off();
@@ -2551,7 +2585,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
unsigned long save_flags;
void* objp;
void *objp;
cache_alloc_debugcheck_before(cachep, flags);
@@ -2580,7 +2614,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
retry:
retry:
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
@@ -2601,7 +2635,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
BUG_ON(slabp->inuse == cachep->num);
/* get obj pointer */
obj = slabp->s_mem + slabp->free*cachep->objsize;
obj = slabp->s_mem + slabp->free * cachep->objsize;
slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
@@ -2622,7 +2656,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
spin_unlock(&l3->list_lock);
goto done;
must_grow:
must_grow:
spin_unlock(&l3->list_lock);
x = cache_grow(cachep, flags, nodeid);
@@ -2630,7 +2664,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
return NULL;
goto retry;
done:
done:
return obj;
}
#endif
@@ -2638,7 +2672,8 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
int node)
{
int i;
struct kmem_list3 *l3;
@@ -2705,20 +2740,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
spin_lock(&l3->list_lock);
if (l3->shared) {
struct array_cache *shared_array = l3->shared;
int max = shared_array->limit-shared_array->avail;
int max = shared_array->limit - shared_array->avail;
if (max) {
if (batchcount > max)
batchcount = max;
memcpy(&(shared_array->entry[shared_array->avail]),
ac->entry,
sizeof(void*)*batchcount);
ac->entry, sizeof(void *) * batchcount);
shared_array->avail += batchcount;
goto free_done;
}
}
free_block(cachep, ac->entry, batchcount, node);
free_done:
free_done:
#if STATS
{
int i = 0;
@@ -2740,10 +2774,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]),
sizeof(void*)*ac->avail);
sizeof(void *) * ac->avail);
}
/*
* __cache_free
* Release an obj back to its cache. If the obj has a constructed
@@ -2768,7 +2801,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL;
int nodeid = slabp->nodeid;
struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
struct kmem_list3 *l3 =
cachep->nodelists[numa_node_id()];
STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) {
@@ -2831,9 +2865,9 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*/
int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
{
unsigned long addr = (unsigned long) ptr;
unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = BYTES_PER_WORD-1;
unsigned long align_mask = BYTES_PER_WORD - 1;
unsigned long size = cachep->objsize;
struct page *page;
@@ -2853,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
out:
out:
return 0;
}
@@ -2880,8 +2914,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
if (unlikely(!cachep->nodelists[nodeid])) {
/* Fall back to __cache_alloc if we run into trouble */
printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name);
return __cache_alloc(cachep,flags);
printk(KERN_WARNING
"slab: not allocating in inactive node %d for cache %s\n",
nodeid, cachep->name);
return __cache_alloc(cachep, flags);
}
cache_alloc_debugcheck_before(cachep, flags);
@@ -2891,7 +2927,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
ptr =
cache_alloc_debugcheck_after(cachep, flags, ptr,
__builtin_return_address(0));
return ptr;
}
@@ -2957,7 +2995,7 @@ EXPORT_SYMBOL(__kmalloc);
void *__alloc_percpu(size_t size)
{
int i;
struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -2981,9 +3019,9 @@ void *__alloc_percpu(size_t size)
}
/* Catch derefs w/o wrappers */
return (void *) (~(unsigned long) pdata);
return (void *)(~(unsigned long)pdata);
unwind_oom:
unwind_oom:
while (--i >= 0) {
if (!cpu_possible(i))
continue;
@@ -3046,7 +3084,7 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = page_get_cache(virt_to_page(objp));
__cache_free(c, (void*)objp);
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
@@ -3059,11 +3097,10 @@ EXPORT_SYMBOL(kfree);
* Don't free memory not originally allocated by alloc_percpu()
* The complemented objp is to check for that.
*/
void
free_percpu(const void *objp)
void free_percpu(const void *objp)
{
int i;
struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
/*
* We allocate for all cpus so we cannot use for online cpu here.
@@ -3103,23 +3140,23 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
goto fail;
#endif
if (!(new = alloc_arraycache(node, (cachep->shared*
cachep->batchcount), 0xbaadf00d)))
if (!(new = alloc_arraycache(node, (cachep->shared *
cachep->batchcount),
0xbaadf00d)))
goto fail;
if ((l3 = cachep->nodelists[node])) {
spin_lock_irq(&l3->list_lock);
if ((nc = cachep->nodelists[node]->shared))
free_block(cachep, nc->entry,
nc->avail, node);
free_block(cachep, nc->entry, nc->avail, node);
l3->shared = new;
if (!cachep->nodelists[node]->alien) {
l3->alien = new_alien;
new_alien = NULL;
}
l3->free_limit = (1 + nr_cpus_node(node))*
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
kfree(nc);
@@ -3132,15 +3169,15 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3;
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new;
l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node))*
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
return err;
fail:
fail:
err = -ENOMEM;
return err;
}
@@ -3162,18 +3199,19 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}
static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
int shared)
{
struct ccupdate_struct new;
int i, err;
memset(&new.new,0,sizeof(new.new));
memset(&new.new, 0, sizeof(new.new));
for_each_online_cpu(i) {
new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount);
new.new[i] =
alloc_arraycache(cpu_to_node(i), limit, batchcount);
if (!new.new[i]) {
for (i--; i >= 0; i--) kfree(new.new[i]);
for (i--; i >= 0; i--)
kfree(new.new[i]);
return -ENOMEM;
}
}
@@ -3207,7 +3245,6 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
return 0;
}
static void enable_cpucache(kmem_cache_t *cachep)
{
int err;
@@ -3254,14 +3291,14 @@ static void enable_cpucache(kmem_cache_t *cachep)
if (limit > 32)
limit = 32;
#endif
err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
}
static void drain_array_locked(kmem_cache_t *cachep,
struct array_cache *ac, int force, int node)
static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
int force, int node)
{
int tofree;
@@ -3269,14 +3306,14 @@ static void drain_array_locked(kmem_cache_t *cachep,
if (ac->touched && !force) {
ac->touched = 0;
} else if (ac->avail) {
tofree = force ? ac->avail : (ac->limit+4)/5;
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail) {
tofree = (ac->avail+1)/2;
tofree = (ac->avail + 1) / 2;
}
free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void*)*ac->avail);
sizeof(void *) * ac->avail);
}
}
@@ -3299,13 +3336,14 @@ static void cache_reap(void *unused)
if (down_trylock(&cache_chain_sem)) {
/* Give up. Setup the next iteration. */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
schedule_delayed_work(&__get_cpu_var(reap_work),
REAPTIMEOUT_CPUC);
return;
}
list_for_each(walk, &cache_chain) {
kmem_cache_t *searchp;
struct list_head* p;
struct list_head *p;
int tofree;
struct slab *slabp;
@@ -3338,7 +3376,9 @@ static void cache_reap(void *unused)
goto next_unlock;
}
tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num);
tofree =
(l3->free_limit + 5 * searchp->num -
1) / (5 * searchp->num);
do {
p = l3->slabs_free.next;
if (p == &(l3->slabs_free))
@@ -3358,10 +3398,10 @@ static void cache_reap(void *unused)
spin_unlock_irq(&l3->list_lock);
slab_destroy(searchp, slabp);
spin_lock_irq(&l3->list_lock);
} while(--tofree > 0);
next_unlock:
} while (--tofree > 0);
next_unlock:
spin_unlock_irq(&l3->list_lock);
next:
next:
cond_resched();
}
check_irq_on();
@@ -3451,14 +3491,14 @@ static int s_show(struct seq_file *m, void *p)
spin_lock(&l3->list_lock);
list_for_each(q,&l3->slabs_full) {
list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
list_for_each(q,&l3->slabs_partial) {
list_for_each(q, &l3->slabs_partial) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
@@ -3467,7 +3507,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse;
active_slabs++;
}
list_for_each(q,&l3->slabs_free) {
list_for_each(q, &l3->slabs_free) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
@@ -3478,8 +3518,8 @@ static int s_show(struct seq_file *m, void *p)
spin_unlock(&l3->list_lock);
}
num_slabs+=active_slabs;
num_objs = num_slabs*cachep->num;
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
@@ -3489,10 +3529,9 @@ static int s_show(struct seq_file *m, void *p)
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
name, active_objs, num_objs, cachep->objsize,
cachep->num, (1<<cachep->gfporder));
cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount,
cachep->shared);
cachep->limit, cachep->batchcount, cachep->shared);
seq_printf(m, " : slabdata %6lu %6lu %6lu",
active_slabs, num_slabs, shared_avail);
#if STATS
@@ -3507,9 +3546,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long node_frees = cachep->node_frees;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
%4lu %4lu %4lu %4lu",
allocs, high, grown, reaped, errors,
max_freeable, node_allocs, node_frees);
%4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
}
/* cpu stats */
{
@@ -3556,10 +3593,10 @@ struct seq_operations slabinfo_op = {
* @count: data length
* @ppos: unused
*/
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
size_t count, loff_t *ppos)
{
char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
struct list_head *p;
@@ -3580,14 +3617,13 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
/* Find the cache in the chain of caches. */
down(&cache_chain_sem);
res = -EINVAL;
list_for_each(p,&cache_chain) {
list_for_each(p, &cache_chain) {
kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 ||
batchcount < 1 ||
batchcount > limit ||
shared < 0) {
batchcount > limit || shared < 0) {
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,
......