Commit 6446faa2 authored by Christoph Lameter

slub: Fix up comments

Provide comments and fix up various spelling / style issues.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
parent d8b42bf5
@@ -61,7 +61,7 @@ struct kmem_cache {
         int size;          /* The size of an object including meta data */
         int objsize;       /* The size of an object without meta data */
         int offset;        /* Free pointer offset. */
-        int order;
+        int order;         /* Current preferred allocation order */
         /*
          * Avoid an extra cache line for UP, SMP and for the node local to
@@ -138,11 +138,11 @@ static __always_inline int kmalloc_index(size_t size)
         if (size <=  512) return 9;
         if (size <= 1024) return 10;
         if (size <= 2 * 1024) return 11;
-        if (size <= 4 * 1024) return 12;
 /*
  * The following is only needed to support architectures with a larger page
  * size than 4k.
  */
+        if (size <= 4 * 1024) return 12;
         if (size <= 8 * 1024) return 13;
         if (size <= 16 * 1024) return 14;
         if (size <= 32 * 1024) return 15;
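The if-ladder above maps an allocation size to the index of the smallest power-of-two kmalloc cache that fits it; the commit simply moves the 4k case below the comment so that only the cases needed for >4k page sizes sit under it. As a rough, self-contained sketch of the same mapping (a hypothetical helper, not the kernel's kmalloc_index()):

#include <stddef.h>

/*
 * Hypothetical illustration of the size-to-index mapping shown above:
 * return the smallest n with 2^n >= size, i.e. 512 -> 9, 1024 -> 10,
 * up to 32k -> 15. Returns -1 for zero or for sizes beyond the ladder.
 */
static int size_to_cache_index(size_t size)
{
        size_t cache_size = 1;
        int index = 0;

        if (size == 0)
                return -1;
        while (cache_size < size) {
                cache_size <<= 1;
                if (++index > 15)
                        return -1;
        }
        return index;
}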
@@ -291,6 +291,7 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
+/* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
                                 struct page *page, const void *object)
 {
@@ -619,7 +620,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  *      A. Free pointer (if we cannot overwrite object on free)
  *      B. Tracking data for SLAB_STORE_USER
  *      C. Padding to reach required alignment boundary or at mininum
- *      one word if debuggin is on to be able to detect writes
+ *      one word if debugging is on to be able to detect writes
  *      before the word boundary.
  *
  *      Padding is done using 0x5a (POISON_INUSE)
@@ -1268,7 +1269,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
  * may return off node objects because partial slabs are obtained
  * from other nodes and filled up.
  *
- * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
+ * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
  * defrag_ratio = 1000) then every (well almost) allocation will
  * first attempt to defrag slab caches on other nodes. This means
  * scanning over all nodes to look for partial slabs which may be
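The path fix above matters because the tunable actually lives under /sys/kernel/slab/. As a hedged sketch of the throttling behaviour the comment describes (the sysfs value is stored multiplied by 10, so writing 100 yields an internal ratio of 1000 and remote scanning on almost every allocation; the helper name and the random draw below are simplified stand-ins, not slub.c's exact code):

/*
 * Simplified model of the defrag_ratio check described above.
 * @stored_ratio: sysfs value * 10, i.e. 0..1000.
 * @draw:         any cheap pseudo-random number.
 * Returns nonzero when the allocation should scan remote nodes for
 * partial slabs before falling back to a brand new slab.
 */
static int should_defrag_remote_nodes(unsigned int stored_ratio,
                                      unsigned int draw)
{
        /* ratio 0 disables remote scans; ratio 1000 (sysfs value 100)
         * almost always wins against a draw taken modulo 1024. */
        return (draw % 1024) < stored_ratio;
}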
@@ -1343,9 +1344,11 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
          * Adding an empty slab to the partial slabs in order
          * to avoid page allocator overhead. This slab needs
          * to come after the other slabs with objects in
-         * order to fill them up. That way the size of the
-         * partial list stays small. kmem_cache_shrink can
-         * reclaim empty slabs from the partial list.
+         * so that the others get filled first. That way the
+         * size of the partial list stays small.
+         *
+         * kmem_cache_shrink can reclaim any empty slabs from the
+         * partial list.
          */
         add_partial(n, page, 1);
         slab_unlock(page);
@@ -1368,7 +1371,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
         if (c->freelist)
                 stat(c, DEACTIVATE_REMOTE_FREES);
         /*
-         * Merge cpu freelist into freelist. Typically we get here
+         * Merge cpu freelist into slab freelist. Typically we get here
          * because both freelists are empty. So this is unlikely
          * to occur.
          */
@@ -1399,6 +1402,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 /*
  * Flush cpu slab.
+ *
  * Called from IPI handler with interrupts disabled.
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
@@ -1457,7 +1461,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * rest of the freelist to the lockless freelist.
  *
  * And if we were unable to get a new slab from the partial slab lists then
- * we need to allocate a new slab. This is slowest path since we may sleep.
+ * we need to allocate a new slab. This is the slowest path since it involves
+ * a call to the page allocator and the setup of a new slab.
  */
 static void *__slab_alloc(struct kmem_cache *s,
                 gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
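The reworded comment makes the cost explicit: the slowest case has to call the page allocator and then set up the new slab. A compressed, hedged outline of the order of attempts described here (the helper names are placeholders standing in for the refill, partial-list and new-slab paths; locking and per-cpu details are omitted):

#include <stddef.h>

/* Placeholder stubs so the outline below compiles; they stand in for the
 * cpu-slab refill, partial-list and new-slab paths of the real allocator. */
static void *refill_from_cpu_slab(void) { return NULL; }
static void *get_from_partial_lists(void) { return NULL; }
static void *allocate_and_setup_new_slab(void) { return NULL; }

/* Outline of the slow path: cheapest option first, page allocator last. */
static void *slow_path_outline(void)
{
        void *object;

        object = refill_from_cpu_slab();        /* reload lockless freelist */
        if (object)
                return object;

        object = get_from_partial_lists();      /* partially filled slabs */
        if (object)
                return object;

        /* Slowest case: page allocator call plus setup of a new slab. */
        return allocate_and_setup_new_slab();
}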
@@ -1471,7 +1476,9 @@ static void *__slab_alloc(struct kmem_cache *s,
         slab_lock(c->page);
         if (unlikely(!node_match(c, node)))
                 goto another_slab;
+
         stat(c, ALLOC_REFILL);
+
 load_freelist:
         object = c->page->freelist;
         if (unlikely(!object))
@@ -1616,6 +1623,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         if (unlikely(SlabDebug(page)))
                 goto debug;
+
 checks_ok:
         prior = object[offset] = page->freelist;
         page->freelist = object;
@@ -1630,8 +1638,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 goto slab_empty;
         /*
-         * Objects left in the slab. If it
-         * was not on the partial list before
+         * Objects left in the slab. If it was not on the partial list before
          * then add it.
          */
         if (unlikely(!prior)) {
@@ -1845,13 +1852,11 @@ static unsigned long calculate_alignment(unsigned long flags,
                 unsigned long align, unsigned long size)
 {
         /*
-         * If the user wants hardware cache aligned objects then
-         * follow that suggestion if the object is sufficiently
-         * large.
+         * If the user wants hardware cache aligned objects then follow that
+         * suggestion if the object is sufficiently large.
          *
-         * The hardware cache alignment cannot override the
-         * specified alignment though. If that is greater
-         * then use it.
+         * The hardware cache alignment cannot override the specified
+         * alignment though. If that is greater then use it.
          */
         if ((flags & SLAB_HWCACHE_ALIGN) &&
                         size > cache_line_size() / 2)
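The reflowed comment states the policy: honour SLAB_HWCACHE_ALIGN only when the object is larger than half a cache line, and never drop below the alignment the caller asked for. A hedged, stand-alone sketch of that calculation (the 64-byte cache line, the flag value and the word-size rounding here are assumptions for the example, not values taken from the kernel headers):

#include <stddef.h>

#define EX_CACHE_LINE_SIZE      64UL            /* assumed for this sketch */
#define EX_HWCACHE_ALIGN        0x2000UL        /* stand-in for SLAB_HWCACHE_ALIGN */
#define EX_ALIGN_UP(x, a)       (((x) + (a) - 1) & ~((a) - 1))

/* Sketch of the policy described above: use the cache line as alignment
 * only for sufficiently large objects, keep the caller's alignment if it
 * is bigger, and round the result up to a multiple of the word size. */
static unsigned long sketch_alignment(unsigned long flags,
                                      unsigned long align, unsigned long size)
{
        if ((flags & EX_HWCACHE_ALIGN) && size > EX_CACHE_LINE_SIZE / 2 &&
            align < EX_CACHE_LINE_SIZE)
                align = EX_CACHE_LINE_SIZE;

        if (align < sizeof(void *))
                align = sizeof(void *);

        return EX_ALIGN_UP(align, sizeof(void *));
}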
@@ -2049,6 +2054,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
+
         /*
          * lockdep requires consistent irq usage for each lock
          * so even though there cannot be a race this early in
@@ -2301,7 +2307,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object)
         /*
          * We could also check if the object is on the slabs freelist.
          * But this would be too expensive and it seems that the main
-         * purpose of kmem_ptr_valid is to check if the object belongs
+         * purpose of kmem_ptr_valid() is to check if the object belongs
          * to a certain slab.
          */
         return 1;
@@ -2913,7 +2919,7 @@ void __init kmem_cache_init(void)
         /*
          * Patch up the size_index table if we have strange large alignment
          * requirements for the kmalloc array. This is only the case for
-         * mips it seems. The standard arches will not generate any code here.
+         * MIPS it seems. The standard arches will not generate any code here.
          *
          * Largest permitted alignment is 256 bytes due to the way we
          * handle the index determination for the smaller caches.
@@ -2942,7 +2948,6 @@ void __init kmem_cache_init(void)
         kmem_size = sizeof(struct kmem_cache);
 #endif
-
         printk(KERN_INFO
                 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                 " CPUs=%d, Nodes=%d\n",
@@ -3039,12 +3044,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                  */
                 for_each_online_cpu(cpu)
                         get_cpu_slab(s, cpu)->objsize = s->objsize;
+
                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                 up_write(&slub_lock);
+
                 if (sysfs_slab_alias(s, name))
                         goto err;
                 return s;
         }
+
         s = kmalloc(kmem_size, GFP_KERNEL);
         if (s) {
                 if (kmem_cache_open(s, GFP_KERNEL, name,
@@ -3927,7 +3935,6 @@ SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 #ifdef CONFIG_SLUB_STATS
-
 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 {
         unsigned long sum = 0;
@@ -4111,8 +4118,8 @@ static struct kset *slab_kset;
 #define ID_STR_LENGTH 64
 /* Create a unique string id for a slab cache:
- * format
- * :[flags-]size:[memory address of kmemcache]
+ *
+ * Format       :[flags-]size
  */
 static char *create_unique_id(struct kmem_cache *s)
 {
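The corrected comment drops the stale mention of a memory-address suffix: the id is just optional flag characters, a dash, and the object size. A small, hedged illustration of assembling a string in that :[flags-]size shape (the two flag bits and their letters are placeholders for the example, not the full set slub.c encodes):

#include <stdio.h>
#include <stddef.h>

/* Placeholder flag bits, for the illustration only. */
#define EX_FLAG_DMA      0x1u
#define EX_FLAG_ACCOUNT  0x2u

/* Build an id of the form ":[flags-]size", e.g. ":a-128" or ":4096". */
static void build_slab_id(char *buf, size_t len, unsigned int flags,
                          unsigned int size)
{
        char *p = buf;

        *p++ = ':';
        if (flags & EX_FLAG_DMA)
                *p++ = 'd';
        if (flags & EX_FLAG_ACCOUNT)
                *p++ = 'a';
        if (p != buf + 1)       /* at least one flag letter was emitted */
                *p++ = '-';
        snprintf(p, len - (p - buf), "%u", size);
}

int main(void)
{
        char id[64];

        build_slab_id(id, sizeof(id), EX_FLAG_ACCOUNT, 128);
        printf("%s\n", id);     /* prints ":a-128" */
        return 0;
}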