Commit d32d6f8a authored by Manfred Spraul, committed by Linus Torvalds

[PATCH] slab: Add more arch overrides to control object alignment

Add ARCH_SLAB_MINALIGN and document ARCH_KMALLOC_MINALIGN: These flags allow
the arch code to override the default minimum object alignment
(BYTES_PER_WORD).
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a161d268
...@@ -128,9 +128,28 @@ ...@@ -128,9 +128,28 @@
#endif #endif
#ifndef ARCH_KMALLOC_MINALIGN #ifndef ARCH_KMALLOC_MINALIGN
/*
* Enforce a minimum alignment for the kmalloc caches.
* Usually, the kmalloc caches are cache_line_size() aligned, except when
* DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
* Note that this flag disables some debug features.
*/
#define ARCH_KMALLOC_MINALIGN 0 #define ARCH_KMALLOC_MINALIGN 0
#endif #endif
#ifndef ARCH_SLAB_MINALIGN
/*
* Enforce a minimum alignment for all caches.
* Intended for archs that get misalignment faults even for BYTES_PER_WORD
* aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
* If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
* some debug features.
*/
#define ARCH_SLAB_MINALIGN 0
#endif
#ifndef ARCH_KMALLOC_FLAGS #ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif #endif
...@@ -1172,7 +1191,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -1172,7 +1191,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long), unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
void (*dtor)(void*, kmem_cache_t *, unsigned long)) void (*dtor)(void*, kmem_cache_t *, unsigned long))
{ {
size_t left_over, slab_size; size_t left_over, slab_size, ralign;
kmem_cache_t *cachep = NULL; kmem_cache_t *cachep = NULL;
/* /*
...@@ -1222,24 +1241,44 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -1222,24 +1241,44 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & ~CREATE_MASK) if (flags & ~CREATE_MASK)
BUG(); BUG();
if (align) { /* Check that size is in terms of words. This is needed to avoid
/* combinations of forced alignment and advanced debugging is * unaligned accesses for some archs when redzoning is used, and makes
* not yet implemented. * sure any on-slab bufctl's are also correctly aligned.
*/
if (size & (BYTES_PER_WORD-1)) {
size += (BYTES_PER_WORD-1);
size &= ~(BYTES_PER_WORD-1);
}
/* calculate out the final buffer alignment: */
/* 1) arch recommendation: can be overridden for debug */
if (flags & SLAB_HWCACHE_ALIGN) {
/* Default alignment: as specified by the arch code.
* Except if an object is really small, then squeeze multiple
* objects into one cacheline.
*/ */
flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); ralign = cache_line_size();
while (size <= ralign/2)
ralign /= 2;
} else { } else {
if (flags & SLAB_HWCACHE_ALIGN) { ralign = BYTES_PER_WORD;
/* Default alignment: as specified by the arch code. }
* Except if an object is really small, then squeeze multiple /* 2) arch mandated alignment: disables debug if necessary */
* into one cacheline. if (ralign < ARCH_SLAB_MINALIGN) {
*/ ralign = ARCH_SLAB_MINALIGN;
align = cache_line_size(); if (ralign > BYTES_PER_WORD)
while (size <= align/2) flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
align /= 2; }
} else { /* 3) caller mandated alignment: disables debug if necessary */
align = BYTES_PER_WORD; if (ralign < align) {
} ralign = align;
} if (ralign > BYTES_PER_WORD)
flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
}
/* 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
*/
align = ralign;
/* Get cache's description obj. */ /* Get cache's description obj. */
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL); cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
...@@ -1247,15 +1286,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -1247,15 +1286,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
goto opps; goto opps;
memset(cachep, 0, sizeof(kmem_cache_t)); memset(cachep, 0, sizeof(kmem_cache_t));
/* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
if (size & (BYTES_PER_WORD-1)) {
size += (BYTES_PER_WORD-1);
size &= ~(BYTES_PER_WORD-1);
}
#if DEBUG #if DEBUG
cachep->reallen = size; cachep->reallen = size;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment