Commit 3de47213 authored by David Rientjes's avatar David Rientjes Committed by Pekka Enberg

slub: use size and objsize orders to disable debug flags

This patch moves the masking of debugging flags which increase a cache's
min order due to metadata when `slub_debug=O' is used from
kmem_cache_flags() to kmem_cache_open().

Instead of defining the maximum metadata size increase in a preprocessor
macro, this approach uses the cache's ->size and ->objsize members to
determine if the min order increased due to debugging options.  If so,
the flags specified in the more appropriately named DEBUG_METADATA_FLAGS
are masked off.

This approach was suggested by Christoph Lameter
<cl@linux-foundation.org>.

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent fa5ec8a1
...@@ -142,11 +142,11 @@ ...@@ -142,11 +142,11 @@
SLAB_POISON | SLAB_STORE_USER) SLAB_POISON | SLAB_STORE_USER)
/* /*
* Debugging flags that require metadata to be stored in the slab, up to * Debugging flags that require metadata to be stored in the slab. These get
* DEBUG_SIZE in size. * disabled when slub_debug=O is used and a cache's min order increases with
* metadata.
*/ */
#define DEBUG_SIZE_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#define DEBUG_SIZE (3 * sizeof(void *) + 2 * sizeof(struct track))
/* /*
* Set of flags that will prevent slab merging * Set of flags that will prevent slab merging
...@@ -1040,27 +1040,13 @@ static unsigned long kmem_cache_flags(unsigned long objsize, ...@@ -1040,27 +1040,13 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name, unsigned long flags, const char *name,
void (*ctor)(void *)) void (*ctor)(void *))
{ {
int debug_flags = slub_debug;
/* /*
* Enable debugging if selected on the kernel commandline. * Enable debugging if selected on the kernel commandline.
*/ */
if (debug_flags) { if (slub_debug && (!slub_debug_slabs ||
if (slub_debug_slabs && !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))) flags |= slub_debug;
goto out;
/*
* Disable debugging that increases slab size if the minimum
* slab order would have increased as a result.
*/
if (disable_higher_order_debug &&
get_order(objsize + DEBUG_SIZE) > get_order(objsize))
debug_flags &= ~DEBUG_SIZE_FLAGS;
flags |= debug_flags;
}
out:
return flags; return flags;
} }
#else #else
...@@ -2488,6 +2474,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, ...@@ -2488,6 +2474,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1)) if (!calculate_sizes(s, -1))
goto error; goto error;
if (disable_higher_order_debug) {
/*
* Disable debugging flags that store metadata if the min slab
* order increased.
*/
if (get_order(s->size) > get_order(s->objsize)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
if (!calculate_sizes(s, -1))
goto error;
}
}
/* /*
* The larger the object size is, the more pages we want on the partial * The larger the object size is, the more pages we want on the partial
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment