Commit d07dbea4 authored by Christoph Lameter, committed by Linus Torvalds

Slab allocators: support __GFP_ZERO in all allocators

A kernel convention for many allocators is that if __GFP_ZERO is passed to an
allocator then the allocated memory should be zeroed.
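
For illustration, a caller-side sketch of that convention (the helper below is
hypothetical and not part of this patch):

  #include <linux/slab.h>

  /*
   * With __GFP_ZERO honored by the allocator, this returns zeroed
   * memory, equivalent to kzalloc(size, GFP_KERNEL).
   */
  static void *get_zeroed_object(size_t size)
  {
          return kmalloc(size, GFP_KERNEL | __GFP_ZERO);
  }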

This is currently not supported by the slab allocators.  The inconsistency
makes zeroing difficult to implement in derived allocators such as the
uncached allocator and the pool allocators.
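
As a sketch of what this means for a derived allocator (the wrapper below is
hypothetical, not the actual uncached or pool allocator code):

  #include <linux/slab.h>
  #include <linux/string.h>

  /* Today a derived allocator has to zero the object itself. */
  static void *pool_alloc_zeroed_today(struct kmem_cache *cache, gfp_t gfp)
  {
          void *obj = kmem_cache_alloc(cache, gfp);

          if (obj)
                  memset(obj, 0, kmem_cache_size(cache));
          return obj;
  }

  /* With __GFP_ZERO supported it can simply forward the flag. */
  static void *pool_alloc_zeroed(struct kmem_cache *cache, gfp_t gfp)
  {
          return kmem_cache_alloc(cache, gfp | __GFP_ZERO);
  }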

In addition, support for zeroed allocations in the slab allocators does not
have a consistent API.  There are no zeroing allocator functions for NUMA node
placement (kmalloc_node, kmem_cache_alloc_node); zeroing allocations are only
provided for the default variants (kzalloc, kmem_cache_zalloc).  __GFP_ZERO
will make zeroing universally available and does not require any additional
functions.
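
For example (a minimal sketch with a made-up function name), a node-placed,
zeroed allocation then needs no new API:

  #include <linux/slab.h>

  /* Hypothetical caller: no kzalloc_node() variant is required. */
  static void *alloc_zeroed_on_node(size_t size, int node)
  {
          /* kmem_cache_alloc_node(cachep, GFP_KERNEL | __GFP_ZERO, node)
           * works the same way for cache objects. */
          return kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, node);
  }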

So add the necessary logic to all slab allocators to support __GFP_ZERO.

The code is added to the hot path.  The gfp flags are on the stack and so the
cacheline is readily available for checking if we want a zeroed object.
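
The check itself is the same small pattern in every allocator (a sketch; the
hunks below use obj_size(cachep), s->objsize or the kmalloc size as the
length):

  #include <linux/compiler.h>
  #include <linux/string.h>
  #include <linux/gfp.h>

  /* Sketch of the hot-path check each allocator gains below. */
  static inline void *maybe_zero(void *object, gfp_t flags, size_t size)
  {
          if (unlikely((flags & __GFP_ZERO) && object))
                  memset(object, 0, size);
          return object;
  }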

Zeroing while allocating is now a frequent operation and we seem to be
gradually approaching 1:1 parity between zeroing and non-zeroing allocations.
The current tree has 3476 uses of kmalloc vs 2731 uses of kzalloc.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6cb8f913
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
@@ -3392,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
+	if (unlikely((flags & __GFP_ZERO) && ptr))
+		memset(ptr, 0, obj_size(cachep));
+
 	return ptr;
 }
 
@@ -3443,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
 
+	if (unlikely((flags & __GFP_ZERO) && objp))
+		memset(objp, 0, obj_size(cachep));
+
 	return objp;
 }
 
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}
+	if (unlikely((gfp & __GFP_ZERO) && b))
+		memset(b, 0, size);
 	return b;
 }
 
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
@@ -1540,7 +1540,7 @@ static void *__slab_alloc(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, void *addr, int length)
 {
 	struct page *page;
 	void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 		page->lockless_freelist = object[page->offset];
 	}
 	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, length);
+
 	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1,
+			__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node,
+			__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	return slab_alloc(s, gfpflags, -1, caller, size);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	return slab_alloc(s, gfpflags, node, caller, size);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)