Commit 272c1d21 authored by Christoph Lameter, committed by Linus Torvalds

SLUB: return ZERO_SIZE_PTR for kmalloc(0)

Instead of returning the smallest available object, return ZERO_SIZE_PTR.

A ZERO_SIZE_PTR can be legitimately used as an object pointer as long as it
is not dereferenced.  Dereferencing ZERO_SIZE_PTR causes a distinctive
fault.  kfree can handle a ZERO_SIZE_PTR in the same way as NULL.

This enables functions to use zero-sized objects, e.g. with n = number of
objects (a fuller sketch follows the example below):

	objects = kmalloc(n * sizeof(object), GFP_KERNEL);

	for (i = 0; i < n; i++)
		objects[i].x = y;

	kfree(objects);
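
For a slightly fuller illustration (a minimal sketch, not part of the patch:
struct object, n, m and y are hypothetical, GFP_KERNEL is assumed, and error
handling is omitted), the same pattern also composes with krealloc, since both
kfree and krealloc now accept ZERO_SIZE_PTR like NULL:

	struct object *objects;
	size_t i;

	/* n == 0 yields ZERO_SIZE_PTR instead of the smallest cache object */
	objects = kmalloc(n * sizeof(*objects), GFP_KERNEL);

	for (i = 0; i < n; i++)
		objects[i].x = y;

	/* growing from a zero-sized allocation works like growing from NULL */
	objects = krealloc(objects, m * sizeof(*objects), GFP_KERNEL);

	/* no-op for both NULL and ZERO_SIZE_PTR */
	kfree(objects);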
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a17627ef
@@ -70,11 +70,8 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  */
 static inline int kmalloc_index(size_t size)
 {
-	/*
-	 * We should return 0 if size == 0 but we use the smallest object
-	 * here for SLAB legacy reasons.
-	 */
-	WARN_ON_ONCE(size == 0);
+	if (!size)
+		return 0;
 
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
@@ -153,13 +150,25 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
+
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc(s, flags);
 	} else
@@ -172,7 +181,7 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_zalloc(s, flags);
 	} else
@@ -188,7 +197,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
 	} else
@@ -2241,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2252,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
 size_t ksize(const void *object)
 {
-	struct page *page = get_object_page(object);
+	struct page *page;
 	struct kmem_cache *s;
 
+	if (object == ZERO_SIZE_PTR)
+		return 0;
+
+	page = get_object_page(object);
 	BUG_ON(!page);
 	s = page->slab;
 	BUG_ON(!s);
@@ -2293,7 +2297,13 @@ void kfree(const void *x)
 	struct kmem_cache *s;
 	struct page *page;
 
-	if (!x)
+	/*
+	 * This has to be an unsigned comparison. According to Linus
+	 * some gcc version treat a pointer as a signed entity. Then
+	 * this comparison would be true for all "negative" pointers
+	 * (which would cover the whole upper half of the address space).
+	 */
+	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
 		return;
 
 	page = virt_to_head_page(x);
@@ -2398,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	if (unlikely(!p))
+	if (unlikely(!p || p == ZERO_SIZE_PTR))
 		return kmalloc(new_size, flags);
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
@@ -2652,7 +2662,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 	return slab_alloc(s, gfpflags, -1, caller);
 }
 
@@ -2663,7 +2673,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 	return slab_alloc(s, gfpflags, node, caller);
 }
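
A note on the new kfree entry check: casting to unsigned long and comparing
against ZERO_SIZE_PTR folds the NULL test and the zero-size test into a single
branch, and the comparison must be unsigned so that high kernel addresses are
not treated as "negative" values that would also pass the test. A standalone
sketch of the same check (the ZERO_SIZE_PTR value is taken from the patch; the
is_zero_or_null helper and the user-space harness are purely illustrative):

	#include <stdio.h>

	#define ZERO_SIZE_PTR ((void *)16)

	/*
	 * Mirrors the check at the top of kfree(): true for pointers that
	 * need no freeing. With a signed comparison, addresses in the upper
	 * half of the address space would look negative and wrongly pass.
	 */
	static int is_zero_or_null(const void *x)
	{
		return (unsigned long)x <= (unsigned long)ZERO_SIZE_PTR;
	}

	int main(void)
	{
		int object;

		printf("%d\n", is_zero_or_null(NULL));		/* 1 */
		printf("%d\n", is_zero_or_null(ZERO_SIZE_PTR));	/* 1 */
		printf("%d\n", is_zero_or_null(&object));	/* 0 */
		return 0;
	}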