Commit 1d2c8eea authored by Christoph Hellwig, committed by Linus Torvalds

[PATCH] slab: clean up leak tracking ifdefs a little bit

- rename ____kmalloc to kmalloc_track_caller so that people have a chance
  to guess what it does just from its name.  Add a comment describing it
  for those who don't.
- move it after kmalloc in slab.h so people get less confused when they
  are just looking for kmalloc.
- move things around in slab.c a little to reduce the ifdef mess.

[penberg@cs.helsinki.fi: Fix up reversed #ifdef]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 88ca3b94
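
The rename is easier to appreciate with a small illustration. The sketch below is plain user-space C, not kernel code, and my_alloc_track_caller()/my_strdup() are hypothetical names used only here; it mimics what kmalloc_track_caller gives wrappers such as kstrdup(): the wrapper forwards __builtin_return_address(0), so a leak report blames the code that asked for the memory rather than the wrapper itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocator that records which call site asked for the memory. */
static void *my_alloc_track_caller(size_t size, void *caller)
{
        void *p = malloc(size);

        /* A real leak tracker would store "caller" alongside "p". */
        printf("allocated %zu bytes for call site %p\n", size, caller);
        return p;
}

/* Wrapper in the role of kstrdup(): pass on the *wrapper's* caller. */
static char *my_strdup(const char *s)
{
        size_t len = strlen(s) + 1;
        char *buf = my_alloc_track_caller(len, __builtin_return_address(0));

        if (buf)
                memcpy(buf, s, len);
        return buf;
}

int main(void)
{
        char *copy = my_strdup("hello");        /* tracker blames main(), not my_strdup() */

        if (!copy)
                return 1;
        puts(copy);
        free(copy);
        return 0;
}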
include/linux/slab.h
@@ -77,13 +77,6 @@ struct cache_sizes {
 extern struct cache_sizes malloc_sizes[];
 extern void *__kmalloc(size_t, gfp_t);
-#ifndef CONFIG_DEBUG_SLAB
-#define ____kmalloc(size, flags) __kmalloc(size, flags)
-#else
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define ____kmalloc(size, flags) \
-        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
-#endif
 
 /**
  * kmalloc - allocate memory
@@ -153,6 +146,23 @@ static inline void *kmalloc(size_t size, gfp_t flags)
         return __kmalloc(size, flags);
 }
 
+/*
+ * kmalloc_track_caller is a special version of kmalloc that records the
+ * calling function of the routine calling it for slab leak tracking instead
+ * of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc comes from a widely-used standard
+ * allocator where we care about the real place the memory allocation
+ * request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_track_caller(size, flags) \
+        __kmalloc(size, flags)
+#else
+extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+#define kmalloc_track_caller(size, flags) \
+        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+#endif
+
 extern void *__kzalloc(size_t, gfp_t);
 
 /**
@@ -271,7 +281,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
 #define kmalloc_node(s, f, n) kmalloc(s, f)
 #define kzalloc(s, f) __kzalloc(s, f)
-#define ____kmalloc kmalloc
+#define kmalloc_track_caller kmalloc
 #endif /* CONFIG_SLOB */
mm/slab.c
@@ -3488,22 +3488,25 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 }
 
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc(size_t size, gfp_t flags)
 {
-#ifndef CONFIG_DEBUG_SLAB
-        return __do_kmalloc(size, flags, NULL);
-#else
         return __do_kmalloc(size, flags, __builtin_return_address(0));
-#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
         return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#else
+void *__kmalloc(size_t size, gfp_t flags)
+{
+        return __do_kmalloc(size, flags, NULL);
+}
+EXPORT_SYMBOL(__kmalloc);
 #endif
 
 /**
mm/util.c
@@ -11,7 +11,7 @@
  */
 void *__kzalloc(size_t size, gfp_t flags)
 {
-        void *ret = ____kmalloc(size, flags);
+        void *ret = kmalloc_track_caller(size, flags);
         if (ret)
                 memset(ret, 0, size);
         return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
                 return NULL;
 
         len = strlen(s) + 1;
-        buf = ____kmalloc(len, gfp);
+        buf = kmalloc_track_caller(len, gfp);
         if (buf)
                 memcpy(buf, s, len);
         return buf;
@@ -51,7 +51,7 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
 {
         void *p;
 
-        p = ____kmalloc(len, gfp);
+        p = kmalloc_track_caller(len, gfp);
         if (p)
                 memcpy(p, src, len);
         return p;
net/core/skbuff.c
@@ -156,7 +156,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         /* Get the DATA. Size must match skb_add_mtu(). */
         size = SKB_DATA_ALIGN(size);
-        data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+        data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
+                        gfp_mask);
         if (!data)
                 goto nodata;
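
For reference, the CONFIG_DEBUG_SLAB switch above follows a simple pattern: the caller-recording variant only exists when slab debugging is enabled, otherwise the macro collapses to the plain allocator so normal builds pay nothing. A minimal user-space sketch of that pattern, with the hypothetical TRACK_CALLERS standing in for CONFIG_DEBUG_SLAB and alloc_plain()/alloc_with_caller() invented for the example:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_plain(size_t size)
{
        return malloc(size);
}

static void *alloc_with_caller(size_t size, void *caller)
{
        printf("call site %p asked for %zu bytes\n", caller, size);
        return malloc(size);
}

/* Same shape as the kmalloc_track_caller definition in slab.h. */
#ifndef TRACK_CALLERS
#define alloc_track_caller(size) alloc_plain(size)
#else
#define alloc_track_caller(size) \
        alloc_with_caller(size, __builtin_return_address(0))
#endif

int main(void)
{
        void *p = alloc_track_caller(32);

        free(p);
        return 0;
}

Built with -DTRACK_CALLERS the call site's address is printed; built without it, alloc_track_caller(32) is just alloc_plain(32), the same zero-overhead trade-off kmalloc_track_caller makes when CONFIG_DEBUG_SLAB is off.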