Commit 0f24f128 authored by Li Zefan, committed by Ingo Molnar

tracing, slab: Define kmem_cache_alloc_notrace ifdef CONFIG_TRACING

Define kmem_cache_alloc_{,node}_notrace() when CONFIG_TRACING is
enabled; otherwise perf-kmem shows wrong stats when CONFIG_KMEMTRACE
is not set, because a kmalloc() memory allocation may be traced by
both trace_kmalloc() and trace_kmem_cache_alloc().
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: linux-mm@kvack.org <linux-mm@kvack.org>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
LKML-Reference: <4B21F89A.7000801@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 12558038
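Why the _notrace variants matter: the inline kmalloc() fast path emits its
own trace_kmalloc() event, so it must allocate through a helper that does
not also emit trace_kmem_cache_alloc(). What follows is a condensed sketch
of the SLAB fast path in include/linux/slab_def.h from this era, not the
verbatim source: the size-to-cache lookup loop and error handling are
elided, and the comments are added.

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/* ... walk malloc_sizes[] until cs_size >= size ... */
		cachep = malloc_sizes[i].cs_cachep;

		/*
		 * Allocate via the _notrace variant so that only the
		 * trace_kmalloc() below fires; plain kmem_cache_alloc()
		 * would also fire trace_kmem_cache_alloc(), and perf-kmem
		 * would count the allocation twice.
		 */
		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}

Before this patch, the _notrace variants existed only under
CONFIG_KMEMTRACE, so a CONFIG_TRACING=y, CONFIG_KMEMTRACE=n build fell
through to the double-tracing path sketched above.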
include/linux/slab_def.h

@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
@@ -166,7 +166,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 					   gfp_t flags,
 					   int nodeid);
include/linux/slub_def.h

@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
 #else
 static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 					   gfp_t gfpflags,
 					   int node);
mm/slab.c

@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
 	return cachep->buffer_size;
@@ -3558,7 +3558,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3621,7 +3621,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 				    gfp_t flags,
 				    int nodeid)
mm/slub.c

@@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
 	return slab_alloc(s, gfpflags, -1, _RET_IP_);
@@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node)
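For completeness: when CONFIG_TRACING is not set, the #else branches seen
in the header hunks above provide trivial inline fallbacks, so callers of
the _notrace names compile either way. A sketch of the SLAB header
fallback (the SLUB header is analogous):

static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	/* No tracing configured: just forward to the normal allocator. */
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	/* Object size is only reported to the tracer; 0 when untraced. */
	return 0;
}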