Commit 7c0cb9c6 authored by Ezequiel Garcia, committed by Pekka Enberg

mm, slab: Replace 'caller' type, void* -> unsigned long

This allows us to use _RET_IP_ instead of __builtin_return_address(0),
thus achieving implementation consistency in all three allocators.
Though perhaps a nitpick, the real goal behind this patch is
to be able to obtain common code between SLAB and SLUB.
Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent f3f74101
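
For reference: _RET_IP_ is a thin wrapper around the very builtin the old code called directly. At the time of this commit it was defined in include/linux/kernel.h as shown below (reproduced here for context; only the first definition is relevant to this patch):

	/* include/linux/kernel.h */
	#define _RET_IP_	(unsigned long)__builtin_return_address(0)
	#define _THIS_IP_	({ __label__ __here; __here: (unsigned long)&&__here; })

Since _RET_IP_ already evaluates to an unsigned long, widening 'caller' to that type lets every allocation path pass it directly; the few debug sites that still need a pointer (e.g. *dbg_userword) cast back with (void *)caller, as the hunks below show.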
mm/slab.c
@@ -3084,7 +3084,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
 {
 	struct page *page;
 	unsigned int objnr;
@@ -3104,7 +3104,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	objnr = obj_to_index(cachep, slabp, objp);
@@ -3117,7 +3117,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
 		} else {
@@ -3270,7 +3270,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
 {
 	if (!objp)
 		return objp;
@@ -3287,7 +3287,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3562,7 +3562,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
  */
 static __always_inline void *
 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3648,7 +3648,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3784,7 +3784,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  * be in this state _before_ it is released.  Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
@@ -3824,7 +3824,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
@@ -3839,7 +3839,7 @@ kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret;
 
-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = __cache_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
@@ -3851,8 +3851,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
@@ -3870,8 +3869,8 @@ void *kmem_cache_alloc_node_trace(size_t size,
 {
 	void *ret;
 
-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				 __builtin_return_address(0));
+	ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3881,7 +3880,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
@@ -3894,21 +3893,20 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3921,7 +3919,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
 {
 	struct kmem_cache *cachep;
 	void *ret;
@@ -3936,7 +3934,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = __cache_alloc(cachep, flags, caller);
 
-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
 	return ret;
@@ -3946,20 +3944,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -3980,7 +3978,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4011,7 +4009,7 @@ void kfree(const void *objp)
 	debug_check_no_locks_freed(objp, c->object_size);
 	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);