Commit 26a40990 authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/sl[au]b: cleanup kmem_cache_alloc[_node]_trace()

Despite its name, kmem_cache_alloc[_node]_trace() is a hook for inlined
kmalloc. So rename it to kmalloc[_node]_trace().

Move its implementation to slab_common.c by using
__kmem_cache_alloc_node(), but keep the CONFIG_TRACING=n variants to save
a function call when CONFIG_TRACING=n.

Use __assume_kmalloc_alignment for kmalloc[_node]_trace instead of
__assume_slab_alignment. Generally kmalloc has larger alignment
requirements.
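
For reference only (not part of this diff; paraphrased from the
__assume_*_alignment definitions in include/linux/slab.h), the two
attribute macros differ in which minimum alignment the compiler is
allowed to assume for the returned pointer:

	/* kmalloc guarantees ARCH_KMALLOC_MINALIGN, slab caches only ARCH_SLAB_MINALIGN */
	#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
	#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)

ARCH_KMALLOC_MINALIGN can be raised to ARCH_DMA_MINALIGN on some
architectures, so annotating the kmalloc hooks with
__assume_kmalloc_alignment advertises the (typically larger) kmalloc
guarantee to the compiler.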
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent b1405135
@@ -449,16 +449,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assum
 					 __malloc;
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-		   __assume_slab_alignment __alloc_size(3);
-
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-					 int node, size_t size) __assume_slab_alignment
-					 __alloc_size(4);
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+		    __assume_kmalloc_alignment __alloc_size(3);
+
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size) __assume_kmalloc_alignment
+			 __alloc_size(4);
 #else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
-								    gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
@@ -466,8 +466,9 @@ static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_
 	return ret;
 }
 
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-							  int node, size_t size)
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size)
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
@@ -550,7 +551,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_trace(
+		return kmalloc_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, size);
 #endif
@@ -572,9 +573,9 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(
+		return kmalloc_node_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
......
@@ -3519,22 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
-#ifdef CONFIG_TRACING
-void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	void *ret;
-
-	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
-
-	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret, cachep,
-		      size, cachep->size, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -3568,25 +3552,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 			       orig_size, caller);
 }
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-				  gfp_t flags,
-				  int nodeid,
-				  size_t size)
-{
-	void *ret;
-
-	ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
-
-	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc_node(_RET_IP_, ret, cachep,
-			   size, cachep->size,
-			   flags, nodeid);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
-
 #ifdef CONFIG_PRINTK
 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
......
@@ -1000,6 +1000,33 @@ size_t __ksize(const void *object)
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 EXPORT_SYMBOL(__ksize);
+
+#ifdef CONFIG_TRACING
+void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
+					    size, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size,
+			   gfpflags, NUMA_NO_NODE);
+
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_trace);
+
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size)
+{
+	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
+
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_node_trace);
+#endif /* !CONFIG_TRACING */
 #endif /* !CONFIG_SLOB */
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
......
@@ -3270,17 +3270,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			       caller, orig_size);
 }
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-
-	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3292,22 +3281,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-				  gfp_t gfpflags,
-				  int node, size_t size)
-{
-	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
-
-	trace_kmalloc_node(_RET_IP_, ret, s,
-			   size, s->size, gfpflags, node);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
-
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
......