Commit 843adf23 authored by Ingo Molnar

Merge branches 'tracing/function-graph-tracer', 'tracing/kmemtrace' and 'tracing/markers' into tracing/core
@@ -121,11 +121,24 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
         void *ret;
 
         if (__builtin_constant_p(size)) {
-                if (size > PAGE_SIZE)
+                if (size > SLUB_MAX_SIZE)
                         return kmalloc_large(size, flags);
 
                 if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
         void *ret;
 
         if (__builtin_constant_p(size) &&
-                size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+                size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                 struct kmem_cache *s = kmalloc_slab(size);
 
                 if (!s)
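(Not part of the commit.) The two new constants split kmalloc() requests into a slab path and a page-allocator path: sizes up to SLUB_MAX_SIZE (a full page) are served from kmalloc_caches[], which therefore needs SLUB_PAGE_SHIFT = PAGE_SHIFT + 1 slots so that index PAGE_SHIFT is valid, while larger sizes fall through to kmalloc_large(). Below is a minimal user-space sketch of that dispatch, with an assumed PAGE_SHIFT of 12 and a hypothetical kmalloc_index_or_large() helper standing in for the kernel's kmalloc_index()/kmalloc_slab() logic.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative values only; the kernel gets PAGE_SIZE/PAGE_SHIFT from the
 * architecture. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE   (PAGE_SIZE)        /* largest size served by the kmalloc caches */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)   /* number of general cache slots */

/* Hypothetical stand-in for the kernel's kmalloc_index()/kmalloc_slab()
 * dispatch: returns the kmalloc_caches[] index for a request, or -1 when
 * the request would be passed to kmalloc_large() instead. */
static int kmalloc_index_or_large(size_t size)
{
    int i;

    if (size > SLUB_MAX_SIZE)
        return -1;    /* too big for the slab fastpath */

    /* The general caches hold 2^i byte objects; pick the smallest power of
     * two that fits, starting at 2^3 = 8 bytes (the assumed minimum object
     * size; the special 96/192-byte caches are ignored here). */
    for (i = 3; i < SLUB_PAGE_SHIFT; i++)
        if (size <= (1UL << i))
            return i;
    return -1;
}

int main(void)
{
    size_t sizes[] = { 8, 192, 4096, 8192 };
    unsigned int k;

    for (k = 0; k < sizeof(sizes) / sizeof(sizes[0]); k++)
        printf("size %zu -> cache index %d\n",
               sizes[k], kmalloc_index_or_large(sizes[k]));
    return 0;
}
```

With these illustrative values, an 8 KiB request returns -1 (it would be handed to the page allocator), while a 4 KiB request maps to cache index 12, the extra slot that SLUB_PAGE_SHIFT provides.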
@@ -945,7 +945,7 @@ config TRACEPOINTS
 
 config MARKERS
         bool "Activate markers"
-        depends on TRACEPOINTS
+        select TRACEPOINTS
         help
           Place an empty function call at each marker site. Can be
           dynamically changed for a probe function.
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *              Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         struct kmem_cache *s;
         void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large(size, flags);
 
         s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         struct kmem_cache *s;
         void *ret;
 
-        if (unlikely(size > PAGE_SIZE)) {
+        if (unlikely(size > SLUB_MAX_SIZE)) {
                 ret = kmalloc_large_node(size, flags, node);
 
                 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
                 caches++;
         }
 
-        for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
                         "kmalloc", 1 << i, GFP_KERNEL);
                 caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
         slab_state = UP;
 
         /* Provide the correct kmalloc names now that the caches are up */
-        for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                 kmalloc_caches[i].name =
                         kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         struct kmem_cache *s;
         void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large(size, gfpflags);
 
         s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         struct kmem_cache *s;
         void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large_node(size, gfpflags, node);
 
         s = get_slab(size, gfpflags);
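(Again, not part of the commit.) The loop-bound rewrite in kmem_cache_init() is behaviour-preserving: i < SLUB_PAGE_SHIFT visits exactly the same indices as the old i <= PAGE_SHIFT, and the highest index used, SLUB_PAGE_SHIFT - 1, matches the new kmalloc_caches[SLUB_PAGE_SHIFT] array size, so the array sizing and the loops can no longer drift apart. Below is a small stand-alone check of that arithmetic, using illustrative values for PAGE_SHIFT and KMALLOC_SHIFT_LOW.

```c
#include <assert.h>
#include <stdio.h>

/* Illustrative values only; the kernel derives these from the
 * architecture's page size and minimum allocation size. */
#define PAGE_SHIFT        12
#define SLUB_PAGE_SHIFT   (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW 3   /* assumed: smallest general cache is 2^3 = 8 bytes */

int main(void)
{
    int caches_old = 0, caches_new = 0;
    int i;

    /* Old bound: i <= PAGE_SHIFT */
    for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
        caches_old++;

    /* New bound: i < SLUB_PAGE_SHIFT, i.e. i < PAGE_SHIFT + 1 */
    for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
        caches_new++;

    /* Both loops cover indices KMALLOC_SHIFT_LOW..PAGE_SHIFT; the last
     * index touched is SLUB_PAGE_SHIFT - 1, so an array declared as
     * kmalloc_caches[SLUB_PAGE_SHIFT] is exactly big enough. */
    assert(caches_old == caches_new);
    printf("general caches: %d (indices %d..%d)\n",
           caches_new, KMALLOC_SHIFT_LOW, SLUB_PAGE_SHIFT - 1);
    return 0;
}
```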