Commit 95203fe7 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] optimize fixed-sized kmalloc calls

From: Manfred Spraul and Brian Gerst

The patch performs the kmalloc cache lookup for constant kmalloc calls at
compile time.  The idea is that the loop in kmalloc takes a significant
amount of time, and for kmalloc(4096,GFP_KERNEL), that lookup can happen
entirely at compile time.

A problem has been seen with gcc-3.2.2-5 from Red Hat.  This code:

    if(__builtin_constant_p(size)) {
          if(size < 32) return kmem_cache_alloc(...);
          if(size < 64) return kmem_cache_alloc(...);
          if(size < 96) return kmem_cache_alloc(...);
          if(size < 128) return kmem_cache_alloc(...);
          ...
    }

doesn't work, because gcc only optimizes the first two or three comparisons,
and then suddenly generates code.

But we did it that way anyway.  Apparently it's fixed in later compilers.
parent 9c1d0794
...@@ -62,7 +62,38 @@ extern void *kmem_cache_alloc(kmem_cache_t *, int); ...@@ -62,7 +62,38 @@ extern void *kmem_cache_alloc(kmem_cache_t *, int);
extern void kmem_cache_free(kmem_cache_t *, void *); extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *); extern unsigned int kmem_cache_size(kmem_cache_t *);
extern void *kmalloc(size_t, int); /* Size description struct for general caches. */
/*
 * Size description for the general-purpose kmalloc caches.  One entry
 * per supported allocation size; the array defined in mm/slab.c is
 * terminated by a zero-sized sentinel entry ({ 0, }).
 */
struct cache_sizes {
size_t cs_size;	/* object size served by this cache */
kmem_cache_t *cs_cachep;	/* cache for normal allocations */
kmem_cache_t *cs_dmacachep;	/* cache for GFP_DMA allocations */
};
/* Defined in mm/slab.c; one entry per CACHE(x) in kmalloc_sizes.h. */
extern struct cache_sizes malloc_sizes[];
/* Runtime-lookup fallback used by kmalloc() for non-constant sizes. */
extern void *__kmalloc(size_t, int);
/*
 * kmalloc - allocate kernel memory
 * @size: number of bytes to allocate
 * @flags: GFP allocation flags (GFP_DMA selects the DMA caches)
 *
 * When @size is a compile-time constant, the cache lookup is resolved
 * at compile time: each CACHE(x) entry in kmalloc_sizes.h expands to a
 * size comparison below, so the runtime loop in __kmalloc() is skipped
 * entirely.  A constant size larger than the biggest general cache
 * produces a link-time error via __you_cannot_kmalloc_that_much().
 *
 * Fix: include the sizes table as <linux/kmalloc_sizes.h> (as mm/slab.c
 * does) rather than "kmalloc_sizes.h" — the quote form resolves relative
 * to the including file's directory and would break for any file outside
 * include/linux that includes <linux/slab.h>.
 */
static inline void *kmalloc(size_t size, int flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		{
			/* Deliberately undefined: constant size too large
			 * for any general cache -> link error. */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
extern void kfree(const void *); extern void kfree(const void *);
extern unsigned int ksize(const void *); extern unsigned int ksize(const void *);
......
...@@ -95,7 +95,8 @@ EXPORT_SYMBOL(kmem_cache_free); ...@@ -95,7 +95,8 @@ EXPORT_SYMBOL(kmem_cache_free);
EXPORT_SYMBOL(kmem_cache_size); EXPORT_SYMBOL(kmem_cache_size);
EXPORT_SYMBOL(set_shrinker); EXPORT_SYMBOL(set_shrinker);
EXPORT_SYMBOL(remove_shrinker); EXPORT_SYMBOL(remove_shrinker);
EXPORT_SYMBOL(kmalloc); EXPORT_SYMBOL(malloc_sizes);
EXPORT_SYMBOL(__kmalloc);
EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(kfree);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
EXPORT_SYMBOL(__alloc_percpu); EXPORT_SYMBOL(__alloc_percpu);
......
...@@ -385,11 +385,7 @@ static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; ...@@ -385,11 +385,7 @@ static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
#define GET_PAGE_SLAB(pg) ((struct slab *)(pg)->list.prev) #define GET_PAGE_SLAB(pg) ((struct slab *)(pg)->list.prev)
/* These are the default caches for kmalloc. Custom caches can have other sizes. */ /* These are the default caches for kmalloc. Custom caches can have other sizes. */
static struct cache_sizes { struct cache_sizes malloc_sizes[] = {
size_t cs_size;
kmem_cache_t *cs_cachep;
kmem_cache_t *cs_dmacachep;
} malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) }, #define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h> #include <linux/kmalloc_sizes.h>
{ 0, } { 0, }
...@@ -1967,7 +1963,7 @@ void * kmem_cache_alloc (kmem_cache_t *cachep, int flags) ...@@ -1967,7 +1963,7 @@ void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
* platforms. For example, on i386, it means that the memory must come * platforms. For example, on i386, it means that the memory must come
* from the first 16MB. * from the first 16MB.
*/ */
void * kmalloc (size_t size, int flags) void * __kmalloc (size_t size, int flags)
{ {
struct cache_sizes *csizep = malloc_sizes; struct cache_sizes *csizep = malloc_sizes;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment