Commit e0f06641 authored by Akinobu Mita, committed by Chris Wright

[PATCH] slob: fix page order calculation on not 4KB page

SLOB doesn't calculate the correct page order when the page size is not 4KB, because its private helper find_order() hard-codes a 4096-byte page.  This patch fixes it by using get_order() instead of find_order(), SLOB's own version of get_order().
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
parent ab6823b4
@@ -150,15 +150,6 @@ static void slob_free(void *block, int size)
 	spin_unlock_irqrestore(&slob_lock, flags);
 }
 
-static int FASTCALL(find_order(int size));
-static int fastcall find_order(int size)
-{
-	int order = 0;
-	for ( ; size > 4096 ; size >>=1)
-		order++;
-	return order;
-}
-
 void *__kmalloc(size_t size, gfp_t gfp)
 {
 	slob_t *m;
@@ -174,7 +165,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
 	if (!bb)
 		return 0;
 
-	bb->order = find_order(size);
+	bb->order = get_order(size);
 	bb->pages = (void *)__get_free_pages(gfp, bb->order);
 
 	if (bb->pages) {
@@ -284,7 +275,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align);
 	else
-		b = (void *)__get_free_pages(flags, find_order(c->size));
+		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
 		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
@@ -311,7 +302,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	if (c->size < PAGE_SIZE)
 		slob_free(b, c->size);
 	else
-		free_pages((unsigned long)b, find_order(c->size));
+		free_pages((unsigned long)b, get_order(c->size));
 }
 EXPORT_SYMBOL(kmem_cache_free);
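
For illustration only (not part of the patch), here is a small user-space sketch contrasting the removed find_order() with a get_order()-style calculation parameterised by page size. The helper order_for() and the 64KB/128KB figures are hypothetical stand-ins chosen to show how the 4096 hard-coded in find_order() over-estimates the page order once PAGE_SIZE is larger than 4KB.

#include <stdio.h>

/* Copy of the removed SLOB helper: it assumes a 4KB page. */
static int find_order(int size)
{
	int order = 0;
	for ( ; size > 4096 ; size >>= 1)
		order++;
	return order;
}

/* Simplified stand-in for the kernel's get_order(): the smallest order such
 * that (page_size << order) >= size.  page_size is a parameter here so the
 * example runs in user space; in the kernel it is PAGE_SIZE. */
static int order_for(unsigned long size, unsigned long page_size)
{
	int order = 0;
	while ((page_size << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 128 * 1024;      /* a 128KB allocation */
	unsigned long page_size = 64 * 1024;  /* e.g. a 64KB-page configuration */

	/* find_order() reports order 5, i.e. 32 pages; with 64KB pages that is
	 * 2MB handed to __get_free_pages() for a 128KB request. */
	printf("find_order(%lu) = %d\n", size, find_order((int)size));

	/* The page-size-aware calculation gives order 1 (two 64KB pages). */
	printf("order_for(%lu, %lu) = %d\n", size, page_size,
	       order_for(size, page_size));
	return 0;
}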