Commit 1f99a283 authored by Christoph Lameter, committed by Linus Torvalds

SLUB: clean up krealloc

We really do not need all this gaga there.

ksize gives us all the information we need to figure out if the object can
cope with the new size.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent abcd08a6
mm/slub.c
@@ -2199,9 +2199,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-        struct kmem_cache *new_cache;
         void *ret;
-        struct page *page;
+        size_t ks;
 
         if (unlikely(!p))
                 return kmalloc(new_size, flags);
@@ -2211,19 +2210,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
                 return NULL;
         }
 
-        page = virt_to_head_page(p);
-
-        new_cache = get_slab(new_size, flags);
-
-        /*
-         * If new size fits in the current cache, bail out.
-         */
-        if (likely(page->slab == new_cache))
+        ks = ksize(p);
+        if (ks >= new_size)
                 return (void *)p;
 
         ret = kmalloc(new_size, flags);
         if (ret) {
-                memcpy(ret, p, min(new_size, ksize(p)));
+                memcpy(ret, p, min(new_size, ks));
                 kfree(p);
         }
         return ret;
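For readers outside the kernel tree, the shortcut is easy to demonstrate in userspace. The sketch below is a minimal analogue of the simplified logic, not kernel code: it uses glibc's malloc_usable_size() as a stand-in for ksize(), and xrealloc() is a hypothetical name used in place of krealloc(). It shows why "usable size >= requested size" is the only test needed before reusing an allocation.

/*
 * Minimal userspace analogue of the simplified krealloc() logic.
 * Assumptions: glibc's malloc_usable_size() stands in for ksize(),
 * and xrealloc() is a hypothetical name, not a kernel API.
 */
#include <malloc.h>   /* malloc_usable_size() (glibc) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *xrealloc(void *p, size_t new_size)
{
        size_t ks;
        void *ret;

        if (!p)
                return malloc(new_size);
        if (!new_size) {
                free(p);
                return NULL;
        }

        /* The point of the cleanup: one size query decides reuse. */
        ks = malloc_usable_size(p);
        if (ks >= new_size)
                return p;       /* object already big enough, keep it */

        ret = malloc(new_size);
        if (ret) {
                /* Grow path: ks < new_size here, so copy ks bytes. */
                memcpy(ret, p, ks);
                free(p);
        }
        return ret;
}

int main(void)
{
        char *buf = xrealloc(NULL, 10);

        strcpy(buf, "slub");
        /* Often returns the same pointer: the allocator's usable
         * size for a 10-byte request is frequently >= 12 already. */
        buf = xrealloc(buf, 12);
        printf("%s (usable %zu)\n", buf, malloc_usable_size(buf));
        free(buf);
        return 0;
}

The kernel patch has the same structure: ksize(p) reports how much the allocator actually reserved for the object, so comparing it with new_size covers both the same-cache case the old code checked for and the shrink case, without touching struct page or get_slab() at all.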