Commit 2408c550 authored by Satyam Sharma, committed by Linus Torvalds

{slub, slob}: use unlikely() for kfree(ZERO_OR_NULL_PTR) check

kfree(NULL) would normally occur only in error paths, and
kfree(ZERO_SIZE_PTR) is uncommon as well, so use unlikely() for the
condition check in SLUB's and SLOB's kfree() to optimize for the common
case.  SLAB does this already.
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c92ff1bd
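
For reference, unlikely() is a branch-prediction hint built on GCC's
__builtin_expect(), and ZERO_OR_NULL_PTR() matches both NULL and the
ZERO_SIZE_PTR sentinel that kmalloc(0) returns.  A minimal sketch of the
relevant definitions, roughly as they appear in include/linux/compiler.h
and include/linux/slab.h (exact forms vary by kernel version):

	/* Branch hints: tell the compiler which way a branch usually
	 * goes, so the common case becomes the straight-line path. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* kmalloc(0) returns ZERO_SIZE_PTR, a non-NULL sentinel that must
	 * never be dereferenced but must be accepted by kfree()/ksize(). */
	#define ZERO_SIZE_PTR ((void *)16)

	/* True for NULL and ZERO_SIZE_PTR alike, hence the single check. */
	#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)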
mm/slob.c

@@ -360,7 +360,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	BUG_ON(!size);
 
@@ -466,7 +466,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -484,7 +484,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
mm/slub.c

@@ -2449,7 +2449,7 @@ size_t ksize(const void *object)
 	struct page *page;
 	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(object))
+	if (unlikely(ZERO_OR_NULL_PTR(object)))
 		return 0;
 
 	page = get_object_page(object);
@@ -2483,7 +2483,7 @@ void kfree(const void *x)
 {
 	struct page *page;
 
-	if (ZERO_OR_NULL_PTR(x))
+	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2800,7 +2800,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 			get_order(size));
 	s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
@@ -2816,7 +2816,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 			get_order(size));
 	s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);