{slub, slob}: use unlikely() for kfree(ZERO_OR_NULL_PTR) check
kfree(NULL) would normally occur only in error paths, and
kfree(ZERO_SIZE_PTR) is uncommon as well, so let's use unlikely() for
the ZERO_OR_NULL_PTR check in SLUB's and SLOB's kfree() to optimize
for the common case. SLAB already does this. The same check in ksize()
and in SLUB's kmalloc_track_caller paths, where get_slab() can return
ZERO_SIZE_PTR for zero-size requests, is converted as well.
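For reference, here is a minimal userspace sketch of the pattern,
assuming GCC/Clang for __builtin_expect(); the macro definitions
mirror include/linux/compiler.h and include/linux/slab.h, and
demo_kfree() is a hypothetical stand-in for kfree(), for illustration
only:

	#include <stdio.h>

	/* mirrors include/linux/compiler.h */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/*
	 * mirrors include/linux/slab.h: ZERO_SIZE_PTR is the non-NULL
	 * sentinel returned for zero-byte allocations
	 */
	#define ZERO_SIZE_PTR		((void *)16)
	#define ZERO_OR_NULL_PTR(x)	((unsigned long)(x) <= \
					 (unsigned long)ZERO_SIZE_PTR)

	/* hypothetical stand-in for kfree(), illustration only */
	static void demo_kfree(void *x)
	{
		if (unlikely(ZERO_OR_NULL_PTR(x)))	/* hinted cold */
			return;
		printf("would free %p\n", x);		/* hinted hot */
	}

	int main(void)
	{
		int obj;

		demo_kfree(NULL);		/* error-path case */
		demo_kfree(ZERO_SIZE_PTR);	/* kmalloc(0) result */
		demo_kfree(&obj);		/* the common case */
		return 0;
	}

With the hint, the compiler can lay out the early return as the
out-of-line cold path, keeping the actual free work on the
straight-line hot path.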
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/slob.c b/mm/slob.c
index ec33fcd..a886e83 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -360,7 +360,7 @@
slobidx_t units;
unsigned long flags;
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
BUG_ON(!size);
@@ -466,7 +466,7 @@
{
struct slob_page *sp;
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
sp = (struct slob_page *)virt_to_page(block);
@@ -484,7 +484,7 @@
{
struct slob_page *sp;
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return 0;
sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index edeb942..b7d3664 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,7 @@
struct page *page;
struct kmem_cache *s;
- if (ZERO_OR_NULL_PTR(object))
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
return 0;
page = get_object_page(object);
@@ -2483,7 +2483,7 @@
{
struct page *page;
- if (ZERO_OR_NULL_PTR(x))
+ if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
page = virt_to_head_page(x);
@@ -2800,7 +2800,7 @@
get_order(size));
s = get_slab(size, gfpflags);
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, -1, caller);
@@ -2816,7 +2816,7 @@
get_order(size));
s = get_slab(size, gfpflags);
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, node, caller);