Commit afcf30e6 authored by Manik Raina, committed by David Mosberger

[PATCH] More __builtin_expect() cleanup in favour of likely/unlikely.

parent 4c60f4e2
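For reference, likely() and unlikely() are thin wrappers around __builtin_expect() provided by <linux/compiler.h>. A rough sketch of the wrappers (the exact definitions vary between kernel versions) is:

	/* sketch of the <linux/compiler.h> wrappers, not the verbatim kernel definitions */
	#define likely(x)	__builtin_expect(!!(x), 1)	/* condition expected to be true */
	#define unlikely(x)	__builtin_expect(!!(x), 0)	/* condition expected to be false */

Each conversion below, e.g. __builtin_expect(ret != NULL, 1) becoming likely(ret != NULL), therefore keeps the same branch-prediction hint while being easier to read.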
@@ -15,6 +15,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
@@ -52,7 +53,7 @@ ia64_get_itc (void)
 	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #ifdef CONFIG_ITANIUM
-	while (__builtin_expect ((__s32) result == -1, 0))
+	while (unlikely((__s32) result == -1))
 		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #endif
 	return result;
......
@@ -17,6 +17,7 @@
 #include <linux/mm.h>
 #include <linux/threads.h>
+#include <linux/compiler.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -37,7 +38,7 @@ pgd_alloc_one_fast (struct mm_struct *mm)
 {
 	unsigned long *ret = pgd_quicklist;
-	if (__builtin_expect(ret != NULL, 1)) {
+	if (likely(ret != NULL)) {
 		pgd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		--pgtable_cache_size;
@@ -52,9 +53,9 @@ pgd_alloc (struct mm_struct *mm)
 	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
 	pgd_t *pgd = pgd_alloc_one_fast(mm);
-	if (__builtin_expect(pgd == NULL, 0)) {
+	if (unlikely(pgd == NULL)) {
 		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-		if (__builtin_expect(pgd != NULL, 1))
+		if (likely(pgd != NULL))
 			clear_page(pgd);
 	}
 	return pgd;
@@ -80,7 +81,7 @@ pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long *ret = (unsigned long *)pmd_quicklist;
-	if (__builtin_expect(ret != NULL, 1)) {
+	if (likely(ret != NULL)) {
 		pmd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		--pgtable_cache_size;
@@ -93,7 +94,7 @@ pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
 	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-	if (__builtin_expect(pmd != NULL, 1))
+	if (likely(pmd != NULL))
 		clear_page(pmd);
 	return pmd;
 }
@@ -125,7 +126,7 @@ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
 	struct page *pte = alloc_pages(GFP_KERNEL, 0);
-	if (__builtin_expect(pte != NULL, 1))
+	if (likely(pte != NULL))
 		clear_page(page_address(pte));
 	return pte;
 }
@@ -135,7 +136,7 @@ pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
-	if (__builtin_expect(pte != NULL, 1))
+	if (likely(pte != NULL))
 		clear_page(pte);
 	return pte;
 }
......
@@ -16,6 +16,7 @@
 #include <linux/config.h>
 #include <linux/percpu.h>
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/kregs.h>
@@ -283,7 +284,7 @@ struct thread_struct {
 	regs->loadrs = 0;							\
 	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */ \
 	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */	\
-	if (!__builtin_expect (current->mm->dumpable, 1)) {			\
+	if (unlikely(!current->mm->dumpable)) {					\
 		/*								\
 		 * Zap scratch regs to avoid leaking bits between processes with different \
 		 * uid/privileges.						\
......
@@ -6,6 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/hardirq.h>
+#include <linux/compiler.h>
 #define __local_bh_enable()	do { barrier(); really_local_bh_count()--; } while (0)
@@ -13,7 +14,7 @@
 #define local_bh_enable()						\
 do {									\
 	__local_bh_enable();						\
-	if (__builtin_expect(local_softirq_pending(), 0) && really_local_bh_count() == 0) \
+	if (unlikely(local_softirq_pending()) && really_local_bh_count() == 0)	\
 		do_softirq();						\
 } while (0)
......