Commit ea32d985 authored by Manik Raina, committed by Linus Torvalds

[PATCH] More __builtin_expect() cleanup in favour of likely()/unlikely()

Changed files in the include/asm-ia64 directory to get rid of
__builtin_expect() in favour of likely/unlikely.
parent e59e6180
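For context, the likely()/unlikely() macros this patch switches to are thin wrappers in include/linux/compiler.h around the same GCC builtin. A paraphrased sketch of their 2.5-era definitions (not part of this diff):

	/* Branch-prediction hints: tell GCC which way the test usually goes. */
	#define likely(x)	__builtin_expect((x), 1)	/* x is almost always true */
	#define unlikely(x)	__builtin_expect((x), 0)	/* x is almost always false */

The conversion is purely mechanical: __builtin_expect(cond, 1) becomes likely(cond) and __builtin_expect(cond, 0) becomes unlikely(cond), with no change in generated code.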
@@ -15,6 +15,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
@@ -52,7 +53,7 @@ ia64_get_itc (void)
 	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #ifdef CONFIG_ITANIUM
-	while (__builtin_expect ((__s32) result == -1, 0))
+	while (unlikely ((__s32) result == -1))
 		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #endif
 	return result;
...
@@ -17,6 +17,7 @@
 #include <linux/mm.h>
 #include <linux/threads.h>
+#include <linux/compiler.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -37,7 +38,7 @@ pgd_alloc_one_fast (struct mm_struct *mm)
 {
 	unsigned long *ret = pgd_quicklist;
-	if (__builtin_expect(ret != NULL, 1)) {
+	if (likely(ret != NULL)) {
 		pgd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		--pgtable_cache_size;
@@ -52,9 +53,9 @@ pgd_alloc (struct mm_struct *mm)
 	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
 	pgd_t *pgd = pgd_alloc_one_fast(mm);
-	if (__builtin_expect(pgd == NULL, 0)) {
+	if (unlikely(pgd == NULL)) {
 		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-		if (__builtin_expect(pgd != NULL, 1))
+		if (likely(pgd != NULL))
 			clear_page(pgd);
 	}
 	return pgd;
@@ -80,7 +81,7 @@ pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long *ret = (unsigned long *)pmd_quicklist;
-	if (__builtin_expect(ret != NULL, 1)) {
+	if (likely(ret != NULL)) {
 		pmd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		--pgtable_cache_size;
@@ -93,7 +94,7 @@ pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
 	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-	if (__builtin_expect(pmd != NULL, 1))
+	if (likely(pmd != NULL))
 		clear_page(pmd);
 	return pmd;
 }
@@ -125,7 +126,7 @@ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
 	struct page *pte = alloc_pages(GFP_KERNEL, 0);
-	if (__builtin_expect(pte != NULL, 1))
+	if (likely(pte != NULL))
 		clear_page(page_address(pte));
 	return pte;
 }
@@ -135,7 +136,7 @@ pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
-	if (__builtin_expect(pte != NULL, 1))
+	if (likely(pte != NULL))
 		clear_page(pte);
 	return pte;
 }
...
@@ -16,6 +16,7 @@
 #include <linux/config.h>
 #include <linux/percpu.h>
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/kregs.h>
@@ -283,7 +284,7 @@ struct thread_struct {
 	regs->loadrs = 0;							\
 	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */ \
 	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */	\
-	if (!__builtin_expect (current->mm->dumpable, 1)) {			\
+	if (!likely (current->mm->dumpable)) {					\
 		/*								\
 		 * Zap scratch regs to avoid leaking bits between processes with different \
 		 * uid/privileges.						\
...
 #ifndef _ASM_IA64_SOFTIRQ_H
 #define _ASM_IA64_SOFTIRQ_H
+#include <linux/compiler.h>
 /*
  * Copyright (C) 1998-2001 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
@@ -13,7 +15,7 @@
 #define local_bh_enable()						\
 do {									\
 	__local_bh_enable();						\
-	if (__builtin_expect(local_softirq_pending(), 0) && really_local_bh_count() == 0) \
+	if (unlikely(local_softirq_pending()) && really_local_bh_count() == 0) \
 		do_softirq();						\
 } while (0)
...
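One conversion above deserves a note: in the processor.h hunk, !__builtin_expect (current->mm->dumpable, 1) was converted mechanically to !likely (...) rather than unlikely(!...). The two forms carry the same hint when the operand is strictly 0 or 1, as dumpable is here; a sketch with a hypothetical flag (not from this patch):

	if (!likely(flag))	/* expect flag == 1, so the branch is rarely taken */
		...
	if (unlikely(!flag))	/* same hint, stated on the negated condition */
		...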