Commit f598fb8d authored by Andrew Morton, committed by Linus Torvalds

[PATCH] if ... BUG() -> BUG_ON()

From: Adrian Bunk <bunk@fs.tum.de>

Four months ago, Rolf Eike Beer <eike-kernel@sf-tec.de> sent a patch
against 2.6.0-test5-bk1 that converted several if ... BUG() constructs to BUG_ON().

This might in some cases result in slightly faster code because BUG_ON()
uses unlikely().
parent fd2ac4b2
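For context, BUG_ON() is more than shorthand: its generic fallback definition wraps the condition in unlikely(), a branch-prediction hint that expands to gcc's __builtin_expect() and lets the compiler lay the failure path out of line. A minimal sketch of the relevant definitions, lightly simplified from include/linux/compiler.h and include/asm-generic/bug.h of this era (architectures may override BUG() and BUG_ON()):

    /* Hint to gcc that x is almost always zero, so the error
     * branch can be moved off the straight-line (hot) path. */
    #define unlikely(x)	__builtin_expect(!!(x), 0)

    /* Generic fallback, used when an arch does not provide its own. */
    #ifndef HAVE_ARCH_BUG_ON
    #define BUG_ON(condition) do { \
    	if (unlikely((condition) != 0)) \
    		BUG(); \
    } while (0)
    #endif

An open-coded "if (cond) BUG();" carries no such hint unless the author remembers to write unlikely() by hand, which is why the conversions below can make the common, non-failing path slightly faster.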
@@ -16,8 +16,7 @@ static inline int cpu_to_node(int cpu)
node = alpha_mv.cpuid_to_nid(cpu);
#ifdef DEBUG_NUMA
-if (node < 0)
-BUG();
+BUG_ON(node < 0);
#endif
return node;
@@ -27,15 +27,13 @@
static inline unsigned long ___mem_pci(unsigned long a)
{
-if (a <= 0xc0000000 || a >= 0xe0000000)
-BUG();
+BUG_ON(a <= 0xc0000000 || a >= 0xe0000000);
return a;
}
static inline unsigned long ___mem_isa(unsigned long a)
{
-if (a >= 16*1048576)
-BUG();
+BUG_ON(a >= 16*1048576);
return PCIMEM_BASE + a;
}
#define __mem_pci(a) ___mem_pci((unsigned long)(a))
@@ -28,8 +28,7 @@ static inline unsigned long ___mem_pci(unsigned long a)
static inline unsigned long ___mem_isa(unsigned long a)
{
-if (a >= 16*1048576)
-BUG();
+BUG_ON(a >= 16*1048576);
return PCIMEM_BASE + a;
}
#define __mem_pci(a) ___mem_pci((unsigned long)(a))
@@ -124,8 +124,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
for (i = 0; i < nhwentries; i++, sg++) {
unsigned long addr;
-if (!sg->page)
-BUG();
+BUG_ON(!sg->page);
addr = (unsigned long) page_address(sg->page);
if (addr)
@@ -139,8 +138,7 @@ dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
{
unsigned long addr;
-if (direction == DMA_NONE)
-BUG();
+BUG_ON(direction == DMA_NONE);
addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
dma_cache_wback_inv(addr, size);
@@ -153,8 +151,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
{
unsigned long addr;
-if (direction == DMA_NONE)
-BUG();
+BUG_ON(direction == DMA_NONE);
addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
dma_cache_wback_inv(addr, size);
@@ -168,8 +165,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
int i;
#endif
-if (direction == DMA_NONE)
-BUG();
+BUG_ON(direction == DMA_NONE);
/* Make sure that gcc doesn't leave the empty loop body. */
#ifdef CONFIG_NONCOHERENT_IO
@@ -113,8 +113,7 @@ static inline void pci_dac_dma_sync_single(struct pci_dev *pdev,
{
unsigned long addr;
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
addr = baddr_to_bus(pdev->bus, dma_addr) + PAGE_OFFSET;
dma_cache_wback_inv(addr, len);
@@ -19,8 +19,7 @@ extern void free_sid(unsigned long);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
-if (atomic_read(&mm->mm_users) != 1)
-BUG();
+BUG_ON(atomic_read(&mm->mm_users) != 1);
mm->context = alloc_sid();
return 0;
@@ -64,7 +63,7 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
* already, so we should be OK.
*/
-if (next == &init_mm) BUG(); /* Should never happen */
+BUG_ON(next == &init_mm); /* Should never happen */
if (next->context == 0)
next->context = alloc_sid();
@@ -27,7 +27,7 @@ extern void flush_tlb_all(void);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
-if (mm == &init_mm) BUG(); /* Should never happen */
+BUG_ON(mm == &init_mm); /* Should never happen */
#ifdef CONFIG_SMP
flush_tlb_all();
@@ -63,8 +63,7 @@ static inline void *kmap(struct page *page)
static inline void kunmap(struct page *page)
{
-if (in_interrupt())
-BUG();
+BUG_ON(in_interrupt());
if (page < highmem_start_page)
return;
kunmap_high(page);
@@ -89,8 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
#if HIGHMEM_DEBUG
-if (!pte_none(*(kmap_pte+idx)))
-BUG();
+BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
flush_tlb_page(0, vaddr);
@@ -110,8 +108,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
return;
}
-if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
-BUG();
+BUG_ON(vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE);
/*
* force other mappings to Oops if they'll try to access
@@ -105,8 +105,7 @@ static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
@@ -134,8 +133,7 @@ static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
/* Nothing to do */
}
@@ -159,8 +157,7 @@ static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
{
int i;
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
/*
* temporary 2.4 hack
@@ -65,8 +65,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
@@ -74,8 +73,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
-if (direction == PCI_DMA_NONE)
-BUG();
+BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
@@ -13,8 +13,7 @@ static inline int cpu_to_node(int cpu)
node = numa_cpu_lookup_table[cpu];
#ifdef DEBUG_NUMA
-if (node == -1)
-BUG();
+BUG_ON(node == -1);
#endif
return node;
@@ -223,8 +223,7 @@ idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count)
size_t left;
int i;
-if (count > ib->size)
-BUG();
+BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
if (left)
@@ -244,8 +243,7 @@ idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count)
size_t left;
int i;
-if (count > ib->size)
-BUG();
+BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
if (left)
@@ -48,8 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
-if (!spin_is_locked(lock))
-BUG();
+BUG_ON(!spin_is_locked(lock));
#endif
lock->lock = 0;
@@ -320,10 +320,9 @@ static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
static void sun_pci_fd_enable_dma(void)
{
-if ((NULL == sun_pci_dma_pending.buf) ||
+BUG_ON((NULL == sun_pci_dma_pending.buf) ||
(0 == sun_pci_dma_pending.len) ||
-(0 == sun_pci_dma_pending.direction))
-BUG();
+(0 == sun_pci_dma_pending.direction));
sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
sun_pci_dma_current.len = sun_pci_dma_pending.len;
@@ -90,10 +90,9 @@ static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long st
{
/* Note the signed type. */
long s = start, e = end, vpte_base;
-if (s > e)
/* Nobody should call us with start below VM hole and end above.
   See if it is really true. */
-BUG();
+BUG_ON(s > e);
#if 0
/* Currently free_pgtables guarantees this. */
s &= PMD_MASK;
@@ -24,8 +24,7 @@ extern __inline__ pmd_t *get_pmd(void)
extern __inline__ void pmd_free(pmd_t *pmd)
{
-if ((unsigned long)pmd & (PAGE_SIZE-1))
-BUG();
+BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
}
@@ -41,8 +40,7 @@ static inline pgd_t *pgd_alloc (struct mm_struct *mm)
static inline void pgd_free (pgd_t *pgd)
{
-if ((unsigned long)pgd & (PAGE_SIZE-1))
-BUG();
+BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
free_page((unsigned long)pgd);
}
@@ -64,8 +62,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
extern __inline__ void pte_free_kernel(pte_t *pte)
{
-if ((unsigned long)pte & (PAGE_SIZE-1))
-BUG();
+BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
free_page((unsigned long)pte);
}
@@ -70,10 +70,8 @@ typedef struct {
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
-if (lock->magic != SPINLOCK_MAGIC)
-BUG();
-if (!spin_is_locked(lock))
-BUG();
+BUG_ON(lock->magic != SPINLOCK_MAGIC);
+BUG_ON(!spin_is_locked(lock));
#endif
__asm__ __volatile__(
spin_unlock_string
@@ -91,10 +89,8 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
{
char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
-if (lock->magic != SPINLOCK_MAGIC)
-BUG();
-if (!spin_is_locked(lock))
-BUG();
+BUG_ON(lock->magic != SPINLOCK_MAGIC);
+BUG_ON(!spin_is_locked(lock));
#endif
__asm__ __volatile__(
spin_unlock_string
@@ -174,8 +170,7 @@ typedef struct {
static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
-if (rw->magic != RWLOCK_MAGIC)
-BUG();
+BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
__build_read_lock(rw, "__read_lock_failed");
}
@@ -183,8 +178,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
-if (rw->magic != RWLOCK_MAGIC)
-BUG();
+BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
__build_write_lock(rw, "__write_lock_failed");
}
@@ -266,8 +266,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
local_irq_save(*flags);
addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
-if (addr & ~PAGE_MASK)
-BUG();
+BUG_ON(addr & ~PAGE_MASK);
return (char *) addr + bvec->bv_offset;
}
@@ -125,8 +125,7 @@ BUFFER_FNS(Write_EIO,write_io_error)
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
({ \
-if (!PagePrivate(page)) \
-BUG(); \
+BUG_ON(!PagePrivate(page)); \
((struct buffer_head *)(page)->private); \
})
#define page_has_buffers(page) PagePrivate(page)
@@ -270,8 +270,7 @@ extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry) {
-if (!atomic_read(&dentry->d_count))
-BUG();
+BUG_ON(!atomic_read(&dentry->d_count));
atomic_inc(&dentry->d_count);
}
return dentry;
@@ -56,8 +56,7 @@ static inline void memclear_highpage_flush(struct page *page, unsigned int offse
{
void *kaddr;
-if (offset + size > PAGE_SIZE)
-BUG();
+BUG_ON(offset + size > PAGE_SIZE);
kaddr = kmap_atomic(page, KM_USER0);
memset((char *)kaddr + offset, 0, size);
@@ -831,7 +831,7 @@ static inline void netif_rx_complete(struct net_device *dev)
unsigned long flags;
local_irq_save(flags);
-if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
+BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
list_del(&dev->poll_list);
smp_mb__before_clear_bit();
clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
@@ -857,7 +857,7 @@ static inline void netif_poll_enable(struct net_device *dev)
*/
static inline void __netif_rx_complete(struct net_device *dev)
{
-if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
+BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
list_del(&dev->poll_list);
smp_mb__before_clear_bit();
clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
@@ -255,8 +255,7 @@ nfs_file_cred(struct file *file)
if (file)
cred = (struct rpc_cred *)file->private_data;
#ifdef RPC_DEBUG
-if (cred && cred->cr_magic != RPCAUTH_CRED_MAGIC)
-BUG();
+BUG_ON(cred && cred->cr_magic != RPCAUTH_CRED_MAGIC);
#endif
return cred;
}
@@ -44,8 +44,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
static __inline__ void DQUOT_INIT(struct inode *inode)
{
-if (!inode->i_sb)
-BUG();
+BUG_ON(!inode->i_sb);
if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
inode->i_sb->dq_op->initialize(inode, -1);
}
@@ -53,8 +52,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
static __inline__ void DQUOT_DROP(struct inode *inode)
{
if (IS_QUOTAINIT(inode)) {
-if (!inode->i_sb)
-BUG();
+BUG_ON(!inode->i_sb);
inode->i_sb->dq_op->drop(inode); /* Ops must be set when there's any quota... */
}
}
@@ -49,8 +49,7 @@ static inline void lock_kernel(void)
static inline void unlock_kernel(void)
{
-if (unlikely(current->lock_depth < 0))
-BUG();
+BUG_ON(current->lock_depth < 0);
if (likely(--current->lock_depth < 0))
put_kernel_lock();
}
@@ -473,8 +473,8 @@ static __inline__ void sk_set_owner(struct sock *sk, struct module *owner)
* change the ownership of this struct sock, with one not needed
* transient sk_set_owner call.
*/
-if (unlikely(sk->sk_owner != NULL))
-BUG();
+BUG_ON(sk->sk_owner != NULL);
sk->sk_owner = owner;
__module_get(owner);
}
@@ -1457,7 +1457,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (tp->ucopy.memory > sk->sk_rcvbuf) {
struct sk_buff *skb1;
-if (sock_owned_by_user(sk)) BUG();
+BUG_ON(sock_owned_by_user(sk));
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->sk_backlog_rcv(sk, skb1);
@@ -187,8 +187,7 @@ extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
static inline void rxrpc_get_call(struct rxrpc_call *call)
{
-if (atomic_read(&call->usage)<=0)
-BUG();
+BUG_ON(atomic_read(&call->usage)<=0);
atomic_inc(&call->usage);
/*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
}
@@ -67,8 +67,7 @@ extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
{
-if (atomic_read(&conn->usage)<0)
-BUG();
+BUG_ON(atomic_read(&conn->usage)<0);
atomic_inc(&conn->usage);
//printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
}
@@ -53,8 +53,7 @@ struct rxrpc_message
extern void __rxrpc_put_message(struct rxrpc_message *msg);
static inline void rxrpc_put_message(struct rxrpc_message *msg)
{
-if (atomic_read(&msg->usage)<=0)
-BUG();
+BUG_ON(atomic_read(&msg->usage)<=0);
if (atomic_dec_and_test(&msg->usage))
__rxrpc_put_message(msg);
}
@@ -72,8 +72,7 @@ extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
{
-if (atomic_read(&peer->usage)<0)
-BUG();
+BUG_ON(atomic_read(&peer->usage)<0);
atomic_inc(&peer->usage);
//printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
}
@@ -85,8 +85,7 @@ extern int rxrpc_create_transport(unsigned short port,
static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
{
-if (atomic_read(&trans->usage) <= 0)
-BUG();
+BUG_ON(atomic_read(&trans->usage) <= 0);
atomic_inc(&trans->usage);
//printk("rxrpc_get_transport(%p{u=%d})\n",
// trans, atomic_read(&trans->usage));