Commit f598fb8d authored by Andrew Morton, committed by Linus Torvalds

[PATCH] if ... BUG() -> BUG_ON()

From: Adrian Bunk <bunk@fs.tum.de>

Four months ago, Rolf Eike Beer <eike-kernel@sf-tec.de> sent a patch
against 2.6.0-test5-bk1 that converted several "if ... BUG()" constructs
to BUG_ON().

This can in some cases result in slightly faster code, because BUG_ON()
wraps its condition in unlikely().
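As a reference for the pattern being converted, here is a minimal sketch in C. The BUG_ON() definition shown is approximately the generic include/asm-generic/bug.h form of that era (architectures may override it):

	/* Before: a plain conditional gives the compiler no hint that
	 * the failure branch is cold. */
	if (node < 0)
		BUG();

	/* After: BUG_ON() wraps the condition in unlikely(), so the
	 * compiler can move the trap path out of the hot path. */
	BUG_ON(node < 0);

	/* Generic definition, approximately: */
	#define BUG_ON(condition) do { if (unlikely((condition) != 0)) BUG(); } while (0)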
parent fd2ac4b2
@@ -16,8 +16,7 @@ static inline int cpu_to_node(int cpu)
 	node = alpha_mv.cpuid_to_nid(cpu);
 #ifdef DEBUG_NUMA
-	if (node < 0)
-		BUG();
+	BUG_ON(node < 0);
 #endif
 	return node;
...
@@ -27,15 +27,13 @@
 static inline unsigned long ___mem_pci(unsigned long a)
 {
-	if (a <= 0xc0000000 || a >= 0xe0000000)
-		BUG();
+	BUG_ON(a <= 0xc0000000 || a >= 0xe0000000);
 	return a;
 }

 static inline unsigned long ___mem_isa(unsigned long a)
 {
-	if (a >= 16*1048576)
-		BUG();
+	BUG_ON(a >= 16*1048576);
 	return PCIMEM_BASE + a;
 }

 #define __mem_pci(a)	___mem_pci((unsigned long)(a))
...
@@ -28,8 +28,7 @@ static inline unsigned long ___mem_pci(unsigned long a)
 static inline unsigned long ___mem_isa(unsigned long a)
 {
-	if (a >= 16*1048576)
-		BUG();
+	BUG_ON(a >= 16*1048576);
 	return PCIMEM_BASE + a;
 }

 #define __mem_pci(a)	___mem_pci((unsigned long)(a))
...
@@ -124,8 +124,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	for (i = 0; i < nhwentries; i++, sg++) {
 		unsigned long addr;

-		if (!sg->page)
-			BUG();
+		BUG_ON(!sg->page);
 		addr = (unsigned long) page_address(sg->page);
 		if (addr)
@@ -139,8 +138,7 @@ dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
 {
 	unsigned long addr;

-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
 	dma_cache_wback_inv(addr, size);
@@ -153,8 +151,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
 {
 	unsigned long addr;

-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
 	dma_cache_wback_inv(addr, size);
@@ -168,8 +165,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
 	int i;
 #endif

-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);

 	/* Make sure that gcc doesn't leave the empty loop body.  */
 #ifdef CONFIG_NONCOHERENT_IO
...
@@ -113,8 +113,7 @@ static inline void pci_dac_dma_sync_single(struct pci_dev *pdev,
 {
 	unsigned long addr;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	addr = baddr_to_bus(pdev->bus, dma_addr) + PAGE_OFFSET;
 	dma_cache_wback_inv(addr, len);
...
@@ -19,8 +19,7 @@ extern void free_sid(unsigned long);
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	if (atomic_read(&mm->mm_users) != 1)
-		BUG();
+	BUG_ON(atomic_read(&mm->mm_users) != 1);

 	mm->context = alloc_sid();
 	return 0;
@@ -64,7 +63,7 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	 * already, so we should be OK.
	 */
-	if (next == &init_mm) BUG();	/* Should never happen */
+	BUG_ON(next == &init_mm);	/* Should never happen */

 	if (next->context == 0)
 		next->context = alloc_sid();
...
@@ -27,7 +27,7 @@ extern void flush_tlb_all(void);
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	if (mm == &init_mm) BUG();	/* Should never happen */
+	BUG_ON(mm == &init_mm);	/* Should never happen */

 #ifdef CONFIG_SMP
 	flush_tlb_all();
...
@@ -63,8 +63,7 @@ static inline void *kmap(struct page *page)
 static inline void kunmap(struct page *page)
 {
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 	if (page < highmem_start_page)
 		return;
 	kunmap_high(page);
@@ -89,8 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
 #if HIGHMEM_DEBUG
-	if (!pte_none(*(kmap_pte+idx)))
-		BUG();
+	BUG_ON(!pte_none(*(kmap_pte+idx)));
 #endif
 	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
 	flush_tlb_page(0, vaddr);
@@ -110,8 +108,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 		return;
 	}

-	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
-		BUG();
+	BUG_ON(vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE);

 	/*
	 * force other mappings to Oops if they'll try to access
...
@@ -105,8 +105,7 @@ static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
 static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 				    size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* nothing to do */
 }
@@ -134,8 +133,7 @@ static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
 static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
 				  size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* Nothing to do */
 }
@@ -159,8 +157,7 @@ static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 {
 	int i;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);

 	/*
	 * temporary 2.4 hack
...
@@ -65,8 +65,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
 				       dma_addr_t dma_handle,
 				       size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* nothing to do */
 }
@@ -74,8 +73,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
 				   struct scatterlist *sg,
 				   int nelems, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* nothing to do */
 }
...
@@ -13,8 +13,7 @@ static inline int cpu_to_node(int cpu)
 	node = numa_cpu_lookup_table[cpu];
 #ifdef DEBUG_NUMA
-	if (node == -1)
-		BUG();
+	BUG_ON(node == -1);
 #endif
 	return node;
...
@@ -223,8 +223,7 @@ idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count)
 	size_t left;
 	int i;

-	if (count > ib->size)
-		BUG();
+	BUG_ON(count > ib->size);
 	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
 		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
 		if (left)
@@ -244,8 +243,7 @@ idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count)
 	size_t left;
 	int i;

-	if (count > ib->size)
-		BUG();
+	BUG_ON(count > ib->size);
 	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
 		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
 		if (left)
...
@@ -48,8 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (!spin_is_locked(lock))
-		BUG();
+	BUG_ON(!spin_is_locked(lock));
 #endif
 	lock->lock = 0;
...
@@ -320,10 +320,9 @@ static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
 static void sun_pci_fd_enable_dma(void)
 {
-	if ((NULL == sun_pci_dma_pending.buf) ||
-	    (0 == sun_pci_dma_pending.len) ||
-	    (0 == sun_pci_dma_pending.direction))
-		BUG();
+	BUG_ON((NULL == sun_pci_dma_pending.buf) ||
+	       (0 == sun_pci_dma_pending.len) ||
+	       (0 == sun_pci_dma_pending.direction));

 	sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
 	sun_pci_dma_current.len = sun_pci_dma_pending.len;
...
@@ -90,10 +90,9 @@ static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long st
 {
 	/* Note the signed type.  */
 	long s = start, e = end, vpte_base;
-	if (s > e)
-		/* Nobody should call us with start below VM hole and end above.
-		   See if it is really true. */
-		BUG();
+	/* Nobody should call us with start below VM hole and end above.
+	   See if it is really true. */
+	BUG_ON(s > e);
 #if 0
 	/* Currently free_pgtables guarantees this. */
 	s &= PMD_MASK;
...
@@ -24,8 +24,7 @@ extern __inline__ pmd_t *get_pmd(void)
 extern __inline__ void pmd_free(pmd_t *pmd)
 {
-	if ((unsigned long)pmd & (PAGE_SIZE-1))
-		BUG();
+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
 	free_page((unsigned long)pmd);
 }
@@ -41,8 +40,7 @@ static inline pgd_t *pgd_alloc (struct mm_struct *mm)
 static inline void pgd_free (pgd_t *pgd)
 {
-	if ((unsigned long)pgd & (PAGE_SIZE-1))
-		BUG();
+	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
 	free_page((unsigned long)pgd);
 }
@@ -64,8 +62,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 extern __inline__ void pte_free_kernel(pte_t *pte)
 {
-	if ((unsigned long)pte & (PAGE_SIZE-1))
-		BUG();
+	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
 	free_page((unsigned long)pte);
 }
...
@@ -70,10 +70,8 @@ typedef struct {
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (lock->magic != SPINLOCK_MAGIC)
-		BUG();
-	if (!spin_is_locked(lock))
-		BUG();
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
 #endif
 	__asm__ __volatile__(
 		spin_unlock_string
@@ -91,10 +89,8 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 	char oldval = 1;
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (lock->magic != SPINLOCK_MAGIC)
-		BUG();
-	if (!spin_is_locked(lock))
-		BUG();
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
 #endif
 	__asm__ __volatile__(
 		spin_unlock_string
@@ -174,8 +170,7 @@ typedef struct {
 static inline void _raw_read_lock(rwlock_t *rw)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (rw->magic != RWLOCK_MAGIC)
-		BUG();
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
 #endif
 	__build_read_lock(rw, "__read_lock_failed");
 }
@@ -183,8 +178,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
 static inline void _raw_write_lock(rwlock_t *rw)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (rw->magic != RWLOCK_MAGIC)
-		BUG();
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
 #endif
 	__build_write_lock(rw, "__write_lock_failed");
 }
...
@@ -266,8 +266,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 	local_irq_save(*flags);
 	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

-	if (addr & ~PAGE_MASK)
-		BUG();
+	BUG_ON(addr & ~PAGE_MASK);

 	return (char *) addr + bvec->bv_offset;
 }
...
@@ -125,8 +125,7 @@ BUFFER_FNS(Write_EIO,write_io_error)
 /* If we *know* page->private refers to buffer_heads */
 #define page_buffers(page)					\
 	({							\
-		if (!PagePrivate(page))				\
-			BUG();					\
+		BUG_ON(!PagePrivate(page));			\
 		((struct buffer_head *)(page)->private);	\
 	})
 #define page_has_buffers(page)	PagePrivate(page)
...
@@ -270,8 +270,7 @@ extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
 static inline struct dentry *dget(struct dentry *dentry)
 {
 	if (dentry) {
-		if (!atomic_read(&dentry->d_count))
-			BUG();
+		BUG_ON(!atomic_read(&dentry->d_count));
 		atomic_inc(&dentry->d_count);
 	}
 	return dentry;
...
@@ -56,8 +56,7 @@ static inline void memclear_highpage_flush(struct page *page, unsigned int offse
 {
 	void *kaddr;

-	if (offset + size > PAGE_SIZE)
-		BUG();
+	BUG_ON(offset + size > PAGE_SIZE);
 	kaddr = kmap_atomic(page, KM_USER0);
 	memset((char *)kaddr + offset, 0, size);
...
@@ -831,7 +831,7 @@ static inline void netif_rx_complete(struct net_device *dev)
 	unsigned long flags;

 	local_irq_save(flags);
-	if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
+	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
 	list_del(&dev->poll_list);
 	smp_mb__before_clear_bit();
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
@@ -857,7 +857,7 @@ static inline void netif_poll_enable(struct net_device *dev)
  */
 static inline void __netif_rx_complete(struct net_device *dev)
 {
-	if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
+	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
 	list_del(&dev->poll_list);
 	smp_mb__before_clear_bit();
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
...
@@ -255,8 +255,7 @@ nfs_file_cred(struct file *file)
 	if (file)
 		cred = (struct rpc_cred *)file->private_data;
 #ifdef RPC_DEBUG
-	if (cred && cred->cr_magic != RPCAUTH_CRED_MAGIC)
-		BUG();
+	BUG_ON(cred && cred->cr_magic != RPCAUTH_CRED_MAGIC);
 #endif
 	return cred;
 }
...
@@ -44,8 +44,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
 static __inline__ void DQUOT_INIT(struct inode *inode)
 {
-	if (!inode->i_sb)
-		BUG();
+	BUG_ON(!inode->i_sb);
 	if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
 		inode->i_sb->dq_op->initialize(inode, -1);
 }
@@ -53,8 +52,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
 static __inline__ void DQUOT_DROP(struct inode *inode)
 {
 	if (IS_QUOTAINIT(inode)) {
-		if (!inode->i_sb)
-			BUG();
+		BUG_ON(!inode->i_sb);
 		inode->i_sb->dq_op->drop(inode);	/* Ops must be set when there's any quota... */
 	}
 }
...
@@ -49,8 +49,7 @@ static inline void lock_kernel(void)
 static inline void unlock_kernel(void)
 {
-	if (unlikely(current->lock_depth < 0))
-		BUG();
+	BUG_ON(current->lock_depth < 0);
 	if (likely(--current->lock_depth < 0))
 		put_kernel_lock();
 }
...
@@ -473,8 +473,8 @@ static __inline__ void sk_set_owner(struct sock *sk, struct module *owner)
 	 * change the ownership of this struct sock, with one not needed
	 * transient sk_set_owner call.
	 */
-	if (unlikely(sk->sk_owner != NULL))
-		BUG();
+	BUG_ON(sk->sk_owner != NULL);
 	sk->sk_owner = owner;
 	__module_get(owner);
 }
...
@@ -1457,7 +1457,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 		if (tp->ucopy.memory > sk->sk_rcvbuf) {
 			struct sk_buff *skb1;

-			if (sock_owned_by_user(sk)) BUG();
+			BUG_ON(sock_owned_by_user(sk));

 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 				sk->sk_backlog_rcv(sk, skb1);
...
@@ -187,8 +187,7 @@ extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
 static inline void rxrpc_get_call(struct rxrpc_call *call)
 {
-	if (atomic_read(&call->usage)<=0)
-		BUG();
+	BUG_ON(atomic_read(&call->usage)<=0);
 	atomic_inc(&call->usage);
 	/*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
 }
...
@@ -67,8 +67,7 @@ extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
 static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
 {
-	if (atomic_read(&conn->usage)<0)
-		BUG();
+	BUG_ON(atomic_read(&conn->usage)<0);
 	atomic_inc(&conn->usage);
 	//printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
 }
...
@@ -53,8 +53,7 @@ struct rxrpc_message
 extern void __rxrpc_put_message(struct rxrpc_message *msg);
 static inline void rxrpc_put_message(struct rxrpc_message *msg)
 {
-	if (atomic_read(&msg->usage)<=0)
-		BUG();
+	BUG_ON(atomic_read(&msg->usage)<=0);
 	if (atomic_dec_and_test(&msg->usage))
 		__rxrpc_put_message(msg);
 }
...
@@ -72,8 +72,7 @@ extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
 static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
 {
-	if (atomic_read(&peer->usage)<0)
-		BUG();
+	BUG_ON(atomic_read(&peer->usage)<0);
 	atomic_inc(&peer->usage);
 	//printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
 }
...
@@ -85,8 +85,7 @@ extern int rxrpc_create_transport(unsigned short port,
 static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
 {
-	if (atomic_read(&trans->usage) <= 0)
-		BUG();
+	BUG_ON(atomic_read(&trans->usage) <= 0);
 	atomic_inc(&trans->usage);
 	//printk("rxrpc_get_transport(%p{u=%d})\n",
 	//	 trans, atomic_read(&trans->usage));
...