Commit 78330667 authored by Christoph Hellwig

XFS: Make pagebuf use the generic XFS ASSERT() instead of its own assert()

Modid: 2.5.x-xfs:slinx:127736a
parent 6cd43d77
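
The generic ASSERT() being switched to here lives in the shared XFS support code pulled in via <support/debug.h>. As a rough sketch of the interface this commit relies on (the exact macro body and the assfail() reporting helper are assumptions about the 2.5.x-xfs tree, not quoted from it):

/*
 * Sketch only: approximate shape of the generic XFS ASSERT().
 * assfail() is the assumed report-and-trap helper; the real
 * definition in support/debug.h may differ in detail.
 */
#ifdef DEBUG
extern void assfail(char *expr, char *file, int line);
# define ASSERT(EX)	((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
#else
# define ASSERT(x)	((void)0)
#endif

The practical effect is that pagebuf assertions now follow the same debug configuration as the rest of XFS, instead of the separate PAGEBUF_DEBUG switch that guarded the local assert() removed below.
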
@@ -58,7 +58,9 @@
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <support/debug.h>
#include <support/kmem.h>
#include "page_buf_internal.h"
#define SECTOR_SHIFT 9
@@ -547,8 +549,8 @@ _pagebuf_lookup_pages(
} else if (!PagePrivate(page)) {
unsigned long i, range = (offset + nbytes) >> SECTOR_SHIFT;
-assert(blocksize < PAGE_CACHE_SIZE);
-assert(!(pb->pb_flags & _PBF_PRIVATE_BH));
+ASSERT(blocksize < PAGE_CACHE_SIZE);
+ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH));
/*
* In this case page->private holds a bitmap
* of uptodate sectors (512) within the page
@@ -1316,8 +1318,8 @@ bio_end_io_pagebuf(
} else if (!PagePrivate(page)) {
unsigned int j, range;
-assert(blocksize < PAGE_CACHE_SIZE);
-assert(!(pb->pb_flags & _PBF_PRIVATE_BH));
+ASSERT(blocksize < PAGE_CACHE_SIZE);
+ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH));
range = (bvec->bv_offset + bvec->bv_len)>>SECTOR_SHIFT;
for (j = bvec->bv_offset>>SECTOR_SHIFT; j < range; j++)
@@ -1606,7 +1608,7 @@ pagebuf_iomove(
while (cboff < boff) {
pagebuf_segment(pb, &cboff, &page, &cpoff, &csize);
-assert(((csize + cpoff) <= PAGE_CACHE_SIZE));
+ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
switch (mode) {
case PBRW_ZERO:
......
@@ -151,18 +151,6 @@ extern struct pbstats pbstats;
#define PB_STATS_INC(count) ( count ++ )
-#undef assert
-#ifdef PAGEBUF_DEBUG
-# define assert(expr) \
-	if (!(expr)) { \
-		printk("Assertion failed: %s\n%s::%s line %d\n",\
-			#expr,__FILE__,__FUNCTION__,__LINE__); \
-		BUG(); \
-	}
-#else
-# define assert(x) do { } while (0)
-#endif
#ifndef STATIC
# define STATIC static
#endif
......
@@ -54,6 +54,8 @@
#include <linux/init.h>
#include <linux/major.h>
#include <support/debug.h>
#include "page_buf_internal.h"
#ifndef EVMS_MAJOR
@@ -76,7 +78,7 @@ pagebuf_cond_lock( /* lock buffer, if not locked */
{
int locked;
-assert(pb->pb_flags & _PBF_LOCKABLE);
+ASSERT(pb->pb_flags & _PBF_LOCKABLE);
locked = down_trylock(&PBP(pb)->pb_sema) == 0;
if (locked) {
@@ -97,7 +99,7 @@ int
pagebuf_lock_value(
page_buf_t *pb)
{
-assert(pb->pb_flags & _PBF_LOCKABLE);
+ASSERT(pb->pb_flags & _PBF_LOCKABLE);
return(atomic_read(&PBP(pb)->pb_sema.count));
}
@@ -113,7 +115,7 @@ int
pagebuf_lock(
page_buf_t *pb)
{
-assert(pb->pb_flags & _PBF_LOCKABLE);
+ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_TRACE(pb, PB_TRACE_REC(lock), 0);
if (atomic_read(&PBP(pb)->pb_io_remaining))
@@ -219,7 +221,7 @@ void
pagebuf_unlock( /* unlock buffer */
page_buf_t *pb) /* buffer to unlock */
{
-assert(pb->pb_flags & _PBF_LOCKABLE);
+ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_CLEAR_OWNER(pb);
up(&PBP(pb)->pb_sema);
PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
......