Commit 4f990f49 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] remove buffer_error()

From: Jeff Garzik <jgarzik@pobox.com>

It was debug code, no longer required.
parent d012f668
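
For context, buffer_error() was a thin macro over __buffer_error(__FILE__, __LINE__): a rate-limited reporter that printed the offending file and line plus a stack trace for the first handful of hits, then went quiet. Below is a minimal userspace sketch of that pattern, using glibc's backtrace facilities as a stand-in for the kernel's dump_stack(); the names here are illustrative, not the kernel API.

#include <stdio.h>
#include <execinfo.h>

/*
 * Rate-limited error report mirroring the removed __buffer_error():
 * the first reports print a file:line tag plus a stack trace, then
 * the helper goes silent so a looping caller cannot flood the log.
 */
static void report_error(const char *file, int line)
{
	static int enough;
	void *frames[16];
	int n;

	if (enough > 10)
		return;
	enough++;
	fprintf(stderr, "buffer layer error at %s:%d\n", file, line);

	/* Userspace stand-in for the kernel's dump_stack(). */
	n = backtrace(frames, 16);
	backtrace_symbols_fd(frames, n, 2);
}

#define report() report_error(__FILE__, __LINE__)

int main(void)
{
	report();	/* later calls stop printing after the limit */
	return 0;
}

Where a check was still worth keeping, the hunks below switch the call site to the kernel's standard BUG(), BUG_ON() or WARN_ON() primitives instead of the private helper.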
@@ -51,25 +51,6 @@ static struct bh_wait_queue_head {
 	wait_queue_head_t wqh;
 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
 
-/*
- * Debug/devel support stuff
- */
-
-void __buffer_error(char *file, int line)
-{
-	static int enough;
-
-	if (enough > 10)
-		return;
-	enough++;
-	printk("buffer layer error at %s:%d\n", file, line);
-#ifndef CONFIG_KALLSYMS
-	printk("Pass this trace through ksymoops for reporting\n");
-#endif
-	dump_stack();
-}
-EXPORT_SYMBOL(__buffer_error);
-
 inline void
 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 {
@@ -99,17 +80,6 @@ EXPORT_SYMBOL(wake_up_buffer);
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
-	/*
-	 * unlock_buffer against a zero-count bh is a bug, if the page
-	 * is not locked.  Because then nothing protects the buffer's
-	 * waitqueue, which is used here. (Well.  Other locked buffers
-	 * against the page will pin it.  But complain anyway).
-	 */
-	if (atomic_read(&bh->b_count) == 0 &&
-			!PageLocked(bh->b_page) &&
-			!PageWriteback(bh->b_page))
-		buffer_error();
-
 	clear_buffer_locked(bh);
 	smp_mb__after_clear_bit();
 	wake_up_buffer(bh);
@@ -125,10 +95,6 @@ void __wait_on_buffer(struct buffer_head * bh)
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
 	DEFINE_WAIT(wait);
 
-	if (atomic_read(&bh->b_count) == 0 &&
-			(!bh->b_page || !PageLocked(bh->b_page)))
-		buffer_error();
-
 	do {
 		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
 		if (buffer_locked(bh)) {
@@ -146,8 +112,6 @@ void __wait_on_buffer(struct buffer_head * bh)
 static void
 __set_page_buffers(struct page *page, struct buffer_head *head)
 {
-	if (page_has_buffers(page))
-		buffer_error();
 	page_cache_get(page);
 	SetPagePrivate(page);
 	page->private = (unsigned long)head;
@@ -433,10 +397,12 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	buffer_error();
-	printk("block=%llu, b_blocknr=%llu\n",
+	printk("__find_get_block_slow() failed. "
+		"block=%llu, b_blocknr=%llu\n",
 		(unsigned long long)block, (unsigned long long)bh->b_blocknr);
 	printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+	printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
 	page_cache_release(page);
@@ -847,10 +813,7 @@ int __set_page_dirty_buffers(struct page *page)
 		struct buffer_head *bh = head;
 		do {
-			if (buffer_uptodate(bh))
-				set_buffer_dirty(bh);
-			else
-				buffer_error();
+			set_buffer_dirty(bh);
 			bh = bh->b_this_page;
 		} while (bh != head);
 	}
@@ -1151,7 +1114,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	return page;
 
 failed:
-	buffer_error();
+	BUG();
 	unlock_page(page);
 	page_cache_release(page);
 	return NULL;
@@ -1247,8 +1210,6 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
-	if (!buffer_uptodate(bh))
-		buffer_error();
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
 		__set_page_dirty_nobuffers(bh->b_page);
 }
@@ -1267,7 +1228,7 @@ void __brelse(struct buffer_head * buf)
 		return;
 	}
 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
-	buffer_error();		/* For the stack backtrace */
+	WARN_ON(1);
 }
 
 /*
@@ -1294,8 +1255,6 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
 		unlock_buffer(bh);
 		return bh;
 	} else {
-		if (buffer_dirty(bh))
-			buffer_error();
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh);
@@ -1686,10 +1645,6 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 	old_bh = __find_get_block_slow(bdev, block, 0);
 	if (old_bh) {
-#if 0	/* This happens.  Later. */
-		if (buffer_dirty(old_bh))
-			buffer_error();
-#endif
 		clear_buffer_dirty(old_bh);
 		wait_on_buffer(old_bh);
 		clear_buffer_req(old_bh);
@@ -1737,8 +1692,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, 1 << inode->i_blkbits,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}
@@ -1767,9 +1720,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			 * mapped buffers outside i_size will occur, because
 			 * this page can be outside i_size when there is a
 			 * truncate in progress.
-			 *
-			 * if (buffer_mapped(bh))
-			 *	buffer_error();
 			 */
 			/*
 			 * The buffer was zeroed by block_write_full_page()
@@ -1777,8 +1727,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
-			if (buffer_new(bh))
-				buffer_error();
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;
@@ -1811,8 +1759,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			if (!buffer_uptodate(bh))
-				buffer_error();
 			mark_buffer_async_write(bh);
 		} else {
 			unlock_buffer(bh);
@@ -1942,8 +1888,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 				unmap_underlying_metadata(bh->b_bdev,
 							bh->b_blocknr);
 				if (PageUptodate(page)) {
-					if (!buffer_mapped(bh))
-						buffer_error();
 					set_buffer_uptodate(bh);
 					continue;
 				}
@@ -2001,8 +1945,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			void *kaddr;
 
 			clear_buffer_new(bh);
-			if (buffer_uptodate(bh))
-				buffer_error();
 			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr+block_start, 0, bh->b_size);
 			kunmap_atomic(kaddr, KM_USER0);
@@ -2068,8 +2010,6 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	if (!PageLocked(page))
 		PAGE_BUG(page);
-	if (PageUptodate(page))
-		buffer_error();
-
 	blocksize = 1 << inode->i_blkbits;
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -2692,13 +2632,6 @@ void submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(!buffer_mapped(bh));
 	BUG_ON(!bh->b_end_io);
 
-	if ((rw == READ || rw == READA) && buffer_uptodate(bh))
-		buffer_error();
-	if (rw == WRITE && !buffer_uptodate(bh))
-		buffer_error();
-	if (rw == READ && buffer_dirty(bh))
-		buffer_error();
-
 	/* Only clear out a write error when rewriting */
 	if (test_set_buffer_req(bh) && rw == WRITE)
 		clear_buffer_write_io_error(bh);
@@ -2797,21 +2730,6 @@ void sync_dirty_buffer(struct buffer_head *bh)
 	}
 }
 
-/*
- * Sanity checks for try_to_free_buffers.
- */
-static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
-{
-	if (!buffer_uptodate(bh) && !buffer_req(bh)) {
-		if (PageUptodate(page) && page->mapping
-			&& buffer_mapped(bh)	/* discard_buffer */
-			&& S_ISBLK(page->mapping->host->i_mode))
-		{
-			buffer_error();
-		}
-	}
-}
-
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
  * are unused, and releases them if so.
@@ -2847,7 +2765,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	bh = head;
 	do {
-		check_ttfb_buffer(page, bh);
 		if (buffer_write_io_error(bh))
 			set_bit(AS_EIO, &page->mapping->flags);
 		if (buffer_busy(bh))
@@ -2857,9 +2774,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 		bh = bh->b_this_page;
 	} while (bh != head);
 
-	if (!was_uptodate && PageUptodate(page) && !PageError(page))
-		buffer_error();
-
 	do {
 		struct buffer_head *next = bh->b_this_page;
......
@@ -1358,8 +1358,6 @@ static int ext3_ordered_writepage(struct page *page,
 	}
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, inode->i_sb->s_blocksize,
 				(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}
......
@@ -485,8 +485,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 			break;
 		block_in_file++;
 	}
-	if (page_block == 0)
-		buffer_error();
+	BUG_ON(page_block == 0);
 	first_unmapped = page_block;
......
@@ -1340,8 +1340,6 @@ static int ntfs_prepare_nonresident_write(struct page *page,
 			void *kaddr;
 
 			clear_buffer_new(bh);
-			if (buffer_uptodate(bh))
-				buffer_error();
 			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr + block_start, 0, bh->b_size);
 			kunmap_atomic(kaddr, KM_USER0);
......
@@ -1925,7 +1925,6 @@ static int map_block_for_writepage(struct inode *inode,
 	th.t_trans_id = 0;
 
 	if (!buffer_uptodate(bh_result)) {
-		buffer_error();
 		return -EIO;
 	}
@@ -2057,8 +2056,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
 	 * in the BH_Uptodate is just a sanity check.
 	 */
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, inode->i_sb->s_blocksize,
 				(1 << BH_Dirty) | (1 << BH_Uptodate));
 	}
@@ -2120,8 +2117,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
 		}
 	}
 	if (test_clear_buffer_dirty(bh)) {
-		if (!buffer_uptodate(bh))
-			buffer_error();
 		mark_buffer_async_write(bh);
 	} else {
 		unlock_buffer(bh);
......
@@ -61,13 +61,6 @@ struct buffer_head {
 	struct list_head b_assoc_buffers; /* associated with another mapping */
 };
 
-/*
- * Debug
- */
-void __buffer_error(char *file, int line);
-#define buffer_error() __buffer_error(__FILE__, __LINE__)
-
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
......