Commit 4ac833da authored by Andrew Morton, committed by Russell King

[PATCH] remove write_mapping_buffers()

When the global buffer LRU was present, dirty ext2 indirect blocks were
automatically scheduled for writeback alongside their data.

I added write_mapping_buffers() to replace this - the idea was to
schedule the indirects close in time to the scheduling of their data.

It works OK for small-to-medium-sized files, but for large, linear writes
it doesn't work: the request queue is completely full of file data, and by
the time we come to schedule the indirects, their neighbouring data has
already been written.

So writeback of really huge files tends to be a bit seeky.

So.  Kill it.  Will fix this problem by other means.
parent e3b12fc1
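
For context, here is a minimal sketch (not part of this patch) of how a
filesystem wired these interfaces together before the removal.  The names
example_dirty_indirect, example_writepages and example_get_block are
illustrative only, and the calls assume the buffer_head/mpage APIs of this
kernel generation:

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>

/* Hypothetical get_block(), standing in for ext2_get_block() and friends. */
extern int example_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

/*
 * When an indirect block is dirtied, associate it with the data file's
 * inode: mark_buffer_dirty_inode() links the buffer onto
 * inode->i_mapping->private_list.
 */
static void example_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
}

/*
 * ->writepages(): start I/O against the associated (indirect) buffers
 * first, then write the data pages, so the requests have a chance to
 * merge in the block layer.
 */
static int example_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	int ret = write_mapping_buffers(mapping);
	int err = mpage_writepages(mapping, wbc, example_get_block);

	return ret ? ret : err;
}

The ext2/ext3 ->writepages() implementations removed below are the second
half of this pattern; the indirect blocks reach private_list via
mark_buffer_dirty_inode() calls in their block-allocation paths.
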
@@ -735,81 +735,6 @@ int sync_mapping_buffers(struct address_space *mapping)
 }
 EXPORT_SYMBOL(sync_mapping_buffers);
 
-/**
- * write_mapping_buffers - Start writeout of a mapping's "associated" buffers.
- * @mapping - the mapping which wants those buffers written.
- *
- * Starts I/O against dirty buffers which are on @mapping->private_list.
- * Those buffers must be backed by @mapping->assoc_mapping.
- *
- * The private_list buffers generally contain filesystem indirect blocks.
- * The idea is that the filesystem can start I/O against the indirects at
- * the same time as running generic_writepages(), so the indirects'
- * I/O will be merged with the data.
- *
- * We sneakily write the buffers in probable tail-to-head order. This is
- * because generic_writepages() writes in probable head-to-tail
- * order. If the file is so huge that the data or the indirects overflow
- * the request queue we will at least get some merging this way.
- *
- * Any clean+unlocked buffers are de-listed. clean/locked buffers must be
- * left on the list for an fsync() to wait on.
- *
- * Couldn't think of a smart way of avoiding livelock, so chose the dumb
- * way instead.
- *
- * FIXME: duplicates fsync_inode_buffers() functionality a bit.
- */
-int write_mapping_buffers(struct address_space *mapping)
-{
-	spinlock_t *lock;
-	struct address_space *buffer_mapping;
-	unsigned nr_to_write;	/* livelock avoidance */
-	struct list_head *lh;
-	int ret = 0;
-
-	if (list_empty(&mapping->private_list))
-		goto out;
-
-	buffer_mapping = mapping->assoc_mapping;
-	lock = &buffer_mapping->private_lock;
-	spin_lock(lock);
-	nr_to_write = 0;
-	lh = mapping->private_list.next;
-	while (lh != &mapping->private_list) {
-		lh = lh->next;
-		nr_to_write++;
-	}
-	nr_to_write *= 2;	/* Allow for some late additions */
-
-	while (nr_to_write-- && !list_empty(&mapping->private_list)) {
-		struct buffer_head *bh;
-
-		bh = BH_ENTRY(mapping->private_list.prev);
-		list_del_init(&bh->b_assoc_buffers);
-		if (!buffer_dirty(bh) && !buffer_locked(bh))
-			continue;
-		/* Stick it on the far end of the list. Order is preserved. */
-		list_add(&bh->b_assoc_buffers, &mapping->private_list);
-		if (test_set_buffer_locked(bh))
-			continue;
-		get_bh(bh);
-		spin_unlock(lock);
-		if (test_clear_buffer_dirty(bh)) {
-			bh->b_end_io = end_buffer_io_sync;
-			submit_bh(WRITE, bh);
-		} else {
-			unlock_buffer(bh);
-			put_bh(bh);
-		}
-		spin_lock(lock);
-	}
-	spin_unlock(lock);
-out:
-	return ret;
-}
-EXPORT_SYMBOL(write_mapping_buffers);
 
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -629,14 +629,7 @@ ext2_direct_IO(int rw, struct inode *inode, const struct iovec *iov,
 static int
 ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-	int ret;
-	int err;
-
-	ret = write_mapping_buffers(mapping);
-	err = mpage_writepages(mapping, wbc, ext2_get_block);
-	if (!ret)
-		ret = err;
-	return ret;
+	return mpage_writepages(mapping, wbc, ext2_get_block);
 }
 
 struct address_space_operations ext2_aops = {
@@ -1477,14 +1477,7 @@ struct address_space_operations ext3_aops = {
 static int
 ext3_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-	int ret;
-	int err;
-
-	ret = write_mapping_buffers(mapping);
-	err = mpage_writepages(mapping, wbc, ext3_get_block);
-	if (!ret)
-		ret = err;
-	return ret;
+	return mpage_writepages(mapping, wbc, ext3_get_block);
 }
 
 #endif
@@ -140,7 +140,6 @@ void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
 void buffer_insert_list(spinlock_t *lock,
 			struct buffer_head *, struct list_head *);
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int write_mapping_buffers(struct address_space *mapping);
 int inode_has_buffers(struct inode *);
 void invalidate_inode_buffers(struct inode *);
 int fsync_buffers_list(spinlock_t *lock, struct list_head *);