Commit b8c2f347 authored by Christoph Hellwig, committed by Jens Axboe

writeback: simplify wakeup_flusher_threads

bdi_writeback_all only has one caller, so fold it to simplify the code and
flatten the call stack.
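
To illustrate the shape of the change with a minimal, self-contained sketch (plain userspace C, all names made up; this is not kernel code and not part of the patch):

/*
 * A static helper with exactly one caller is folded into that caller,
 * removing the wrapper frame and a parameter that was only ever NULL.
 * Build with -DBEFORE to compare the pre-fold shape.
 */
#include <stddef.h>
#include <stdio.h>

#ifdef BEFORE
/* Before: the public entry point is a thin wrapper around a helper. */
static void flush_all(void *ctx, long nr_pages)	/* ctx was always NULL */
{
	(void)ctx;
	printf("queueing writeback of %ld pages\n", nr_pages);
}

void kick_flushers(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = 1000;	/* stand-in for "all dirty pages" */
	flush_all(NULL, nr_pages);
}
#else
/* After: the helper body lives directly in its single caller. */
void kick_flushers(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = 1000;	/* stand-in for "all dirty pages" */
	printf("queueing writeback of %ld pages\n", nr_pages);
}
#endif

int main(void)
{
	kick_flushers(0);	/* "write back the whole world" */
	kick_flushers(8);	/* write back a bounded amount */
	return 0;
}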
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent d19de7ed
@@ -920,42 +920,32 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_queue_work_onstack().
+ * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
+ * the whole world.
  */
-static void bdi_writeback_all(struct super_block *sb, long nr_pages)
+void wakeup_flusher_threads(long nr_pages)
 {
-	struct backing_dev_info *bdi;
 	struct wb_writeback_args args = {
-		.sb		= sb,
-		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 	};
+	struct backing_dev_info *bdi;
 
-	rcu_read_lock();
+	if (nr_pages) {
+		args.nr_pages = nr_pages;
+	} else {
+		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
+				global_page_state(NR_UNSTABLE_NFS);
+	}
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
 		bdi_alloc_queue_work(bdi, &args);
 	}
 	rcu_read_unlock();
 }
 
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world.
- */
-void wakeup_flusher_threads(long nr_pages)
-{
-	if (nr_pages == 0)
-		nr_pages = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-	bdi_writeback_all(NULL, nr_pages);
-}
-
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
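
A brief usage sketch of the folded entry point, reflecting the comment in the new code; the caller shown is hypothetical and not part of this patch:

/*
 * Hypothetical in-kernel caller, for illustration only (not part of this
 * patch). wakeup_flusher_threads() is the function introduced above; the
 * surrounding function name is made up.
 */
#include <linux/writeback.h>

static void example_kick_background_writeback(long nr_pages)
{
	/*
	 * Pass 0 to request writeback of every dirty page currently
	 * accounted (NR_FILE_DIRTY + NR_UNSTABLE_NFS), or a positive
	 * count to bound the amount of WB_SYNC_NONE writeback queued.
	 */
	wakeup_flusher_threads(nr_pages);
}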