Commit 1cc127b5 authored by Chris Mason's avatar Chris Mason

Btrfs: Add a thread pool just for submit_bio

If a bio submission is after a lock holder waiting for the bio
on the work queue, it is possible to deadlock.  Move the bios
into their own pool.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent df5b5520
@@ -529,9 +529,13 @@ struct btrfs_fs_info {
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
+	 *
+	 * A third pool does submit_bio to avoid deadlocking with the other
+	 * two
	 */
	struct btrfs_workers workers;
	struct btrfs_workers endio_workers;
+	struct btrfs_workers submit_workers;
	int thread_pool_size;
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
...
@@ -1233,8 +1233,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
+	btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->workers, 1);
+	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
@@ -1343,6 +1345,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
+	btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
	iput(fs_info->btree_inode);
 fail:
@@ -1597,6 +1600,7 @@ int close_ctree(struct btrfs_root *root)
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
+	btrfs_stop_workers(&fs_info->submit_workers);
	iput(fs_info->btree_inode);
 #if 0
...
@@ -2112,7 +2112,8 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
	spin_unlock(&device->io_lock);
	if (should_queue)
-		btrfs_queue_worker(&root->fs_info->workers, &device->work);
+		btrfs_queue_worker(&root->fs_info->submit_workers,
+				   &device->work);
	return 0;
 }
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment