Commit cb7ab021 authored by Wang Shilong, committed by Chris Mason

Btrfs: wrap repeated code into scrub_blocked_if_needed()

Just wrap the same repeated code into one function, scrub_blocked_if_needed().

This also changes the ordering: we now wait for @workers_pending to drop to 0
before we wake up the committing transaction (atomic_inc(@scrubs_paused)), so
we must be careful not to deadlock here:

Thread 1			Thread 2
				|->btrfs_commit_transaction()
					|->set trans type(COMMIT_DOING)
					|->btrfs_scrub_pause() (blocked)
|->join_transaction(blocked)

Move btrfs_scrub_pause() before setting the trans type, which means we can
still join a transaction while the committing transaction is blocked.
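
To illustrate, the resulting pause path looks roughly like this (a condensed
sketch of the hunks below, not verbatim kernel source):

	/* scrub side: announce the pause first so a committer can make
	 * progress, then wait until scrub_pause_req is cleared */
	static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
	{
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);

		mutex_lock(&fs_info->scrub_lock);
		__scrub_blocked_if_needed(fs_info);
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);

		wake_up(&fs_info->scrub_pause_wait);
	}

	/* commit side (btrfs_commit_transaction): pause scrub while other
	 * writers can still join, and only then block out further joins */
	btrfs_scrub_pause(root);
	/* ... set trans type (COMMIT_DOING) ... */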
Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Suggested-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 3cb0929a
@@ -256,6 +256,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
@@ -270,7 +271,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 	wake_up(&sctx->list_wait);
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 {
 	while (atomic_read(&fs_info->scrub_pause_req)) {
 		mutex_unlock(&fs_info->scrub_lock);
@@ -280,6 +281,19 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 	}
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	mutex_lock(&fs_info->scrub_lock);
+	__scrub_blocked_if_needed(fs_info);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+
+	wake_up(&fs_info->scrub_pause_wait);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -2295,8 +2309,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
 
-	atomic_inc(&fs_info->scrubs_paused);
-	wake_up(&fs_info->scrub_pause_wait);
+	scrub_blocked_if_needed(fs_info);
 
 	/* FIXME it might be better to start readahead at commit root */
 	key_start.objectid = logical;
@@ -2320,12 +2333,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	if (!IS_ERR(reada2))
 		btrfs_reada_wait(reada2);
 
-	mutex_lock(&fs_info->scrub_lock);
-	scrub_blocked_if_needed(fs_info);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	wake_up(&fs_info->scrub_pause_wait);
-
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
@@ -2362,15 +2369,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-			atomic_inc(&fs_info->scrubs_paused);
-			wake_up(&fs_info->scrub_pause_wait);
-
-			mutex_lock(&fs_info->scrub_lock);
-			scrub_blocked_if_needed(fs_info);
-			atomic_dec(&fs_info->scrubs_paused);
-			mutex_unlock(&fs_info->scrub_lock);
-			wake_up(&fs_info->scrub_pause_wait);
+			scrub_blocked_if_needed(fs_info);
 		}
 
 		key.objectid = logical;
@@ -2685,17 +2684,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-
-		mutex_lock(&fs_info->scrub_lock);
-		scrub_blocked_if_needed(fs_info);
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-		wake_up(&fs_info->scrub_pause_wait);
+		scrub_blocked_if_needed(fs_info);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -2912,7 +2903,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		 * checking @scrub_pause_req here, we can avoid
 		 * race between committing transaction and scrubbing.
 		 */
-		scrub_blocked_if_needed(fs_info);
+		__scrub_blocked_if_needed(fs_info);
 		atomic_inc(&fs_info->scrubs_running);
 		mutex_unlock(&fs_info->scrub_lock);
...
@@ -1748,6 +1748,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		goto cleanup_transaction;
 
 	btrfs_wait_delalloc_flush(root->fs_info);
+
+	btrfs_scrub_pause(root);
 	/*
 	 * Ok now we need to make sure to block out any other joins while we
 	 * commit the transaction.  We could have started a join before setting
@@ -1812,7 +1814,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	WARN_ON(cur_trans != trans->transaction);
 
-	btrfs_scrub_pause(root);
 	/* btrfs_commit_tree_roots is responsible for getting the
 	 * various roots consistent with each other.  Every pointer
 	 * in the tree of tree roots has to point to the most up to date
...