Commit b6bfebc1 authored by Stefan Behrens, committed by Josef Bacik

Btrfs: cleanup scrub bio and worker wait code

Just move some code into functions to make everything more readable.
Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
parent 34f5c8e9
 /*
- * Copyright (C) 2011 STRATO. All rights reserved.
+ * Copyright (C) 2011, 2012 STRATO. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -104,8 +104,8 @@ struct scrub_ctx {
 	struct btrfs_root	*dev_root;
 	int			first_free;
 	int			curr;
-	atomic_t		in_flight;
-	atomic_t		fixup_cnt;
+	atomic_t		bios_in_flight;
+	atomic_t		workers_pending;
 	spinlock_t		list_lock;
 	wait_queue_head_t	list_wait;
 	u16			csum_size;
@@ -146,6 +146,10 @@ struct scrub_warning {
 };
 
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
 				     struct btrfs_mapping_tree *map_tree,
@@ -184,6 +188,59 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work);
 static void scrub_block_complete(struct scrub_block *sblock);
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
+{
+	atomic_inc(&sctx->bios_in_flight);
+}
+
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
+{
+	atomic_dec(&sctx->bios_in_flight);
+	wake_up(&sctx->list_wait);
+}
+
+/*
+ * used for workers that require transaction commits (i.e., for the
+ * NOCOW case)
+ */
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
+{
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+	/*
+	 * increment scrubs_running to prevent cancel requests from
+	 * completing as long as a worker is running. we must also
+	 * increment scrubs_paused to prevent deadlocking on pause
+	 * requests used for transactions commits (as the worker uses a
+	 * transaction context). it is safe to regard the worker
+	 * as paused for all matters practical. effectively, we only
+	 * avoid cancellation requests from completing.
+	 */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_inc(&fs_info->scrubs_running);
+	atomic_inc(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_inc(&sctx->workers_pending);
+}
+
+/* used for workers that require transaction commits */
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
+{
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+	/*
+	 * see scrub_pending_trans_workers_inc() why we're pretending
+	 * to be paused in the scrub counters
+	 */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_dec(&fs_info->scrubs_running);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_dec(&sctx->workers_pending);
+	wake_up(&fs_info->scrub_pause_wait);
+	wake_up(&sctx->list_wait);
+}
+
 static void scrub_free_csums(struct scrub_ctx *sctx)
 {
 	while (!list_empty(&sctx->csum_list)) {
@@ -264,8 +321,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
 	sctx->nodesize = dev->dev_root->nodesize;
 	sctx->leafsize = dev->dev_root->leafsize;
 	sctx->sectorsize = dev->dev_root->sectorsize;
-	atomic_set(&sctx->in_flight, 0);
-	atomic_set(&sctx->fixup_cnt, 0);
+	atomic_set(&sctx->bios_in_flight, 0);
+	atomic_set(&sctx->workers_pending, 0);
 	atomic_set(&sctx->cancel_req, 0);
 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	INIT_LIST_HEAD(&sctx->csum_list);
@@ -609,14 +666,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
 	btrfs_free_path(path);
 	kfree(fixup);
 
-	/* see caller why we're pretending to be paused in the scrub counters */
-	mutex_lock(&fs_info->scrub_lock);
-	atomic_dec(&fs_info->scrubs_running);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	atomic_dec(&sctx->fixup_cnt);
-	wake_up(&fs_info->scrub_pause_wait);
-	wake_up(&sctx->list_wait);
+	scrub_pending_trans_workers_dec(sctx);
 }
 
 /*
@@ -789,20 +839,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	fixup_nodatasum->logical = logical;
 	fixup_nodatasum->root = fs_info->extent_root;
 	fixup_nodatasum->mirror_num = failed_mirror_index + 1;
-	/*
-	 * increment scrubs_running to prevent cancel requests from
-	 * completing as long as a fixup worker is running. we must also
-	 * increment scrubs_paused to prevent deadlocking on pause
-	 * requests used for transactions commits (as the worker uses a
-	 * transaction context). it is safe to regard the fixup worker
-	 * as paused for all matters practical. effectively, we only
-	 * avoid cancellation requests from completing.
-	 */
-	mutex_lock(&fs_info->scrub_lock);
-	atomic_inc(&fs_info->scrubs_running);
-	atomic_inc(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	atomic_inc(&sctx->fixup_cnt);
+	scrub_pending_trans_workers_inc(sctx);
 	fixup_nodatasum->work.func = scrub_fixup_nodatasum;
 	btrfs_queue_worker(&fs_info->scrub_workers,
 			   &fixup_nodatasum->work);
@@ -1491,7 +1528,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
 	sbio = sctx->bios[sctx->curr];
 	sctx->curr = -1;
-	atomic_inc(&sctx->in_flight);
+	scrub_pending_bio_inc(sctx);
 
 	btrfsic_submit_bio(READ, sbio->bio);
 }
@@ -1692,8 +1729,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	sbio->next_free = sctx->first_free;
 	sctx->first_free = sbio->index;
 	spin_unlock(&sctx->list_lock);
-	atomic_dec(&sctx->in_flight);
-	wake_up(&sctx->list_wait);
+	scrub_pending_bio_dec(sctx);
 }
 
 static void scrub_block_complete(struct scrub_block *sblock)
@@ -1863,7 +1899,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		logical = base + offset;
 		wait_event(sctx->list_wait,
-			   atomic_read(&sctx->in_flight) == 0);
+			   atomic_read(&sctx->bios_in_flight) == 0);
 		atomic_inc(&fs_info->scrubs_paused);
 		wake_up(&fs_info->scrub_pause_wait);
@@ -1928,7 +1964,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			/* push queued extents */
 			scrub_submit(sctx);
 			wait_event(sctx->list_wait,
-				   atomic_read(&sctx->in_flight) == 0);
+				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_inc(&fs_info->scrubs_paused);
 			wake_up(&fs_info->scrub_pause_wait);
 			mutex_lock(&fs_info->scrub_lock);
@@ -2218,7 +2254,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
 		if (ret)
 			return ret;
 	}
-	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 
 	return 0;
 }
@@ -2363,11 +2399,11 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
 	if (!ret)
 		ret = scrub_enumerate_chunks(sctx, dev, start, end);
 
-	wait_event(sctx->list_wait, atomic_read(&sctx->in_flight) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 	atomic_dec(&fs_info->scrubs_running);
 	wake_up(&fs_info->scrub_pause_wait);
 
-	wait_event(sctx->list_wait, atomic_read(&sctx->fixup_cnt) == 0);
+	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
 
 	if (progress)
 		memcpy(progress, &sctx->stat, sizeof(*progress));
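
The helpers introduced in this patch all follow the same idiom: an atomic counter of outstanding work paired with a wait queue, where submitters increment, completion paths decrement and wake any waiter, and teardown simply waits for the count to reach zero. The sketch below is a small userspace analogue of that idiom using pthreads rather than the kernel's atomic_t and wait_event(); every identifier in it (pending_inc, pending_dec, wait_for_idle, worker) is illustrative only and not part of the patch.

/*
 * Illustrative userspace analogue of the counter-plus-waitqueue idiom
 * wrapped by scrub_pending_bio_inc()/scrub_pending_bio_dec().  Not
 * kernel code; pthreads stand in for atomic_t and wait_event().
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending;			/* outstanding work items */

static void pending_inc(void)		/* cf. scrub_pending_bio_inc() */
{
	pthread_mutex_lock(&lock);
	pending++;
	pthread_mutex_unlock(&lock);
}

static void pending_dec(void)		/* cf. scrub_pending_bio_dec() */
{
	pthread_mutex_lock(&lock);
	pending--;
	pthread_cond_broadcast(&cond);	/* plays the role of wake_up() */
	pthread_mutex_unlock(&lock);
}

static void wait_for_idle(void)		/* cf. wait_event(..., == 0) */
{
	pthread_mutex_lock(&lock);
	while (pending != 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	/* completion path: drop the reference and wake the waiter */
	pending_dec();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pending_inc();			/* submission path */
	pthread_create(&t, NULL, worker, NULL);
	wait_for_idle();		/* teardown waits for zero */
	pthread_join(&t, NULL);
	printf("all outstanding work drained\n");
	return 0;
}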