Commit e9255d6c authored by Qu Wenruo, committed by David Sterba

btrfs: scrub: remove the old scrub recheck code

The old scrub code has a different entry point to verify the content, and
since we have removed the writeback path, we can now start removing the
re-check part, including:

- scrub_recover structure
- scrub_sector::recover member
- function scrub_setup_recheck_block()
- function scrub_recheck_block()
- function scrub_recheck_block_checksum()
- function scrub_repair_block_from_good_copy()
- function scrub_repair_sector_from_good_copy()
- function scrub_is_page_on_raid56()

- function full_stripe_lock()
- function search_full_stripe_lock()
- function get_full_stripe_logical()
- function insert_full_stripe_lock()
- function lock_full_stripe()
- function unlock_full_stripe()
- btrfs_block_group::full_stripe_locks_root member
- btrfs_full_stripe_locks_tree structure
  This infrastructure was used to ensure that RAID56 scrub handled
  recovery and P/Q scrub correctly.

  It is no longer needed: before P/Q scrub we now wait for all the
  involved data stripes to be scrubbed first, and the RAID56 code has an
  internal lock to ensure there is no race within the same full stripe
  (see the sketch after this list).

- function scrub_print_warning()
- function scrub_get_recover()
- function scrub_put_recover()
- function scrub_handle_errored_block()
- function scrub_bio_wait_endio()
- function scrub_submit_raid56_bio_wait()
- function scrub_recheck_block_on_raid56()
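
For reference, a simplified sketch of the locking pattern this
infrastructure provided is shown here. It follows the shape of the
removed scrub.c code, but scrub_one_full_stripe() is a hypothetical
caller, the exact signatures may differ, and error handling, refcount
details and the block group lookup are omitted:

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;		/* start of the locked full stripe */
	u64 refs;		/* scrub contexts currently holding it */
	struct mutex mutex;
};

/*
 * Hypothetical caller showing the old pattern: per-full-stripe mutexes
 * tracked in btrfs_block_group::full_stripe_locks_root serialized
 * recovery and P/Q verification inside one RAID56 full stripe.
 */
static void scrub_one_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	bool locked = false;

	/* Insert (or find) the rb-tree entry for this full stripe and take its mutex. */
	lock_full_stripe(fs_info, bytenr, &locked);

	/* ... read, verify and repair the full stripe ... */

	/* Drop the reference; the entry is freed once its refcount reaches zero. */
	if (locked)
		unlock_full_stripe(fs_info, bytenr, locked);
}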

Two more functions are exported temporarily for later cleanup:

- alloc_scrub_sector()
- alloc_scrub_block()
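
The declarations added to scrub.h (see the diff below) give their
signatures. A hypothetical caller might combine them as follows;
queue_one_sector() is illustrative only and cleanup on failure is
omitted:

static int queue_one_sector(struct scrub_ctx *sctx, struct btrfs_device *dev,
			    u64 logical, u64 physical, int mirror_num)
{
	struct scrub_block *sblock;
	struct scrub_sector *sector;

	/* One scrub_block per mirror being verified. */
	sblock = alloc_scrub_block(sctx, dev, logical, physical,
				   physical /* no dev-replace target here */,
				   mirror_num);
	if (!sblock)
		return -ENOMEM;

	/* Attach a sector covering @logical to the block. */
	sector = alloc_scrub_sector(sblock, logical);
	if (!sector)
		return -ENOMEM;

	/* Queue the sector for reading. */
	return scrub_add_sector_to_rd_bio(sctx, sector);
}
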
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 16f93993
fs/btrfs/block-group.c

@@ -160,15 +160,6 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
 			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
 						  cache);
 
-		/*
-		 * If not empty, someone is still holding mutex of
-		 * full_stripe_lock, which can only be released by caller.
-		 * And it will definitely cause use-after-free when caller
-		 * tries to release full stripe lock.
-		 *
-		 * No better way to resolve, but only to warn.
-		 */
-		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
 		kfree(cache->free_space_ctl);
 		kfree(cache->physical_map);
 		kfree(cache);
@@ -2124,8 +2115,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
 	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
 	atomic_set(&cache->frozen, 0);
 	mutex_init(&cache->free_space_lock);
-	cache->full_stripe_locks_root.root = RB_ROOT;
-	mutex_init(&cache->full_stripe_locks_root.lock);
 
 	return cache;
 }
fs/btrfs/block-group.h

@@ -91,14 +91,6 @@ struct btrfs_caching_control {
 /* Once caching_thread() finds this much free space, it will wake up waiters. */
 #define CACHING_CTL_WAKE_UP SZ_2M
 
-/*
- * Tree to record all locked full stripes of a RAID5/6 block group
- */
-struct btrfs_full_stripe_locks_tree {
-	struct rb_root root;
-	struct mutex lock;
-};
-
 struct btrfs_block_group {
 	struct btrfs_fs_info *fs_info;
 	struct inode *inode;
@@ -229,9 +221,6 @@ struct btrfs_block_group {
 	 */
 	int swap_extents;
 
-	/* Record locked full stripes for RAID5/6 block group */
-	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
-
 	/*
 	 * Allocation offset for the block group to implement sequential
 	 * allocation. This is used only on a zoned filesystem.
fs/btrfs/scrub.c (this diff is collapsed)
fs/btrfs/scrub.h

@@ -16,9 +16,16 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
 /* Temporary declaration, would be deleted later. */
 struct scrub_ctx;
 struct scrub_sector;
+struct scrub_block;
 int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum);
 int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 			       struct scrub_sector *sector);
 void scrub_sector_get(struct scrub_sector *sector);
+struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock, u64 logical);
+struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
+				      struct btrfs_device *dev,
+				      u64 logical, u64 physical,
+				      u64 physical_for_dev_replace,
+				      int mirror_num);
 
 #endif