Commit 10f11900 authored by Zhao Lei's avatar Zhao Lei Committed by Chris Mason

Btrfs: Include map_type in raid_bio

Current code uses many kinds of "clever" ways to determine the operation
target's raid type, such as:
  raid_map != NULL
  or
  raid_map[MAX_NR] == RAID[56]_Q_STRIPE

To make the code easier to maintain, this patch puts the raid type into
the bbio, so we can always get the raid type from the bbio in a "stupid"
way.
Signed-off-by: default avatarZhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: default avatarMiao Xie <miaox@cn.fujitsu.com>
Signed-off-by: default avatarChris Mason <clm@fb.com>
parent be50a8dd
...@@ -994,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, ...@@ -994,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->bio_pages = p + sizeof(struct page *) * num_pages;
rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
nr_data = real_stripes - 1;
else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
nr_data = real_stripes - 2; nr_data = real_stripes - 2;
else else
nr_data = real_stripes - 1; BUG();
rbio->nr_data = nr_data; rbio->nr_data = nr_data;
return rbio; return rbio;
...@@ -1850,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1850,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
} }
/* all raid6 handling here */ /* all raid6 handling here */
if (rbio->bbio->raid_map[rbio->real_stripes - 1] == if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
RAID6_Q_STRIPE) {
/* /*
* single failure, rebuild from parity raid5 * single failure, rebuild from parity raid5
* style * style
......
...@@ -1256,19 +1256,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) ...@@ -1256,19 +1256,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{ {
if (bbio->raid_map) { if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; return 2;
else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) return 3;
return 3; else
else
return 2;
} else {
return (int)bbio->num_stripes; return (int)bbio->num_stripes;
}
} }
static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
u64 *raid_map,
u64 mapped_length, u64 mapped_length,
int nstripes, int mirror, int nstripes, int mirror,
int *stripe_index, int *stripe_index,
...@@ -1276,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, ...@@ -1276,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
{ {
int i; int i;
if (raid_map) { if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
/* RAID5/6 */ /* RAID5/6 */
for (i = 0; i < nstripes; i++) { for (i = 0; i < nstripes; i++) {
if (raid_map[i] == RAID6_Q_STRIPE || if (raid_map[i] == RAID6_Q_STRIPE ||
...@@ -1350,6 +1347,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, ...@@ -1350,6 +1347,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
for (mirror_index = 0; mirror_index < nmirrors; for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) { mirror_index++) {
struct scrub_block *sblock; struct scrub_block *sblock;
...@@ -1370,7 +1368,9 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, ...@@ -1370,7 +1368,9 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
sblock->pagev[page_index] = page; sblock->pagev[page_index] = page;
page->logical = logical; page->logical = logical;
scrub_stripe_index_and_offset(logical, bbio->raid_map, scrub_stripe_index_and_offset(logical,
bbio->map_type,
bbio->raid_map,
mapped_length, mapped_length,
bbio->num_stripes - bbio->num_stripes -
bbio->num_tgtdevs, bbio->num_tgtdevs,
...@@ -1419,7 +1419,9 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) ...@@ -1419,7 +1419,9 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
static inline int scrub_is_page_on_raid56(struct scrub_page *page) static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{ {
return page->recover && page->recover->bbio->raid_map; return page->recover &&
(page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6));
} }
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
......
...@@ -5453,6 +5453,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5453,6 +5453,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
} }
*bbio_ret = bbio; *bbio_ret = bbio;
bbio->map_type = map->type;
bbio->num_stripes = num_stripes; bbio->num_stripes = num_stripes;
bbio->max_errors = max_errors; bbio->max_errors = max_errors;
bbio->mirror_num = mirror_num; bbio->mirror_num = mirror_num;
......
...@@ -298,6 +298,7 @@ struct btrfs_bio { ...@@ -298,6 +298,7 @@ struct btrfs_bio {
atomic_t refs; atomic_t refs;
atomic_t stripes_pending; atomic_t stripes_pending;
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
u64 map_type; /* get from map_lookup->type */
bio_end_io_t *end_io; bio_end_io_t *end_io;
struct bio *orig_bio; struct bio *orig_bio;
unsigned long flags; unsigned long flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment