Commit 8e5cfb55 authored by Zhao Lei's avatar Zhao Lei Committed by Chris Mason

Btrfs: Make raid_map array be inlined in btrfs_bio structure

It makes the code simpler and clearer: we no longer need to take care
to free the bbio and the raid_map together.
Signed-off-by: default avatarMiao Xie <miaox@cn.fujitsu.com>
Signed-off-by: default avatarZhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: default avatarChris Mason <clm@fb.com>
parent cc7539ed
...@@ -79,13 +79,6 @@ struct btrfs_raid_bio { ...@@ -79,13 +79,6 @@ struct btrfs_raid_bio {
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
/*
* logical block numbers for the start of each stripe
* The last one or two are p/q. These are sorted,
* so raid_map[0] is the start of our full stripe
*/
u64 *raid_map;
/* while we're doing rmw on a stripe /* while we're doing rmw on a stripe
* we put it into a hash table so we can * we put it into a hash table so we can
* lock the stripe and merge more rbios * lock the stripe and merge more rbios
...@@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) ...@@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
*/ */
static int rbio_bucket(struct btrfs_raid_bio *rbio) static int rbio_bucket(struct btrfs_raid_bio *rbio)
{ {
u64 num = rbio->raid_map[0]; u64 num = rbio->bbio->raid_map[0];
/* /*
* we shift down quite a bit. We're using byte * we shift down quite a bit. We're using byte
...@@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, ...@@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
test_bit(RBIO_CACHE_BIT, &cur->flags)) test_bit(RBIO_CACHE_BIT, &cur->flags))
return 0; return 0;
if (last->raid_map[0] != if (last->bbio->raid_map[0] !=
cur->raid_map[0]) cur->bbio->raid_map[0])
return 0; return 0;
/* we can't merge with different operations */ /* we can't merge with different operations */
...@@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) ...@@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
spin_lock_irqsave(&h->lock, flags); spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) { list_for_each_entry(cur, &h->hash_list, hash_list) {
walk++; walk++;
if (cur->raid_map[0] == rbio->raid_map[0]) { if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
spin_lock(&cur->bio_list_lock); spin_lock(&cur->bio_list_lock);
/* can we steal this cached rbio's pages? */ /* can we steal this cached rbio's pages? */
...@@ -842,18 +835,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) ...@@ -842,18 +835,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
} }
static inline void static inline void
__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) __free_bbio(struct btrfs_bio *bbio, int need)
{ {
if (need) { if (need)
kfree(raid_map);
kfree(bbio); kfree(bbio);
}
} }
static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) static inline void free_bbio(struct btrfs_raid_bio *rbio)
{ {
__free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, __free_bbio(rbio->bbio,
!test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
} }
static void __free_raid_bio(struct btrfs_raid_bio *rbio) static void __free_raid_bio(struct btrfs_raid_bio *rbio)
...@@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) ...@@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
} }
} }
free_bbio_and_raid_map(rbio); free_bbio(rbio);
kfree(rbio); kfree(rbio);
} }
...@@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) ...@@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* this does not allocate any pages for rbio->pages. * this does not allocate any pages for rbio->pages.
*/ */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len)
u64 stripe_len)
{ {
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
int nr_data = 0; int nr_data = 0;
...@@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, ...@@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
INIT_LIST_HEAD(&rbio->stripe_cache); INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list); INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio; rbio->bbio = bbio;
rbio->raid_map = raid_map;
rbio->fs_info = root->fs_info; rbio->fs_info = root->fs_info;
rbio->stripe_len = stripe_len; rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages; rbio->nr_pages = num_pages;
...@@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, ...@@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->bio_pages = p + sizeof(struct page *) * num_pages;
rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
nr_data = real_stripes - 2; nr_data = real_stripes - 2;
else else
nr_data = real_stripes - 1; nr_data = real_stripes - 1;
...@@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) ...@@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
spin_lock_irq(&rbio->bio_list_lock); spin_lock_irq(&rbio->bio_list_lock);
bio_list_for_each(bio, &rbio->bio_list) { bio_list_for_each(bio, &rbio->bio_list) {
start = (u64)bio->bi_iter.bi_sector << 9; start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->raid_map[0]; stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_CACHE_SHIFT; page_index = stripe_offset >> PAGE_CACHE_SHIFT;
for (i = 0; i < bio->bi_vcnt; i++) { for (i = 0; i < bio->bi_vcnt; i++) {
...@@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, ...@@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
logical <<= 9; logical <<= 9;
for (i = 0; i < rbio->nr_data; i++) { for (i = 0; i < rbio->nr_data; i++) {
stripe_start = rbio->raid_map[i]; stripe_start = rbio->bbio->raid_map[i];
if (logical >= stripe_start && if (logical >= stripe_start &&
logical < stripe_start + rbio->stripe_len) { logical < stripe_start + rbio->stripe_len) {
return i; return i;
...@@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) ...@@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
* our main entry point for writes from the rest of the FS. * our main entry point for writes from the rest of the FS.
*/ */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio, int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len)
u64 stripe_len)
{ {
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL; struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb; struct blk_plug_cb *cb;
int ret; int ret;
rbio = alloc_rbio(root, bbio, raid_map, stripe_len); rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio)) { if (IS_ERR(rbio)) {
__free_bbio_and_raid_map(bbio, raid_map, 1); __free_bbio(bbio, 1);
return PTR_ERR(rbio); return PTR_ERR(rbio);
} }
bio_list_add(&rbio->bio_list, bio); bio_list_add(&rbio->bio_list, bio);
...@@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
} }
/* all raid6 handling here */ /* all raid6 handling here */
if (rbio->raid_map[rbio->real_stripes - 1] == if (rbio->bbio->raid_map[rbio->real_stripes - 1] ==
RAID6_Q_STRIPE) { RAID6_Q_STRIPE) {
/* /*
...@@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
* here due to a crc mismatch and we can't give them the * here due to a crc mismatch and we can't give them the
* data they want * data they want
*/ */
if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->raid_map[faila] == RAID5_P_STRIPE) { if (rbio->bbio->raid_map[faila] ==
RAID5_P_STRIPE) {
err = -EIO; err = -EIO;
goto cleanup; goto cleanup;
} }
...@@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
goto pstripe; goto pstripe;
} }
if (rbio->raid_map[failb] == RAID5_P_STRIPE) { if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
raid6_datap_recov(rbio->real_stripes, raid6_datap_recov(rbio->real_stripes,
PAGE_SIZE, faila, pointers); PAGE_SIZE, faila, pointers);
} else { } else {
...@@ -2156,15 +2145,15 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) ...@@ -2156,15 +2145,15 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* of the drive. * of the drive.
*/ */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len,
u64 stripe_len, int mirror_num, int generic_io) int mirror_num, int generic_io)
{ {
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
int ret; int ret;
rbio = alloc_rbio(root, bbio, raid_map, stripe_len); rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio)) { if (IS_ERR(rbio)) {
__free_bbio_and_raid_map(bbio, raid_map, generic_io); __free_bbio(bbio, generic_io);
return PTR_ERR(rbio); return PTR_ERR(rbio);
} }
...@@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, ...@@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio); rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) { if (rbio->faila == -1) {
BUG(); BUG();
__free_bbio_and_raid_map(bbio, raid_map, generic_io); __free_bbio(bbio, generic_io);
kfree(rbio); kfree(rbio);
return -EIO; return -EIO;
} }
...@@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work) ...@@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work)
struct btrfs_raid_bio * struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len,
u64 stripe_len, struct btrfs_device *scrub_dev, struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors) unsigned long *dbitmap, int stripe_nsectors)
{ {
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
int i; int i;
rbio = alloc_rbio(root, bbio, raid_map, stripe_len); rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio)) if (IS_ERR(rbio))
return NULL; return NULL;
bio_list_add(&rbio->bio_list, bio); bio_list_add(&rbio->bio_list, bio);
...@@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, ...@@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
int stripe_offset; int stripe_offset;
int index; int index;
ASSERT(logical >= rbio->raid_map[0]); ASSERT(logical >= rbio->bbio->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
rbio->stripe_len * rbio->nr_data); rbio->stripe_len * rbio->nr_data);
stripe_offset = (int)(logical - rbio->raid_map[0]); stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
index = stripe_offset >> PAGE_CACHE_SHIFT; index = stripe_offset >> PAGE_CACHE_SHIFT;
rbio->bio_pages[index] = page; rbio->bio_pages[index] = page;
} }
......
...@@ -43,16 +43,15 @@ struct btrfs_raid_bio; ...@@ -43,16 +43,15 @@ struct btrfs_raid_bio;
struct btrfs_device; struct btrfs_device;
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len,
u64 stripe_len, int mirror_num, int generic_io); int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio, int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len);
u64 stripe_len);
struct btrfs_raid_bio * struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map, struct btrfs_bio *bbio, u64 stripe_len,
u64 stripe_len, struct btrfs_device *scrub_dev, struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors); unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
struct page *page, u64 logical); struct page *page, u64 logical);
......
...@@ -66,7 +66,6 @@ struct scrub_ctx; ...@@ -66,7 +66,6 @@ struct scrub_ctx;
struct scrub_recover { struct scrub_recover {
atomic_t refs; atomic_t refs;
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
u64 *raid_map;
u64 map_length; u64 map_length;
}; };
...@@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover) ...@@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover)
{ {
if (atomic_dec_and_test(&recover->refs)) { if (atomic_dec_and_test(&recover->refs)) {
kfree(recover->bbio); kfree(recover->bbio);
kfree(recover->raid_map);
kfree(recover); kfree(recover);
} }
} }
...@@ -1296,12 +1294,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) ...@@ -1296,12 +1294,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
return 0; return 0;
} }
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{ {
if (raid_map) { if (bbio->raid_map) {
int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
return 3; return 3;
else else
return 2; return 2;
...@@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, ...@@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
{ {
struct scrub_recover *recover; struct scrub_recover *recover;
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
u64 *raid_map;
u64 sublen; u64 sublen;
u64 mapped_length; u64 mapped_length;
u64 stripe_offset; u64 stripe_offset;
...@@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, ...@@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
sublen = min_t(u64, length, PAGE_SIZE); sublen = min_t(u64, length, PAGE_SIZE);
mapped_length = sublen; mapped_length = sublen;
bbio = NULL; bbio = NULL;
raid_map = NULL;
/* /*
* with a length of PAGE_SIZE, each returned stripe * with a length of PAGE_SIZE, each returned stripe
* represents one mirror * represents one mirror
*/ */
ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
&mapped_length, &bbio, 0, &raid_map); &mapped_length, &bbio, 0, 1);
if (ret || !bbio || mapped_length < sublen) { if (ret || !bbio || mapped_length < sublen) {
kfree(bbio); kfree(bbio);
kfree(raid_map);
return -EIO; return -EIO;
} }
recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
if (!recover) { if (!recover) {
kfree(bbio); kfree(bbio);
kfree(raid_map);
return -ENOMEM; return -ENOMEM;
} }
atomic_set(&recover->refs, 1); atomic_set(&recover->refs, 1);
recover->bbio = bbio; recover->bbio = bbio;
recover->raid_map = raid_map;
recover->map_length = mapped_length; recover->map_length = mapped_length;
BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); nmirrors = scrub_nr_raid_mirrors(bbio);
for (mirror_index = 0; mirror_index < nmirrors; for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) { mirror_index++) {
struct scrub_block *sblock; struct scrub_block *sblock;
...@@ -1420,7 +1413,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, ...@@ -1420,7 +1413,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
sblock->pagev[page_index] = page; sblock->pagev[page_index] = page;
page->logical = logical; page->logical = logical;
scrub_stripe_index_and_offset(logical, raid_map, scrub_stripe_index_and_offset(logical, bbio->raid_map,
mapped_length, mapped_length,
bbio->num_stripes - bbio->num_stripes -
bbio->num_tgtdevs, bbio->num_tgtdevs,
...@@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) ...@@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
static inline int scrub_is_page_on_raid56(struct scrub_page *page) static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{ {
return page->recover && page->recover->raid_map; return page->recover && page->recover->bbio->raid_map;
} }
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
...@@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, ...@@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_end_io = scrub_bio_wait_endio; bio->bi_end_io = scrub_bio_wait_endio;
ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
page->recover->raid_map,
page->recover->map_length, page->recover->map_length,
page->mirror_num, 0); page->mirror_num, 0);
if (ret) if (ret)
...@@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) ...@@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
struct scrub_page *spage; struct scrub_page *spage;
struct btrfs_bio *bbio = NULL; struct btrfs_bio *bbio = NULL;
u64 *raid_map = NULL;
u64 length; u64 length;
int ret; int ret;
...@@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) ...@@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
length = sparity->logic_end - sparity->logic_start + 1; length = sparity->logic_end - sparity->logic_start + 1;
ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
sparity->logic_start, sparity->logic_start,
&length, &bbio, 0, &raid_map); &length, &bbio, 0, 1);
if (ret || !bbio || !raid_map) if (ret || !bbio || !bbio->raid_map)
goto bbio_out; goto bbio_out;
bio = btrfs_io_bio_alloc(GFP_NOFS, 0); bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
...@@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) ...@@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_end_io = scrub_parity_bio_endio; bio->bi_end_io = scrub_parity_bio_endio;
rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
raid_map, length, length, sparity->scrub_dev,
sparity->scrub_dev,
sparity->dbitmap, sparity->dbitmap,
sparity->nsectors); sparity->nsectors);
if (!rbio) if (!rbio)
...@@ -2759,7 +2749,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) ...@@ -2759,7 +2749,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio_put(bio); bio_put(bio);
bbio_out: bbio_out:
kfree(bbio); kfree(bbio);
kfree(raid_map);
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
sparity->nsectors); sparity->nsectors);
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
......
...@@ -4876,8 +4876,7 @@ static inline int parity_smaller(u64 a, u64 b) ...@@ -4876,8 +4876,7 @@ static inline int parity_smaller(u64 a, u64 b)
} }
/* Bubble-sort the stripe set to put the parity/syndrome stripes last */ /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
int num_stripes)
{ {
struct btrfs_bio_stripe s; struct btrfs_bio_stripe s;
int i; int i;
...@@ -4887,13 +4886,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, ...@@ -4887,13 +4886,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map,
while (again) { while (again) {
again = 0; again = 0;
for (i = 0; i < num_stripes - 1; i++) { for (i = 0; i < num_stripes - 1; i++) {
if (parity_smaller(raid_map[i], raid_map[i+1])) { if (parity_smaller(bbio->raid_map[i],
bbio->raid_map[i+1])) {
s = bbio->stripes[i]; s = bbio->stripes[i];
l = raid_map[i]; l = bbio->raid_map[i];
bbio->stripes[i] = bbio->stripes[i+1]; bbio->stripes[i] = bbio->stripes[i+1];
raid_map[i] = raid_map[i+1]; bbio->raid_map[i] = bbio->raid_map[i+1];
bbio->stripes[i+1] = s; bbio->stripes[i+1] = s;
raid_map[i+1] = l; bbio->raid_map[i+1] = l;
again = 1; again = 1;
} }
...@@ -4904,7 +4904,7 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, ...@@ -4904,7 +4904,7 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map,
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length, u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, struct btrfs_bio **bbio_ret,
int mirror_num, u64 **raid_map_ret) int mirror_num, int need_raid_map)
{ {
struct extent_map *em; struct extent_map *em;
struct map_lookup *map; struct map_lookup *map;
...@@ -4917,7 +4917,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -4917,7 +4917,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 stripe_nr_orig; u64 stripe_nr_orig;
u64 stripe_nr_end; u64 stripe_nr_end;
u64 stripe_len; u64 stripe_len;
u64 *raid_map = NULL;
int stripe_index; int stripe_index;
int i; int i;
int ret = 0; int ret = 0;
...@@ -5039,7 +5038,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5039,7 +5038,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 physical_of_found = 0; u64 physical_of_found = 0;
ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
logical, &tmp_length, &tmp_bbio, 0, NULL); logical, &tmp_length, &tmp_bbio, 0, 0);
if (ret) { if (ret) {
WARN_ON(tmp_bbio != NULL); WARN_ON(tmp_bbio != NULL);
goto out; goto out;
...@@ -5160,13 +5159,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5160,13 +5159,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) { BTRFS_BLOCK_GROUP_RAID6)) {
u64 tmp; if (need_raid_map &&
if (raid_map_ret &&
((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
mirror_num > 1)) { mirror_num > 1)) {
int i, rot;
/* push stripe_nr back to the start of the full stripe */ /* push stripe_nr back to the start of the full stripe */
stripe_nr = raid56_full_stripe_start; stripe_nr = raid56_full_stripe_start;
do_div(stripe_nr, stripe_len * nr_data_stripes(map)); do_div(stripe_nr, stripe_len * nr_data_stripes(map));
...@@ -5175,32 +5170,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5175,32 +5170,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_stripes = map->num_stripes; num_stripes = map->num_stripes;
max_errors = nr_parity_stripes(map); max_errors = nr_parity_stripes(map);
raid_map = kmalloc_array(num_stripes, sizeof(u64),
GFP_NOFS);
if (!raid_map) {
ret = -ENOMEM;
goto out;
}
/* Work out the disk rotation on this stripe-set */
tmp = stripe_nr;
rot = do_div(tmp, num_stripes);
/* Fill in the logical address of each stripe */
tmp = stripe_nr * nr_data_stripes(map);
for (i = 0; i < nr_data_stripes(map); i++)
raid_map[(i+rot) % num_stripes] =
em->start + (tmp + i) * map->stripe_len;
raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
if (map->type & BTRFS_BLOCK_GROUP_RAID6)
raid_map[(i+rot+1) % num_stripes] =
RAID6_Q_STRIPE;
*length = map->stripe_len; *length = map->stripe_len;
stripe_index = 0; stripe_index = 0;
stripe_offset = 0; stripe_offset = 0;
} else { } else {
u64 tmp;
/* /*
* Mirror #0 or #1 means the original data block. * Mirror #0 or #1 means the original data block.
* Mirror #2 is RAID5 parity block. * Mirror #2 is RAID5 parity block.
...@@ -5241,7 +5216,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5241,7 +5216,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
GFP_NOFS); GFP_NOFS);
if (!bbio) { if (!bbio) {
kfree(raid_map);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -5249,6 +5223,34 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5249,6 +5223,34 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (dev_replace_is_ongoing) if (dev_replace_is_ongoing)
bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
/* build raid_map */
if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
mirror_num > 1)) {
u64 tmp;
int i, rot;
bbio->raid_map = (u64 *)((void *)bbio->stripes +
sizeof(struct btrfs_bio_stripe) *
num_alloc_stripes +
sizeof(int) * tgtdev_indexes);
/* Work out the disk rotation on this stripe-set */
tmp = stripe_nr;
rot = do_div(tmp, num_stripes);
/* Fill in the logical address of each stripe */
tmp = stripe_nr * nr_data_stripes(map);
for (i = 0; i < nr_data_stripes(map); i++)
bbio->raid_map[(i+rot) % num_stripes] =
em->start + (tmp + i) * map->stripe_len;
bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
if (map->type & BTRFS_BLOCK_GROUP_RAID6)
bbio->raid_map[(i+rot+1) % num_stripes] =
RAID6_Q_STRIPE;
}
if (rw & REQ_DISCARD) { if (rw & REQ_DISCARD) {
int factor = 0; int factor = 0;
int sub_stripes = 0; int sub_stripes = 0;
...@@ -5332,8 +5334,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5332,8 +5334,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
max_errors = btrfs_chunk_max_errors(map); max_errors = btrfs_chunk_max_errors(map);
if (raid_map) if (bbio->raid_map)
sort_parity_stripes(bbio, raid_map, num_stripes); sort_parity_stripes(bbio, num_stripes);
tgtdev_indexes = 0; tgtdev_indexes = 0;
if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
...@@ -5438,9 +5440,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5438,9 +5440,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
bbio->mirror_num = map->num_stripes + 1; bbio->mirror_num = map->num_stripes + 1;
} }
if (raid_map_ret)
*raid_map_ret = raid_map;
out: out:
if (dev_replace_is_ongoing) if (dev_replace_is_ongoing)
btrfs_dev_replace_unlock(dev_replace); btrfs_dev_replace_unlock(dev_replace);
...@@ -5453,17 +5452,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -5453,17 +5452,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
struct btrfs_bio **bbio_ret, int mirror_num) struct btrfs_bio **bbio_ret, int mirror_num)
{ {
return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
mirror_num, NULL); mirror_num, 0);
} }
/* For Scrub/replace */ /* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length, u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num, struct btrfs_bio **bbio_ret, int mirror_num,
u64 **raid_map_ret) int need_raid_map)
{ {
return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
mirror_num, raid_map_ret); mirror_num, need_raid_map);
} }
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
...@@ -5802,7 +5801,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, ...@@ -5802,7 +5801,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 logical = (u64)bio->bi_iter.bi_sector << 9;
u64 length = 0; u64 length = 0;
u64 map_length; u64 map_length;
u64 *raid_map = NULL;
int ret; int ret;
int dev_nr = 0; int dev_nr = 0;
int total_devs = 1; int total_devs = 1;
...@@ -5813,7 +5811,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, ...@@ -5813,7 +5811,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
btrfs_bio_counter_inc_blocked(root->fs_info); btrfs_bio_counter_inc_blocked(root->fs_info);
ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
mirror_num, &raid_map); mirror_num, 1);
if (ret) { if (ret) {
btrfs_bio_counter_dec(root->fs_info); btrfs_bio_counter_dec(root->fs_info);
return ret; return ret;
...@@ -5826,15 +5824,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, ...@@ -5826,15 +5824,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
bbio->fs_info = root->fs_info; bbio->fs_info = root->fs_info;
atomic_set(&bbio->stripes_pending, bbio->num_stripes); atomic_set(&bbio->stripes_pending, bbio->num_stripes);
if (raid_map) { if (bbio->raid_map) {
/* In this case, map_length has been set to the length of /* In this case, map_length has been set to the length of
a single stripe; not the whole write */ a single stripe; not the whole write */
if (rw & WRITE) { if (rw & WRITE) {
ret = raid56_parity_write(root, bio, bbio, ret = raid56_parity_write(root, bio, bbio, map_length);
raid_map, map_length);
} else { } else {
ret = raid56_parity_recover(root, bio, bbio, ret = raid56_parity_recover(root, bio, bbio, map_length,
raid_map, map_length,
mirror_num, 1); mirror_num, 1);
} }
......
...@@ -307,6 +307,12 @@ struct btrfs_bio { ...@@ -307,6 +307,12 @@ struct btrfs_bio {
int mirror_num; int mirror_num;
int num_tgtdevs; int num_tgtdevs;
int *tgtdev_map; int *tgtdev_map;
/*
* logical block numbers for the start of each stripe
* The last one or two are p/q. These are sorted,
* so raid_map[0] is the start of our full stripe
*/
u64 *raid_map;
struct btrfs_bio_stripe stripes[]; struct btrfs_bio_stripe stripes[];
}; };
...@@ -392,7 +398,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, ...@@ -392,7 +398,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
#define btrfs_bio_size(total_stripes, real_stripes) \ #define btrfs_bio_size(total_stripes, real_stripes) \
(sizeof(struct btrfs_bio) + \ (sizeof(struct btrfs_bio) + \
(sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \
(sizeof(int) * (real_stripes))) (sizeof(int) * (real_stripes)) + \
(sizeof(u64) * (real_stripes)))
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length, u64 logical, u64 *length,
...@@ -400,7 +407,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, ...@@ -400,7 +407,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length, u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num, struct btrfs_bio **bbio_ret, int mirror_num,
u64 **raid_map_ret); int need_raid_map);
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 chunk_start, u64 physical, u64 devid, u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len); u64 **logical, int *naddrs, int *stripe_len);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment