Commit c85ba149 authored by Ming Lei, committed by Shaohua Li

md: raid1/raid10: don't handle failure of bio_add_page()

Every bio_add_page() call here adds one page to a resync bio,
which is big enough to hold RESYNC_PAGES pages, and
the current bio_add_page() no longer checks the queue limit,
so it cannot fail.

remove unused label (shaohua)
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 3560741e
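
The commit message is a capacity argument: each resync bio is set up with room for
RESYNC_PAGES vector entries and pre-allocated pages, and the resync fill loops below
stop once bi_vcnt reaches RESYNC_PAGES, so bio_add_page() is never called on a bio
whose vec table is already full. The userspace sketch below models only that
invariant; struct toy_bio, toy_add_page() and the RESYNC_PAGES value of 16 are
illustrative stand-ins, not the kernel's struct bio, bio_add_page() or the md
driver's constants.

#include <stdio.h>

#define RESYNC_PAGES 16	/* illustrative; the driver derives it from RESYNC_BLOCK_SIZE and PAGE_SIZE */

struct toy_bio {
	int vcnt;	/* pages attached so far, like bi_vcnt */
	int max_vecs;	/* capacity of the vec table, like bi_max_vecs */
};

/* Models bio_add_page(): the only way it can fail here is a full vec table. */
static int toy_add_page(struct toy_bio *bio)
{
	if (bio->vcnt >= bio->max_vecs)
		return 0;	/* the "bio full" case the old code rolled back from */
	bio->vcnt++;
	return 1;		/* page accepted */
}

int main(void)
{
	struct toy_bio bio = { .vcnt = 0, .max_vecs = RESYNC_PAGES };

	/* Mirrors the resync fill loop: it never runs past RESYNC_PAGES,
	 * so the failure branch above is unreachable. */
	do {
		if (!toy_add_page(&bio)) {
			fprintf(stderr, "unexpected full bio\n");
			return 1;
		}
	} while (bio.vcnt < RESYNC_PAGES);

	printf("added %d pages without hitting the failure path\n", bio.vcnt);
	return 0;
}

Under that bound the rollback-and-goto-bio_full paths removed in the hunks below can
never be taken, which is exactly what the patch relies on.
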
@@ -2894,28 +2894,18 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io) {
 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
-				if (bio_add_page(bio, page, len, 0) == 0) {
-					/* stop here */
-					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
-					while (i > 0) {
-						i--;
-						bio = r1_bio->bios[i];
-						if (bio->bi_end_io==NULL)
-							continue;
-						/* remove last page from this bio */
-						bio->bi_vcnt--;
-						bio->bi_iter.bi_size -= len;
-						bio_clear_flag(bio, BIO_SEG_VALID);
-					}
-					goto bio_full;
-				}
+
+				/*
+				 * won't fail because the vec table is big
+				 * enough to hold all these pages
+				 */
+				bio_add_page(bio, page, len, 0);
 			}
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
 	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
- bio_full:
 	r1_bio->sectors = nr_sectors;
 
 	if (mddev_is_clustered(mddev) &&
...
@@ -3413,27 +3413,16 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		if (len == 0)
 			break;
 		for (bio= biolist ; bio ; bio=bio->bi_next) {
-			struct bio *bio2;
 			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
-			if (bio_add_page(bio, page, len, 0))
-				continue;
-
-			/* stop here */
-			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
-			for (bio2 = biolist;
-			     bio2 && bio2 != bio;
-			     bio2 = bio2->bi_next) {
-				/* remove last page from this bio */
-				bio2->bi_vcnt--;
-				bio2->bi_iter.bi_size -= len;
-				bio_clear_flag(bio2, BIO_SEG_VALID);
-			}
-			goto bio_full;
+			/*
+			 * won't fail because the vec table is big enough
+			 * to hold all these pages
+			 */
+			bio_add_page(bio, page, len, 0);
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 	} while (biolist->bi_vcnt < RESYNC_PAGES);
- bio_full:
 	r10_bio->sectors = nr_sectors;
 
 	while (biolist) {
@@ -4502,25 +4491,15 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 		if (len > PAGE_SIZE)
 			len = PAGE_SIZE;
 		for (bio = blist; bio ; bio = bio->bi_next) {
-			struct bio *bio2;
-			if (bio_add_page(bio, page, len, 0))
-				continue;
-
-			/* Didn't fit, must stop */
-			for (bio2 = blist;
-			     bio2 && bio2 != bio;
-			     bio2 = bio2->bi_next) {
-				/* Remove last page from this bio */
-				bio2->bi_vcnt--;
-				bio2->bi_iter.bi_size -= len;
-				bio_clear_flag(bio2, BIO_SEG_VALID);
-			}
-			goto bio_full;
+			/*
+			 * won't fail because the vec table is big enough
+			 * to hold all these pages
+			 */
+			bio_add_page(bio, page, len, 0);
 		}
 		sector_nr += len >> 9;
 		nr_sectors += len >> 9;
 	}
-bio_full:
 	rcu_read_unlock();
 	r10_bio->sectors = nr_sectors;
...