Commit 62096bce authored by NeilBrown

md/raid1: factor several functions out of raid1d()

raid1d is too big with several deep branches.
So separate them out into their own functions.
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Namhyung Kim <namhyung@gmail.com>
parent 3a9f28a5
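
After this change the retry loop in raid1d() only classifies each r1_bio taken off conf->retry_list and hands it to one of the new helpers. In outline (condensed from the new raid1d() in the diff below; locking, plugging and the recovery check are unchanged):

    if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
        if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
            test_bit(R1BIO_WriteError, &r1_bio->state))
            handle_sync_write_finished(conf, r1_bio);  /* fix up bad-block records, then md_done_sync() */
        else
            sync_request_write(mddev, r1_bio);
    } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
               test_bit(R1BIO_WriteError, &r1_bio->state))
        handle_write_finished(conf, r1_bio);           /* record/clear bad blocks for a normal write */
    else if (test_bit(R1BIO_ReadError, &r1_bio->state))
        handle_read_error(conf, r1_bio);               /* retry the read on another mirror */
    else
        /* just a partial read to be scheduled from a separate context */
        generic_make_request(r1_bio->bios[r1_bio->read_disk]);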
@@ -1861,105 +1861,66 @@ static int narrow_write_error(r1bio_t *r1_bio, int i)
 	return ok;
 }

-static void raid1d(mddev_t *mddev)
-{
-    r1bio_t *r1_bio;
-    struct bio *bio;
-    unsigned long flags;
-    conf_t *conf = mddev->private;
-    struct list_head *head = &conf->retry_list;
-    mdk_rdev_t *rdev;
-    struct blk_plug plug;
-
-    md_check_recovery(mddev);
-
-    blk_start_plug(&plug);
-    for (;;) {
-        char b[BDEVNAME_SIZE];
-
-        if (atomic_read(&mddev->plug_cnt) == 0)
-            flush_pending_writes(conf);
-
-        spin_lock_irqsave(&conf->device_lock, flags);
-        if (list_empty(head)) {
-            spin_unlock_irqrestore(&conf->device_lock, flags);
-            break;
-        }
-        r1_bio = list_entry(head->prev, r1bio_t, retry_list);
-        list_del(head->prev);
-        conf->nr_queued--;
-        spin_unlock_irqrestore(&conf->device_lock, flags);
-
-        mddev = r1_bio->mddev;
-        conf = mddev->private;
-        if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
-            if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
-                test_bit(R1BIO_WriteError, &r1_bio->state)) {
-                int m;
-                int s = r1_bio->sectors;
-                for (m = 0; m < conf->raid_disks ; m++) {
-                    mdk_rdev_t *rdev
-                        = conf->mirrors[m].rdev;
-                    struct bio *bio = r1_bio->bios[m];
-                    if (bio->bi_end_io == NULL)
-                        continue;
-                    if (test_bit(BIO_UPTODATE,
-                                 &bio->bi_flags) &&
-                        test_bit(R1BIO_MadeGood,
-                                 &r1_bio->state)) {
-                        rdev_clear_badblocks(
-                            rdev,
-                            r1_bio->sector,
-                            r1_bio->sectors);
-                    }
-                    if (!test_bit(BIO_UPTODATE,
-                                  &bio->bi_flags) &&
-                        test_bit(R1BIO_WriteError,
-                                 &r1_bio->state)) {
-                        if (!rdev_set_badblocks(
-                                rdev,
-                                r1_bio->sector,
-                                r1_bio->sectors, 0))
-                            md_error(mddev, rdev);
-                    }
-                }
-                put_buf(r1_bio);
-                md_done_sync(mddev, s, 1);
-            } else
-                sync_request_write(mddev, r1_bio);
-        } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
-                   test_bit(R1BIO_WriteError, &r1_bio->state)) {
-            int m;
-            for (m = 0; m < conf->raid_disks ; m++)
-                if (r1_bio->bios[m] == IO_MADE_GOOD) {
-                    rdev = conf->mirrors[m].rdev;
-                    rdev_clear_badblocks(
-                        rdev,
-                        r1_bio->sector,
-                        r1_bio->sectors);
-                    rdev_dec_pending(rdev, mddev);
-                } else if (r1_bio->bios[m] != NULL) {
-                    /* This drive got a write error. We
-                     * need to narrow down and record
-                     * precise write errors.
-                     */
-                    if (!narrow_write_error(r1_bio, m)) {
-                        md_error(mddev,
-                                 conf->mirrors[m].rdev);
-                        /* an I/O failed, we can't clear
-                         * the bitmap */
-                        set_bit(R1BIO_Degraded,
-                                &r1_bio->state);
-                    }
-                    rdev_dec_pending(conf->mirrors[m].rdev,
-                                     mddev);
-                }
-            if (test_bit(R1BIO_WriteError, &r1_bio->state))
-                close_write(r1_bio);
-            raid_end_bio_io(r1_bio);
-        } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) {
-            int disk;
-            int max_sectors;
+static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+    int m;
+    int s = r1_bio->sectors;
+    for (m = 0; m < conf->raid_disks ; m++) {
+        mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+        struct bio *bio = r1_bio->bios[m];
+        if (bio->bi_end_io == NULL)
+            continue;
+        if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+            test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+            rdev_clear_badblocks(rdev, r1_bio->sector, s);
+        }
+        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+            test_bit(R1BIO_WriteError, &r1_bio->state)) {
+            if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
+                md_error(conf->mddev, rdev);
+        }
+    }
+    put_buf(r1_bio);
+    md_done_sync(conf->mddev, s, 1);
+}
+
+static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+    int m;
+    for (m = 0; m < conf->raid_disks ; m++)
+        if (r1_bio->bios[m] == IO_MADE_GOOD) {
+            mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+            rdev_clear_badblocks(rdev,
+                                 r1_bio->sector,
+                                 r1_bio->sectors);
+            rdev_dec_pending(rdev, conf->mddev);
+        } else if (r1_bio->bios[m] != NULL) {
+            /* This drive got a write error. We need to
+             * narrow down and record precise write
+             * errors.
+             */
+            if (!narrow_write_error(r1_bio, m)) {
+                md_error(conf->mddev,
+                         conf->mirrors[m].rdev);
+                /* an I/O failed, we can't clear the bitmap */
+                set_bit(R1BIO_Degraded, &r1_bio->state);
+            }
+            rdev_dec_pending(conf->mirrors[m].rdev,
+                             conf->mddev);
+        }
+    if (test_bit(R1BIO_WriteError, &r1_bio->state))
+        close_write(r1_bio);
+    raid_end_bio_io(r1_bio);
+}
+
+static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+{
+    int disk;
+    int max_sectors;
+    mddev_t *mddev = conf->mddev;
+    struct bio *bio;
+    char b[BDEVNAME_SIZE];
+    mdk_rdev_t *rdev;
+
     clear_bit(R1BIO_ReadError, &r1_bio->state);
     /* we got a read error. Maybe the drive is bad. Maybe just
@@ -1973,12 +1934,10 @@ static void raid1d(mddev_t *mddev)
     if (mddev->ro == 0) {
         freeze_array(conf);
         fix_read_error(conf, r1_bio->read_disk,
-                       r1_bio->sector,
-                       r1_bio->sectors);
+                       r1_bio->sector, r1_bio->sectors);
         unfreeze_array(conf);
     } else
-        md_error(mddev,
-                 conf->mirrors[r1_bio->read_disk].rdev);
+        md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);

     bio = r1_bio->bios[r1_bio->read_disk];
     bdevname(bio->bi_bdev, b);
@@ -1987,26 +1946,22 @@
     if (disk == -1) {
         printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
                " read error for block %llu\n",
-               mdname(mddev), b,
-               (unsigned long long)r1_bio->sector);
+               mdname(mddev), b, (unsigned long long)r1_bio->sector);
         raid_end_bio_io(r1_bio);
     } else {
-        const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+        const unsigned long do_sync
+            = r1_bio->master_bio->bi_rw & REQ_SYNC;
         if (bio) {
             r1_bio->bios[r1_bio->read_disk] =
                 mddev->ro ? IO_BLOCKED : NULL;
             bio_put(bio);
         }
         r1_bio->read_disk = disk;
-        bio = bio_clone_mddev(r1_bio->master_bio,
-                              GFP_NOIO, mddev);
-        md_trim_bio(bio,
-                    r1_bio->sector - bio->bi_sector,
-                    max_sectors);
+        bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+        md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
         r1_bio->bios[r1_bio->read_disk] = bio;
         rdev = conf->mirrors[disk].rdev;
-        printk_ratelimited(
-            KERN_ERR
+        printk_ratelimited(KERN_ERR
             "md/raid1:%s: redirecting sector %llu"
             " to other mirror: %s\n",
             mdname(mddev),
@@ -2020,9 +1975,8 @@
     if (max_sectors < r1_bio->sectors) {
         /* Drat - have to split this up more */
         struct bio *mbio = r1_bio->master_bio;
-        int sectors_handled =
-            r1_bio->sector + max_sectors
-            - mbio->bi_sector;
+        int sectors_handled = (r1_bio->sector + max_sectors
+                               - mbio->bi_sector);
         r1_bio->sectors = max_sectors;
         spin_lock_irq(&conf->device_lock);
         if (mbio->bi_phys_segments == 0)
@@ -2033,29 +1987,67 @@
             generic_make_request(bio);
             bio = NULL;

-            r1_bio = mempool_alloc(conf->r1bio_pool,
-                                   GFP_NOIO);
+            r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

             r1_bio->master_bio = mbio;
             r1_bio->sectors = (mbio->bi_size >> 9)
                 - sectors_handled;
             r1_bio->state = 0;
-            set_bit(R1BIO_ReadError,
-                    &r1_bio->state);
+            set_bit(R1BIO_ReadError, &r1_bio->state);
             r1_bio->mddev = mddev;
-            r1_bio->sector = mbio->bi_sector
-                + sectors_handled;
+            r1_bio->sector = mbio->bi_sector + sectors_handled;

             goto read_more;
         } else
             generic_make_request(bio);
-        }
-    } else {
+    }
+}
+
+static void raid1d(mddev_t *mddev)
+{
+    r1bio_t *r1_bio;
+    unsigned long flags;
+    conf_t *conf = mddev->private;
+    struct list_head *head = &conf->retry_list;
+    struct blk_plug plug;
+
+    md_check_recovery(mddev);
+
+    blk_start_plug(&plug);
+    for (;;) {
+
+        if (atomic_read(&mddev->plug_cnt) == 0)
+            flush_pending_writes(conf);
+
+        spin_lock_irqsave(&conf->device_lock, flags);
+        if (list_empty(head)) {
+            spin_unlock_irqrestore(&conf->device_lock, flags);
+            break;
+        }
+        r1_bio = list_entry(head->prev, r1bio_t, retry_list);
+        list_del(head->prev);
+        conf->nr_queued--;
+        spin_unlock_irqrestore(&conf->device_lock, flags);
+
+        mddev = r1_bio->mddev;
+        conf = mddev->private;
+        if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+            if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                test_bit(R1BIO_WriteError, &r1_bio->state))
+                handle_sync_write_finished(conf, r1_bio);
+            else
+                sync_request_write(mddev, r1_bio);
+        } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                   test_bit(R1BIO_WriteError, &r1_bio->state))
+            handle_write_finished(conf, r1_bio);
+        else if (test_bit(R1BIO_ReadError, &r1_bio->state))
+            handle_read_error(conf, r1_bio);
+        else
             /* just a partial read to be scheduled from separate
              * context
              */
             generic_make_request(r1_bio->bios[r1_bio->read_disk]);
-        }
+
         cond_resched();
         if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
             md_check_recovery(mddev);