Commit 560f8e55 authored by NeilBrown

md/raid10: Split handle_read_error out from raid10d.

raid10d() is too big and is about to get bigger, so split
handle_read_error() out as a separate function.
Signed-off-by: NeilBrown <neilb@suse.de>
parent 1294b9c9
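
This commit is pure code motion: the read-error branch of raid10d()'s retry loop becomes the standalone function handle_read_error(), and the loop body is reduced to a plain dispatch. As a minimal, self-contained sketch of that pattern (ordinary C with hypothetical names such as work_item and handle_read_error_item, not the kernel code):

/* Sketch of the refactoring pattern applied by this commit: the last
 * else-branch of a large worker loop is extracted into a named helper.
 * All names here are illustrative stand-ins for the r10bio_t machinery. */
#include <stdio.h>

struct work_item {
	int is_sync;     /* stands in for R10BIO_IsSync */
	int is_recover;  /* stands in for R10BIO_IsRecover */
};

/* Extracted helper: before the refactor this logic lived inline in the
 * final else branch of the dispatch loop below. */
static void handle_read_error_item(struct work_item *item)
{
	(void)item;
	printf("handling read error\n");
}

static void worker(struct work_item *queue, int n)
{
	for (int i = 0; i < n; i++) {
		struct work_item *item = &queue[i];
		if (item->is_sync)
			printf("sync write-back\n");
		else if (item->is_recover)
			printf("recovery write\n");
		else
			handle_read_error_item(item); /* was a large inline block */
	}
}

int main(void)
{
	struct work_item q[] = { {1, 0}, {0, 1}, {0, 0} };
	worker(q, 3);
	return 0;
}

The kernel diff itself: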
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1618,43 +1618,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
 	}
 }
 
-static void raid10d(mddev_t *mddev)
+static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 {
-	r10bio_t *r10_bio;
+	int slot = r10_bio->read_slot;
+	int mirror = r10_bio->devs[slot].devnum;
 	struct bio *bio;
-	unsigned long flags;
 	conf_t *conf = mddev->private;
-	struct list_head *head = &conf->retry_list;
 	mdk_rdev_t *rdev;
-	struct blk_plug plug;
-
-	md_check_recovery(mddev);
-
-	blk_start_plug(&plug);
-	for (;;) {
-		char b[BDEVNAME_SIZE];
-
-		flush_pending_writes(conf);
-
-		spin_lock_irqsave(&conf->device_lock, flags);
-		if (list_empty(head)) {
-			spin_unlock_irqrestore(&conf->device_lock, flags);
-			break;
-		}
-		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
-		list_del(head->prev);
-		conf->nr_queued--;
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-
-		mddev = r10_bio->mddev;
-		conf = mddev->private;
-		if (test_bit(R10BIO_IsSync, &r10_bio->state))
-			sync_request_write(mddev, r10_bio);
-		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
-			recovery_request_write(mddev, r10_bio);
-		else {
-			int slot = r10_bio->read_slot;
-			int mirror = r10_bio->devs[slot].devnum;
-			/* we got a read error. Maybe the drive is bad. Maybe just
-			 * the block and we can fix it.
-			 * We freeze all other IO, and try reading the block from
+	char b[BDEVNAME_SIZE];
+	unsigned long do_sync;
+
+	/* we got a read error. Maybe the drive is bad. Maybe just
+	 * the block and we can fix it.
+	 * We freeze all other IO, and try reading the block from
@@ -1678,12 +1651,14 @@ static void raid10d(mddev_t *mddev)
 		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
 		       " read error for block %llu\n",
 		       mdname(mddev),
-		       bdevname(bio->bi_bdev,b),
+		       bdevname(bio->bi_bdev, b),
 		       (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
 		bio_put(bio);
-	} else {
-		const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+		return;
+	}
+
+	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 	bio_put(bio);
 	slot = r10_bio->read_slot;
 	rdev = conf->mirrors[mirror].rdev;
@@ -1704,8 +1679,42 @@ static void raid10d(mddev_t *mddev)
 	bio->bi_private = r10_bio;
 	bio->bi_end_io = raid10_end_read_request;
 	generic_make_request(bio);
+}
+
+static void raid10d(mddev_t *mddev)
+{
+	r10bio_t *r10_bio;
+	unsigned long flags;
+	conf_t *conf = mddev->private;
+	struct list_head *head = &conf->retry_list;
+	struct blk_plug plug;
+
+	md_check_recovery(mddev);
+
+	blk_start_plug(&plug);
+	for (;;) {
+
+		flush_pending_writes(conf);
+
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+			break;
 		}
-	}
+		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
+		list_del(head->prev);
+		conf->nr_queued--;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+
+		mddev = r10_bio->mddev;
+		conf = mddev->private;
+		if (test_bit(R10BIO_IsSync, &r10_bio->state))
+			sync_request_write(mddev, r10_bio);
+		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
+			recovery_request_write(mddev, r10_bio);
+		else
+			handle_read_error(mddev, r10_bio);
+
 		cond_resched();
 		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
 			md_check_recovery(mddev);
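
The hunks above elide the middle of handle_read_error(), which per the in-code comment freezes other IO, attempts a synchronous repair, and then picks another mirror for the read. As a hedged summary of the resulting control flow, with printf stubs standing in for the real kernel helpers (every name below is an illustrative stand-in, not a kernel API):

/* Self-contained summary (plain C, not kernel code) of the control flow
 * handle_read_error() has after this commit. */
#include <stdio.h>

static void freeze_array_stub(void)    { printf("freeze other IO\n"); }
static void fix_read_error_stub(void)  { printf("try to repair block from a good mirror\n"); }
static void unfreeze_array_stub(void)  { printf("unfreeze IO\n"); }
static int  read_balance_stub(void)    { return 1; /* pretend a mirror was found */ }

static void handle_read_error_flow(void)
{
	/* 1. Freeze other IO and attempt the repair synchronously,
	 *    as the comment in the diff describes. */
	freeze_array_stub();
	fix_read_error_stub();
	unfreeze_array_stub();

	/* 2. Pick another mirror; if none is usable, fail the IO.
	 *    This path matches the new early 'return;' in the diff. */
	int mirror = read_balance_stub();
	if (mirror < 0) {
		printf("unrecoverable read error: end the bio\n");
		return;
	}

	/* 3. Otherwise re-issue the read to the chosen mirror
	 *    (generic_make_request() in the real code). */
	printf("re-submit read to mirror %d\n", mirror);
}

int main(void)
{
	handle_read_error_flow();
	return 0;
}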