Commit 2e38a37f authored by Song Liu, committed by Shaohua Li

md/r5cache: disable write back for degraded array

write-back cache in degraded mode introduces corner cases to the array.
Although we try to cover all these corner cases, it is safer to just
disable write-back cache when the array is in degraded mode.

In this patch, we disable writeback cache for degraded mode:
1. On device failure, if the array enters degraded mode, raid5_error()
   will submit async job r5c_disable_writeback_async to disable
   writeback;
2. In r5c_journal_mode_store(), it is invalid to enable writeback in
   degraded mode;
3. In r5c_try_caching_write(), stripes with s->failed>0 will be handled
   in write-through mode.
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 07e83364
...@@ -162,6 +162,8 @@ struct r5l_log { ...@@ -162,6 +162,8 @@ struct r5l_log {
/* to submit async io_units, to fulfill ordering of flush */ /* to submit async io_units, to fulfill ordering of flush */
struct work_struct deferred_io_work; struct work_struct deferred_io_work;
/* to disable write back while the array is in degraded mode */
struct work_struct disable_writeback_work;
}; };
/* /*
...@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work) ...@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
r5l_do_submit_io(log, io); r5l_do_submit_io(log, io);
} }
/*
 * Work handler that switches the r5l log out of write-back mode and into
 * write-through mode. Scheduled (via disable_writeback_work) when the array
 * becomes degraded, since write-back caching on a degraded array introduces
 * corner cases that are safer to avoid entirely.
 */
static void r5c_disable_writeback_async(struct work_struct *work)
{
struct r5l_log *log = container_of(work, struct r5l_log,
disable_writeback_work);
struct mddev *mddev = log->rdev->mddev;
/* Already in write-through mode: nothing to do. */
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
return;
pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
mdname(mddev));
/*
 * Quiesce the array around the mode flip so no stripe is being
 * processed while the journal mode changes; this also lets pending
 * writes drain before write-through takes effect.
 */
mddev_suspend(mddev);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
mddev_resume(mddev);
}
static void r5l_submit_current_io(struct r5l_log *log) static void r5l_submit_current_io(struct r5l_log *log)
{ {
struct r5l_io_unit *io = log->current_io; struct r5l_io_unit *io = log->current_io;
...@@ -2269,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, ...@@ -2269,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
val > R5C_JOURNAL_MODE_WRITE_BACK) val > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL; return -EINVAL;
if (raid5_calc_degraded(conf) > 0 &&
val == R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
mddev_suspend(mddev); mddev_suspend(mddev);
conf->log->r5c_journal_mode = val; conf->log->r5c_journal_mode = val;
mddev_resume(mddev); mddev_resume(mddev);
...@@ -2323,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf, ...@@ -2323,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
set_bit(STRIPE_R5C_CACHING, &sh->state); set_bit(STRIPE_R5C_CACHING, &sh->state);
} }
/*
* When run in degraded mode, array is set to write-through mode.
* This check helps drain pending write safely in the transition to
* write-through mode.
*/
if (s->failed) {
r5c_make_stripe_write_out(sh);
return -EAGAIN;
}
for (i = disks; i--; ) { for (i = disks; i--; ) {
dev = &sh->dev[i]; dev = &sh->dev[i];
/* if non-overwrite, use writing-out phase */ /* if non-overwrite, use writing-out phase */
...@@ -2579,6 +2610,19 @@ static int r5l_load_log(struct r5l_log *log) ...@@ -2579,6 +2610,19 @@ static int r5l_load_log(struct r5l_log *log)
return ret; return ret;
} }
/*
 * r5c_update_on_rdev_error() - react to a member-device failure.
 * @mddev: the md device whose rdev just failed.
 *
 * Called from raid5_error(). If the failure left the array degraded while
 * the journal is in write-back mode, schedule disable_writeback_work to
 * switch to write-through mode. The switch is deferred to a work item
 * because it suspends/resumes the array, which cannot be done safely from
 * the error path itself.
 */
void r5c_update_on_rdev_error(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	/* No journal device: write-back cache cannot be active. */
	if (!log)
		return;

	/* Use the NULL-checked local rather than re-reading conf->log. */
	if (raid5_calc_degraded(conf) > 0 &&
	    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{ {
struct request_queue *q = bdev_get_queue(rdev->bdev); struct request_queue *q = bdev_get_queue(rdev->bdev);
...@@ -2651,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) ...@@ -2651,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->no_space_stripes_lock); spin_lock_init(&log->no_space_stripes_lock);
INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
INIT_LIST_HEAD(&log->stripe_in_journal_list); INIT_LIST_HEAD(&log->stripe_in_journal_list);
...@@ -2683,6 +2728,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) ...@@ -2683,6 +2728,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
void r5l_exit_log(struct r5l_log *log) void r5l_exit_log(struct r5l_log *log)
{ {
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread); md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool); mempool_destroy(log->meta_pool);
bioset_free(log->bs); bioset_free(log->bs);
......
...@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, ...@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may * of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices. * be insync in the section most affected by failed devices.
*/ */
static int calc_degraded(struct r5conf *conf) int raid5_calc_degraded(struct r5conf *conf)
{ {
int degraded, degraded2; int degraded, degraded2;
int i; int i;
...@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf) ...@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
if (conf->mddev->reshape_position == MaxSector) if (conf->mddev->reshape_position == MaxSector)
return conf->mddev->degraded > conf->max_degraded; return conf->mddev->degraded > conf->max_degraded;
degraded = calc_degraded(conf); degraded = raid5_calc_degraded(conf);
if (degraded > conf->max_degraded) if (degraded > conf->max_degraded)
return 1; return 1;
return 0; return 0;
...@@ -2555,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) ...@@ -2555,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
clear_bit(In_sync, &rdev->flags); clear_bit(In_sync, &rdev->flags);
mddev->degraded = calc_degraded(conf); mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery);
...@@ -2569,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) ...@@ -2569,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
bdevname(rdev->bdev, b), bdevname(rdev->bdev, b),
mdname(mddev), mdname(mddev),
conf->raid_disks - mddev->degraded); conf->raid_disks - mddev->degraded);
r5c_update_on_rdev_error(mddev);
} }
/* /*
...@@ -7091,7 +7092,7 @@ static int raid5_run(struct mddev *mddev) ...@@ -7091,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
/* /*
* 0 for a fully functional array, 1 or 2 for a degraded array. * 0 for a fully functional array, 1 or 2 for a degraded array.
*/ */
mddev->degraded = calc_degraded(conf); mddev->degraded = raid5_calc_degraded(conf);
if (has_failed(conf)) { if (has_failed(conf)) {
pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
...@@ -7338,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev) ...@@ -7338,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
} }
} }
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded = calc_degraded(conf); mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf); print_raid5_conf(conf);
return count; return count;
...@@ -7698,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev) ...@@ -7698,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices. * pre and post number of devices.
*/ */
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded = calc_degraded(conf); mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
} }
mddev->raid_disks = conf->raid_disks; mddev->raid_disks = conf->raid_disks;
...@@ -7786,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev) ...@@ -7786,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
} else { } else {
int d; int d;
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
mddev->degraded = calc_degraded(conf); mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ; for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks; d < conf->raid_disks - mddev->delta_disks;
......
...@@ -758,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, ...@@ -758,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
extern struct stripe_head * extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector, raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce); int previous, int noblock, int noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log); extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
...@@ -786,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num); ...@@ -786,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
extern void r5c_check_stripe_cache_usage(struct r5conf *conf); extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf); extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode; extern struct md_sysfs_entry r5c_journal_mode;
extern void r5c_update_on_rdev_error(struct mddev *mddev);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment