Commit 3a889fdc authored by Song Liu

Merge branch 'dmraid-fix-6.9' into md-6.9

This is the second half of fixes for dmraid. The first half is available
at [1].

This set contains fixes for:
 - reshape starting unexpectedly, causing data corruption (patches 1, 5, 6);
 - a deadlock when reshape runs concurrently with IO (patch 8);
 - a lockdep warning (patch 9).

For all the dmraid-related tests in the lvm2 suite, there are no new
regressions compared against 6.6 kernels (a good baseline before the
recent regressions).

[1] https://lore.kernel.org/all/CAPhsuW7u1UKHCDOBDhD7DzOVtkGemDz_QnJ4DUq_kSN-Q3G66Q@mail.gmail.com/
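
As background for the deadlock fix in patch 8: suspend freezes the
sync_thread, but raid456 IO that crosses the reshape position waits for
reshape to make progress, so suspend must not wait for that IO. The series
encodes "reshape can no longer make progress" in a new reshape_interrupted()
helper. Below is a small userspace model of that predicate; struct
mddev_model, test_bit_m(), MAX_SECTOR and the RECOVERY_* constants are
illustrative stand-ins for the kernel's struct mddev, test_bit(), MaxSector
and the MD_RECOVERY_* flags, not the kernel code itself:

/*
 * Userspace sketch of the reshape_interrupted() logic added by this
 * series. Plain integer bit operations model the kernel's atomic bitops.
 */
#include <stdbool.h>
#include <stdio.h>

enum { RECOVERY_RUNNING, RECOVERY_INTR, RECOVERY_WAIT, RECOVERY_FROZEN };

#define MAX_SECTOR (~0UL)	/* models MaxSector: "no reshape recorded" */

struct mddev_model {
	unsigned long recovery;		/* recovery flag bits */
	unsigned long reshape_position;	/* MAX_SECTOR means no reshape */
};

static bool test_bit_m(int bit, const unsigned long *word)
{
	return *word & (1UL << bit);
}

/*
 * True when a reshape has started but cannot make progress, so IO
 * crossing the reshape position would wait forever -- the deadlock
 * that raid_presuspend()/prepare_suspend() breaks.
 */
static bool reshape_interrupted(struct mddev_model *mddev)
{
	if (mddev->reshape_position == MAX_SECTOR)
		return false;	/* reshape never started */
	if (!test_bit_m(RECOVERY_RUNNING, &mddev->recovery))
		return true;	/* already interrupted */
	/* still running, but will be interrupted soon */
	return test_bit_m(RECOVERY_WAIT, &mddev->recovery) ||
	       test_bit_m(RECOVERY_INTR, &mddev->recovery) ||
	       test_bit_m(RECOVERY_FROZEN, &mddev->recovery);
}

int main(void)
{
	struct mddev_model m = { .recovery = 0, .reshape_position = MAX_SECTOR };

	printf("no reshape:      %d\n", reshape_interrupted(&m));	/* 0 */

	m.reshape_position = 1024;			/* reshape started */
	m.recovery |= 1UL << RECOVERY_RUNNING;		/* and still running */
	printf("running reshape: %d\n", reshape_interrupted(&m));	/* 0 */

	m.recovery |= 1UL << RECOVERY_FROZEN;		/* suspend froze it */
	printf("frozen reshape:  %d\n", reshape_interrupted(&m));	/* 1 */
	return 0;
}

When this predicate holds at presuspend time, raid_presuspend() calls
pers->prepare_suspend(), and raid5 fails the stuck IO with BLK_STS_RESOURCE
(STRIPE_WAIT_RESHAPE) instead of retrying, as the raid5 hunks below show.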

* dmraid-fix-6.9:
  dm-raid: fix lockdep warning in "pers->hot_add_disk"
  dm-raid456, md/raid456: fix a deadlock for dm-raid456 while io concurrent with reshape
  dm-raid: add a new helper prepare_suspend() in md_personality
  md/dm-raid: don't call md_reap_sync_thread() directly
  dm-raid: really frozen sync_thread during suspend
  md: add a new helper reshape_interrupted()
  md: export helper md_is_rdwr()
  md: export helpers to stop sync_thread
  md: don't clear MD_RECOVERY_FROZEN for new dm-raid until resume
parents 3445139e 95009ae9
@@ -213,6 +213,7 @@ struct raid_dev {
 #define RT_FLAG_RS_IN_SYNC		6
 #define RT_FLAG_RS_RESYNCING		7
 #define RT_FLAG_RS_GROW			8
+#define RT_FLAG_RS_FROZEN		9

 /* Array elements of 64 bit needed for rebuild/failed disk bits */
 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -3240,11 +3241,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	rs->md.ro = 1;
 	rs->md.in_sync = 1;

-	/* Keep array frozen until resume. */
-	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
-
 	/* Has to be held on running the array */
 	mddev_suspend_and_lock_nointr(&rs->md);
+
+	/* Keep array frozen until resume. */
+	md_frozen_sync_thread(&rs->md);
+
 	r = md_run(&rs->md);
 	rs->md.in_sync = 0; /* Assume already marked dirty */
 	if (r) {
@@ -3339,7 +3341,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
 		return DM_MAPIO_REQUEUE;

-	md_handle_request(mddev, bio);
+	if (unlikely(!md_handle_request(mddev, bio)))
+		return DM_MAPIO_REQUEUE;

 	return DM_MAPIO_SUBMITTED;
 }
@@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
 {
 	struct raid_set *rs = ti->private;
 	struct mddev *mddev = &rs->md;
+	int ret = 0;

 	if (!mddev->pers || !mddev->pers->sync_request)
 		return -EINVAL;

-	if (!strcasecmp(argv[0], "frozen"))
-		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	else
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
+	    test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
+		return -EBUSY;

-	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
-		if (mddev->sync_thread) {
-			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_reap_sync_thread(mddev);
-		}
-	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
+	if (!strcasecmp(argv[0], "frozen")) {
+		ret = mddev_lock(mddev);
+		if (ret)
+			return ret;
+
+		md_frozen_sync_thread(mddev);
+		mddev_unlock(mddev);
+	} else if (!strcasecmp(argv[0], "idle")) {
+		ret = mddev_lock(mddev);
+		if (ret)
+			return ret;
+
+		md_idle_sync_thread(mddev);
+		mddev_unlock(mddev);
+	}
+
+	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
 		return -EBUSY;
 	else if (!strcasecmp(argv[0], "resync"))
 		; /* MD_RECOVERY_NEEDED set below */
@@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
 }

+static void raid_presuspend(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+	struct mddev *mddev = &rs->md;
+
+	/*
+	 * From now on, disallow raid_message() to change sync_thread until
+	 * resume, raid_postsuspend() is too late.
+	 */
+	set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+	if (!reshape_interrupted(mddev))
+		return;
+
+	/*
+	 * For raid456, if reshape is interrupted, IO across reshape position
+	 * will never make progress, while caller will wait for IO to be done.
+	 * Inform raid456 to handle those IO to prevent deadlock.
+	 */
+	if (mddev->pers && mddev->pers->prepare_suspend)
+		mddev->pers->prepare_suspend(mddev);
+}
+
+static void raid_presuspend_undo(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+}
+
 static void raid_postsuspend(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;

 	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
-		/* Writes have to be stopped before suspending to avoid deadlocks. */
-		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
-			md_stop_writes(&rs->md);
+		/*
+		 * sync_thread must be stopped during suspend, and writes have
+		 * to be stopped before suspending to avoid deadlocks.
+		 */
+		md_stop_writes(&rs->md);

 		mddev_suspend(&rs->md, false);
 	}
 }
@@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti)
 	}

 	/* Check for any resize/reshape on @rs and adjust/initiate */
-	/* Be prepared for mddev_resume() in raid_resume() */
-	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 		mddev->resync_min = mddev->recovery_cp;
@@ -4047,7 +4091,9 @@ static void raid_resume(struct dm_target *ti)
 		 * Take this opportunity to check whether any failed
 		 * devices are reachable again.
 		 */
+		mddev_lock_nointr(mddev);
 		attempt_restore_of_faulty_devices(rs);
+		mddev_unlock(mddev);
 	}

 	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
@@ -4055,10 +4101,13 @@ static void raid_resume(struct dm_target *ti)
 		if (mddev->delta_disks < 0)
 			rs_set_capacity(rs);

+		WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
+		WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+		clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
 		mddev_lock_nointr(mddev);
-		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		mddev->ro = 0;
 		mddev->in_sync = 0;
+		md_unfrozen_sync_thread(mddev);
 		mddev_unlock_and_resume(mddev);
 	}
 }
@@ -4074,6 +4123,8 @@ static struct target_type raid_target = {
 	.message = raid_message,
 	.iterate_devices = raid_iterate_devices,
 	.io_hints = raid_io_hints,
+	.presuspend = raid_presuspend,
+	.presuspend_undo = raid_presuspend_undo,
 	.postsuspend = raid_postsuspend,
 	.preresume = raid_preresume,
 	.resume = raid_resume,
......
@@ -99,18 +99,6 @@ static void mddev_detach(struct mddev *mddev);
 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
 static void md_wakeup_thread_directly(struct md_thread __rcu *thread);

-enum md_ro_state {
-	MD_RDWR,
-	MD_RDONLY,
-	MD_AUTO_READ,
-	MD_MAX_STATE
-};
-
-static bool md_is_rdwr(struct mddev *mddev)
-{
-	return (mddev->ro == MD_RDWR);
-}
-
 /*
  * Default number of read corrections we'll attempt on an rdev
  * before ejecting it from the array. We divide the read error
@@ -378,7 +366,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
 	return true;
 }

-void md_handle_request(struct mddev *mddev, struct bio *bio)
+bool md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
 	if (is_suspended(mddev, bio)) {
@@ -386,7 +374,7 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)
 		/* Bail out if REQ_NOWAIT is set for the bio */
 		if (bio->bi_opf & REQ_NOWAIT) {
 			bio_wouldblock_error(bio);
-			return;
+			return true;
 		}
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
@@ -402,10 +390,13 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)
 	if (!mddev->pers->make_request(mddev, bio)) {
 		percpu_ref_put(&mddev->active_io);
+		if (!mddev->gendisk && mddev->pers->prepare_suspend)
+			return false;
 		goto check_suspended;
 	}

 	percpu_ref_put(&mddev->active_io);
+	return true;
 }
 EXPORT_SYMBOL(md_handle_request);
@@ -4942,6 +4933,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
 		mddev_lock_nointr(mddev);
 }

+void md_idle_sync_thread(struct mddev *mddev)
+{
+	lockdep_assert_held(&mddev->reconfig_mutex);
+
+	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	stop_sync_thread(mddev, true, true);
+}
+EXPORT_SYMBOL_GPL(md_idle_sync_thread);
+
+void md_frozen_sync_thread(struct mddev *mddev)
+{
+	lockdep_assert_held(&mddev->reconfig_mutex);
+
+	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	stop_sync_thread(mddev, true, false);
+}
+EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
+
+void md_unfrozen_sync_thread(struct mddev *mddev)
+{
+	lockdep_assert_held(&mddev->reconfig_mutex);
+
+	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	md_wakeup_thread(mddev->thread);
+	sysfs_notify_dirent_safe(mddev->sysfs_action);
+}
+EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
+
 static void idle_sync_thread(struct mddev *mddev)
 {
 	mutex_lock(&mddev->sync_mutex);
@@ -6062,7 +6082,10 @@ int md_run(struct mddev *mddev)
 		pr_warn("True protection against single-disk failure might be compromised.\n");
 	}

-	mddev->recovery = 0;
+	/* dm-raid expect sync_thread to be frozen until resume */
+	if (mddev->gendisk)
+		mddev->recovery = 0;
+
 	/* may be over-ridden by personality */
 	mddev->resync_max_sectors = mddev->dev_sectors;
@@ -6344,7 +6367,6 @@ static void md_clean(struct mddev *mddev)

 static void __md_stop_writes(struct mddev *mddev)
 {
-	stop_sync_thread(mddev, true, false);
 	del_timer_sync(&mddev->safemode_timer);

 	if (mddev->pers && mddev->pers->quiesce) {
@@ -6369,6 +6391,8 @@ static void __md_stop_writes(struct mddev *mddev)
 void md_stop_writes(struct mddev *mddev)
 {
 	mddev_lock_nointr(mddev);
+	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	stop_sync_thread(mddev, true, false);
 	__md_stop_writes(mddev);
 	mddev_unlock(mddev);
 }
@@ -8712,6 +8736,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
 }
 EXPORT_SYMBOL_GPL(md_account_bio);

+void md_free_cloned_bio(struct bio *bio)
+{
+	struct md_io_clone *md_io_clone = bio->bi_private;
+	struct bio *orig_bio = md_io_clone->orig_bio;
+	struct mddev *mddev = md_io_clone->mddev;
+
+	if (bio->bi_status && !orig_bio->bi_status)
+		orig_bio->bi_status = bio->bi_status;
+
+	if (md_io_clone->start_time)
+		bio_end_io_acct(orig_bio, md_io_clone->start_time);
+
+	bio_put(bio);
+	percpu_ref_put(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(md_free_cloned_bio);
+
 /* md_allow_write(mddev)
  * Calling this ensures that the array is marked 'active' so that writes
  * may proceed without blocking. It is important to call this before
......
@@ -569,6 +569,37 @@ enum recovery_flags {
 	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
 };

+enum md_ro_state {
+	MD_RDWR,
+	MD_RDONLY,
+	MD_AUTO_READ,
+	MD_MAX_STATE
+};
+
+static inline bool md_is_rdwr(struct mddev *mddev)
+{
+	return (mddev->ro == MD_RDWR);
+}
+
+static inline bool reshape_interrupted(struct mddev *mddev)
+{
+	/* reshape never start */
+	if (mddev->reshape_position == MaxSector)
+		return false;
+
+	/* interrupted */
+	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		return true;
+
+	/* running reshape will be interrupted soon. */
+	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+	    test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
+	    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+		return true;
+
+	return false;
+}
+
 static inline int __must_check mddev_lock(struct mddev *mddev)
 {
 	return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -628,6 +659,7 @@ struct md_personality
 	int (*start_reshape) (struct mddev *mddev);
 	void (*finish_reshape) (struct mddev *mddev);
 	void (*update_reshape_pos) (struct mddev *mddev);
+	void (*prepare_suspend) (struct mddev *mddev);
 	/* quiesce suspends or resumes internal processing.
 	 * 1 - stop new actions and wait for action io to complete
 	 * 0 - return to normal behaviour
@@ -761,6 +793,7 @@ extern void md_finish_reshape(struct mddev *mddev);
 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 			struct bio *bio, sector_t start, sector_t size);
 void md_account_bio(struct mddev *mddev, struct bio **bio);
+void md_free_cloned_bio(struct bio *bio);

 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -789,9 +822,12 @@ extern void md_stop_writes(struct mddev *mddev);
 extern int md_rdev_init(struct md_rdev *rdev);
 extern void md_rdev_clear(struct md_rdev *rdev);

-extern void md_handle_request(struct mddev *mddev, struct bio *bio);
+extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
 extern int mddev_suspend(struct mddev *mddev, bool interruptible);
 extern void mddev_resume(struct mddev *mddev);
+extern void md_idle_sync_thread(struct mddev *mddev);
+extern void md_frozen_sync_thread(struct mddev *mddev);
+extern void md_unfrozen_sync_thread(struct mddev *mddev);

 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
......
@@ -761,6 +761,7 @@ enum stripe_result {
 	STRIPE_RETRY,
 	STRIPE_SCHEDULE_AND_RETRY,
 	STRIPE_FAIL,
+	STRIPE_WAIT_RESHAPE,
 };

 struct stripe_request_ctx {
@@ -5938,7 +5939,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
 		if (ahead_of_reshape(mddev, logical_sector,
 				     conf->reshape_safe)) {
 			spin_unlock_irq(&conf->device_lock);
-			return STRIPE_SCHEDULE_AND_RETRY;
+			ret = STRIPE_SCHEDULE_AND_RETRY;
+			goto out;
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
@@ -6017,6 +6019,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
 out_release:
 	raid5_release_stripe(sh);
+out:
+	if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) {
+		bi->bi_status = BLK_STS_RESOURCE;
+		ret = STRIPE_WAIT_RESHAPE;
+		pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress");
+	}
 	return ret;
 }
@@ -6138,7 +6146,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	while (1) {
 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
 					  bi);
-		if (res == STRIPE_FAIL)
+		if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE)
 			break;

 		if (res == STRIPE_RETRY)
@@ -6176,6 +6184,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	if (rw == WRITE)
 		md_write_end(mddev);

+	if (res == STRIPE_WAIT_RESHAPE) {
+		md_free_cloned_bio(bi);
+		return false;
+	}
+
 	bio_endio(bi);
 	return true;
 }
@@ -8937,6 +8950,18 @@ static int raid5_start(struct mddev *mddev)
 	return r5l_start(conf->log);
 }

+/*
+ * This is only used for dm-raid456, caller already frozen sync_thread, hence
+ * if reshape is still in progress, io that is waiting for reshape can never be
+ * done now, hence wake up and handle those IO.
+ */
+static void raid5_prepare_suspend(struct mddev *mddev)
+{
+	struct r5conf *conf = mddev->private;
+
+	wake_up(&conf->wait_for_overlap);
+}
+
 static struct md_personality raid6_personality =
 {
 	.name		= "raid6",
@@ -8960,6 +8985,7 @@ static struct md_personality raid6_personality =
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
 	.change_consistency_policy = raid5_change_consistency_policy,
+	.prepare_suspend = raid5_prepare_suspend,
 };
 static struct md_personality raid5_personality =
 {
@@ -8984,6 +9010,7 @@ static struct md_personality raid5_personality =
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
 	.change_consistency_policy = raid5_change_consistency_policy,
+	.prepare_suspend = raid5_prepare_suspend,
 };
 static struct md_personality raid4_personality =
@@ -9009,6 +9036,7 @@ static struct md_personality raid4_personality =
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
 	.change_consistency_policy = raid5_change_consistency_policy,
+	.prepare_suspend = raid5_prepare_suspend,
 };

 static int __init raid5_init(void)
......