Commit 48332ff2 authored by Jens Axboe

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.11/drivers

Pull MD changes from Song:

"Summary:
 1. Fix race condition in md_ioctl(), by Dae R. Jeong;
 2. Initialize read_slot properly for raid10, by Kevin Vigor;
 3. Code cleanup, by Pankaj Gupta;
 4. md-cluster resync/reshape fix, by Zhao Heming."

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md/cluster: fix deadlock when node is doing resync job
  md/cluster: block reshape with remote resync job
  md: use current request time as base for ktime comparisons
  md: add comments in md_flush_request()
  md: improve variable names in md_flush_request()
  md/raid10: initialize r10_bio->read_slot before use.
  md: fix a warning caused by a race between concurrent md_ioctl()s
parents 4d063e64 bca5b065
drivers/md/md-cluster.c

@@ -664,9 +664,27 @@ static void recv_daemon(struct md_thread *thread)
  * Takes the lock on the TOKEN lock resource so no other
  * node can communicate while the operation is underway.
  */
-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
+static int lock_token(struct md_cluster_info *cinfo)
 {
-	int error, set_bit = 0;
+	int error;
+
+	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+	if (error) {
+		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+				__func__, __LINE__, error);
+	} else {
+		/* Lock the receive sequence */
+		mutex_lock(&cinfo->recv_mutex);
+	}
+	return error;
+}
+
+/* lock_comm()
+ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
+ */
+static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
+{
+	int rv, set_bit = 0;
 	struct mddev *mddev = cinfo->mddev;
 
 	/*
@@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
 	 */
 	if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
 				      &cinfo->state)) {
-		error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
-					      &cinfo->state);
-		WARN_ON_ONCE(error);
+		rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
+					   &cinfo->state);
+		WARN_ON_ONCE(rv);
 		md_wakeup_thread(mddev->thread);
 		set_bit = 1;
 	}
 
-	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
-	if (set_bit)
-		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
-	if (error)
-		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
-				__func__, __LINE__, error);
-
-	/* Lock the receive sequence */
-	mutex_lock(&cinfo->recv_mutex);
-	return error;
-}
-
-/* lock_comm()
- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
- */
-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
-{
 	wait_event(cinfo->wait,
 		   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
-
-	return lock_token(cinfo, mddev_locked);
+	rv = lock_token(cinfo);
+	if (set_bit)
+		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
+	return rv;
 }
 
 static void unlock_comm(struct md_cluster_info *cinfo)
@@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
 {
 	int ret;
 
-	lock_comm(cinfo, mddev_locked);
-	ret = __sendmsg(cinfo, cmsg);
-	unlock_comm(cinfo);
+	ret = lock_comm(cinfo, mddev_locked);
+	if (!ret) {
+		ret = __sendmsg(cinfo, cmsg);
+		unlock_comm(cinfo);
+	}
 	return ret;
 }
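The key behavioral change above: lock_token() now takes the DLM token first and the recv_mutex only on success, lock_comm() sets and clears MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD around it, and sendmsg() finally checks the return value instead of ignoring it. A minimal userspace sketch of that error path (stub names and the fail_token switch are hypothetical, not the kernel or DLM API):

```c
#include <stdio.h>

/* Hypothetical userspace stubs; only the control flow of the fix is
 * modeled here, none of this is the kernel/DLM API. */
static int fail_token;	/* set to 1 to simulate a DLM failure */

static int lock_token(void)
{
	if (fail_token) {
		fprintf(stderr, "failed to get EX on TOKEN\n");
		return -1;	/* the error now reaches the callers */
	}
	puts("TOKEN held, recv_mutex taken");
	return 0;
}

static int lock_comm(void)
{
	puts("MD_CLUSTER_SEND_LOCK set");
	return lock_token();	/* propagated instead of discarded */
}

static void unlock_comm(void)
{
	puts("send channel unlocked");
}

static int sendmsg(const char *msg)
{
	int ret = lock_comm();

	if (!ret) {	/* only send when both locks were taken */
		printf("sending: %s\n", msg);
		unlock_comm();
	}
	return ret;
}

int main(void)
{
	sendmsg("METADATA_UPDATED");	/* succeeds */
	fail_token = 1;
	return sendmsg("NEWDISK") ? 1 : 0;	/* now fails cleanly */
}
```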
@@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev)
 		return 0;
 	}
 
-	ret = lock_token(cinfo, 1);
+	ret = lock_token(cinfo);
 	clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
 	return ret;
 }
@@ -1255,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
 	int raid_slot = -1;
 
 	md_update_sb(mddev, 1);
-	lock_comm(cinfo, 1);
+	if (lock_comm(cinfo, 1)) {
+		pr_err("%s: lock_comm failed\n", __func__);
+		return;
+	}
 
 	memset(&cmsg, 0, sizeof(cmsg));
 	cmsg.type = cpu_to_le32(METADATA_UPDATED);
@@ -1407,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
 	cmsg.type = cpu_to_le32(NEWDISK);
 	memcpy(cmsg.uuid, uuid, 16);
 	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
-	lock_comm(cinfo, 1);
+	if (lock_comm(cinfo, 1))
+		return -EAGAIN;
 	ret = __sendmsg(cinfo, &cmsg);
 	if (ret) {
 		unlock_comm(cinfo);
drivers/md/md.c

@@ -639,7 +639,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 	 * could wait for this and below md_handle_request could wait for those
 	 * bios because of suspend check
 	 */
-	mddev->last_flush = mddev->start_flush;
+	mddev->prev_flush_start = mddev->start_flush;
 	mddev->flush_bio = NULL;
 	wake_up(&mddev->sb_wait);
 
@@ -660,13 +660,17 @@ static void md_submit_flush_data(struct work_struct *ws)
  */
 bool md_flush_request(struct mddev *mddev, struct bio *bio)
 {
-	ktime_t start = ktime_get_boottime();
+	ktime_t req_start = ktime_get_boottime();
 	spin_lock_irq(&mddev->lock);
+	/* flush requests wait until ongoing flush completes,
+	 * hence coalescing all the pending requests.
+	 */
 	wait_event_lock_irq(mddev->sb_wait,
 			    !mddev->flush_bio ||
-			    ktime_after(mddev->last_flush, start),
+			    ktime_before(req_start, mddev->prev_flush_start),
 			    mddev->lock);
-	if (!ktime_after(mddev->last_flush, start)) {
+	/* new request after previous flush is completed */
+	if (ktime_after(req_start, mddev->prev_flush_start)) {
 		WARN_ON(mddev->flush_bio);
 		mddev->flush_bio = bio;
 		bio = NULL;
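The coalescing logic above compares each request's start time against prev_flush_start, the start time of the last completed flush: a request that arrived before that flush began is already ordered behind it and needs no flush of its own. A toy sketch of the comparison (timestamps are made-up nanosecond values standing in for ktime_t):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up nanosecond timestamps standing in for ktime_t. */
static bool needs_new_flush(int64_t req_start, int64_t prev_flush_start)
{
	/* A request issued before the previous flush *started* is already
	 * ordered behind that flush and can complete without a new one;
	 * this mirrors the ktime_before()/ktime_after() pair above. */
	return req_start > prev_flush_start;
}

int main(void)
{
	int64_t prev_flush_start = 1000;

	/* coalesced with the previous flush: */
	printf("req@900:  needs flush? %d\n", needs_new_flush(900, prev_flush_start));
	/* arrived after that flush started, so it issues its own: */
	printf("req@1100: needs flush? %d\n", needs_new_flush(1100, prev_flush_start));
	return 0;
}
```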
@@ -6949,8 +6953,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 		goto busy;
 
 kick_rdev:
-	if (mddev_is_clustered(mddev))
-		md_cluster_ops->remove_disk(mddev, rdev);
+	if (mddev_is_clustered(mddev)) {
+		if (md_cluster_ops->remove_disk(mddev, rdev))
+			goto busy;
+	}
 
 	md_kick_rdev_from_array(rdev);
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7279,6 +7285,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 		return -EINVAL;
 	if (mddev->sync_thread ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
 	    mddev->reshape_position != MaxSector)
 		return -EBUSY;
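The added MD_RESYNCING_REMOTE test makes update_raid_disks() refuse a reshape while another cluster node is resyncing, not just while a local resync runs. A hypothetical sketch of the guard (bit values invented for illustration):

```c
#include <stdio.h>

/* Bit values invented for illustration; the kernel uses the
 * MD_RECOVERY_RUNNING / MD_RESYNCING_REMOTE recovery flags. */
enum {
	MD_RECOVERY_RUNNING = 1u << 0,
	MD_RESYNCING_REMOTE = 1u << 1,	/* resync driven by another node */
};

static int update_raid_disks(unsigned int recovery)
{
	/* Reshape is refused while any resync runs -- now including a
	 * remote one, which previously slipped through the check. */
	if (recovery & (MD_RECOVERY_RUNNING | MD_RESYNCING_REMOTE))
		return -16;	/* -EBUSY */
	return 0;
}

int main(void)
{
	printf("idle array:    %d\n", update_raid_disks(0));
	printf("remote resync: %d\n", update_raid_disks(MD_RESYNCING_REMOTE));
	return 0;
}
```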
@@ -7589,8 +7596,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 			err = -EBUSY;
 			goto out;
 		}
-		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
-		set_bit(MD_CLOSING, &mddev->flags);
+		if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
+			mutex_unlock(&mddev->open_mutex);
+			err = -EBUSY;
+			goto out;
+		}
 		did_set_md_closing = true;
 		mutex_unlock(&mddev->open_mutex);
 		sync_blockdev(bdev);
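Replacing the WARN_ON_ONCE() + set_bit() pair with test_and_set_bit() makes checking and setting MD_CLOSING a single atomic step, so only one of two racing md_ioctl() callers can proceed; the loser gets -EBUSY instead of tripping the warning. A small C11 userspace sketch of why the atomic read-modify-write closes the race (a model, not kernel code; build with -pthread):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint flags;	/* bit 0 plays the role of MD_CLOSING */

/* test_and_set_bit(): one atomic read-modify-write, so exactly one
 * caller observes the bit as previously clear. */
static void *racing_ioctl(void *arg)
{
	unsigned int old = atomic_fetch_or(&flags, 1u);

	printf("caller %ld: %s\n", (long)(intptr_t)arg,
	       (old & 1u) ? "bit already set -> -EBUSY" : "wins, proceeds");
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, racing_ioctl, (void *)(intptr_t)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```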
@@ -9660,8 +9670,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 
-	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
-		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
+		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+		if (ret)
+			pr_warn("md: updating array disks failed. %d\n", ret);
+	}
 
 	/*
 	 * Since mddev->delta_disks has already updated in update_raid_disks,

drivers/md/md.h

@@ -495,9 +495,9 @@ struct mddev {
 	 */
 	struct bio *flush_bio;
 	atomic_t flush_pending;
-	ktime_t start_flush, last_flush; /* last_flush is when the last completed
-					  * flush was started.
-					  */
+	ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed
						* flush was started.
						*/
 	struct work_struct flush_work;
 	struct work_struct event_work;	/* used by dm to report failure event */
 	mempool_t *serial_info_pool;
drivers/md/raid10.c

@@ -1127,7 +1127,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	struct md_rdev *err_rdev = NULL;
 	gfp_t gfp = GFP_NOIO;
 
-	if (r10_bio->devs[slot].rdev) {
+	if (slot >= 0 && r10_bio->devs[slot].rdev) {
 		/*
 		 * This is an error retry, but we cannot
 		 * safely dereference the rdev in the r10_bio,
@@ -1508,6 +1508,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
 	r10_bio->mddev = mddev;
 	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
+	r10_bio->read_slot = -1;
 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->geo.raid_disks);
 
 	if (bio_data_dir(bio) == READ)
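Initializing read_slot to -1 gives raid10_read_request() a reliable way to tell a fresh request from an error retry, and the added slot >= 0 check keeps the sentinel from ever being used as an array index. A tiny sketch of the pattern (struct and field names are illustrative only, not the real struct r10bio):

```c
#include <stdio.h>

/* Illustrative stand-in for struct r10bio; field names are invented. */
struct r10bio_sketch {
	int read_slot;		/* -1 = fresh request, no slot chosen yet */
	void *rdev[2];		/* plays the role of devs[slot].rdev */
};

static void read_request(struct r10bio_sketch *r10_bio)
{
	int slot = r10_bio->read_slot;

	/* The slot >= 0 check short-circuits before indexing, so the
	 * fresh-request path never dereferences rdev[-1]. */
	if (slot >= 0 && r10_bio->rdev[slot])
		puts("error retry: reuse the recorded rdev");
	else
		puts("fresh request: pick a read slot");
}

int main(void)
{
	struct r10bio_sketch r10_bio = { .read_slot = -1 };
	read_request(&r10_bio);
	return 0;
}
```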