Commit d58eff83 authored by Yu Kuai, committed by Song Liu

md: initialize 'active_io' while allocating mddev

'active_io' is used by mddev_suspend(), but it is initialized in
md_run(); this means that 'reconfig_mutex' must be held and
"mddev->pers" must be set before mddev_suspend() can be called.

Initialize 'active_io' earlier so that mddev_suspend() is safe to call
as soon as the mddev is allocated; this will be helpful for refactoring
mddev_suspend() in the following patches.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20230825030956.1527023-2-yukuai1@huaweicloud.com
parent 81e2ce1b
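
With this change, mddev_init() becomes a fallible constructor paired with a new
mddev_destroy(). As a rough sketch of the resulting calling convention
(illustrative only; my_raid_ctx and its helpers are made up for this note and
are not part of the patch), an embedder of struct mddev now does roughly what
raid_set_alloc()/raid_set_free() and mddev_alloc()/mddev_free() do in the hunks
below:

#include <linux/slab.h>
#include <linux/err.h>
#include "md.h"

struct my_raid_ctx {			/* hypothetical container */
	struct mddev md;
};

static struct my_raid_ctx *my_raid_ctx_alloc(void)
{
	struct my_raid_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	/* 'active_io' is set up here, so mddev_suspend() is safe from now on */
	if (mddev_init(&ctx->md)) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}

	return ctx;
}

static void my_raid_ctx_free(struct my_raid_ctx *ctx)
{
	mddev_destroy(&ctx->md);	/* tears down 'active_io' */
	kfree(ctx);
}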
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -749,7 +749,11 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 		return ERR_PTR(-ENOMEM);
 	}
 
-	mddev_init(&rs->md);
+	if (mddev_init(&rs->md)) {
+		kfree(rs);
+		ti->error = "Cannot initialize raid context";
+		return ERR_PTR(-ENOMEM);
+	}
 
 	rs->raid_disks = raid_devs;
 	rs->delta_disks = 0;
@@ -798,6 +802,7 @@ static void raid_set_free(struct raid_set *rs)
 			dm_put_device(rs->ti, rs->dev[i].data_dev);
 	}
 
+	mddev_destroy(&rs->md);
 	kfree(rs);
 }
 
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -639,8 +639,20 @@ void mddev_put(struct mddev *mddev)
 static void md_safemode_timeout(struct timer_list *t);
 static void md_start_sync(struct work_struct *ws);
 
-void mddev_init(struct mddev *mddev)
+static void active_io_release(struct percpu_ref *ref)
+{
+	struct mddev *mddev = container_of(ref, struct mddev, active_io);
+
+	wake_up(&mddev->sb_wait);
+}
+
+int mddev_init(struct mddev *mddev)
 {
+
+	if (percpu_ref_init(&mddev->active_io, active_io_release,
+			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+		return -ENOMEM;
+
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
 	mutex_init(&mddev->sync_mutex);
@@ -665,9 +677,17 @@ void mddev_init(struct mddev *mddev)
 	INIT_WORK(&mddev->sync_work, md_start_sync);
 	INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mddev_init);
 
+void mddev_destroy(struct mddev *mddev)
+{
+	percpu_ref_exit(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(mddev_destroy);
+
 static struct mddev *mddev_find_locked(dev_t unit)
 {
 	struct mddev *mddev;
@@ -711,13 +731,16 @@ static struct mddev *mddev_alloc(dev_t unit)
 	new = kzalloc(sizeof(*new), GFP_KERNEL);
 	if (!new)
 		return ERR_PTR(-ENOMEM);
-	mddev_init(new);
+
+	error = mddev_init(new);
+	if (error)
+		goto out_free_new;
 
 	spin_lock(&all_mddevs_lock);
 	if (unit) {
 		error = -EEXIST;
 		if (mddev_find_locked(unit))
-			goto out_free_new;
+			goto out_destroy_new;
 		new->unit = unit;
 		if (MAJOR(unit) == MD_MAJOR)
 			new->md_minor = MINOR(unit);
@@ -728,7 +751,7 @@ static struct mddev *mddev_alloc(dev_t unit)
 		error = -ENODEV;
 		new->unit = mddev_alloc_unit();
 		if (!new->unit)
-			goto out_free_new;
+			goto out_destroy_new;
 		new->md_minor = MINOR(new->unit);
 		new->hold_active = UNTIL_STOP;
 	}
@@ -736,8 +759,11 @@ static struct mddev *mddev_alloc(dev_t unit)
 	list_add(&new->all_mddevs, &all_mddevs);
 	spin_unlock(&all_mddevs_lock);
 	return new;
-out_free_new:
+out_destroy_new:
 	spin_unlock(&all_mddevs_lock);
+	mddev_destroy(new);
+
+out_free_new:
 	kfree(new);
 	return ERR_PTR(error);
 }
@@ -748,6 +774,7 @@ static void mddev_free(struct mddev *mddev)
 	list_del(&mddev->all_mddevs);
 	spin_unlock(&all_mddevs_lock);
 
+	mddev_destroy(mddev);
 	kfree(mddev);
 }
 
@@ -5787,12 +5814,6 @@ static void md_safemode_timeout(struct timer_list *t)
 }
 
 static int start_dirty_degraded;
 
-static void active_io_release(struct percpu_ref *ref)
-{
-	struct mddev *mddev = container_of(ref, struct mddev, active_io);
-
-	wake_up(&mddev->sb_wait);
-}
-
 int md_run(struct mddev *mddev)
 {
@@ -5873,15 +5894,10 @@ int md_run(struct mddev *mddev)
 		nowait = nowait && bdev_nowait(rdev->bdev);
 	}
 
-	err = percpu_ref_init(&mddev->active_io, active_io_release,
-			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
-	if (err)
-		return err;
-
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
-			goto exit_active_io;
+			return err;
 	}
 	if (!bioset_initialized(&mddev->sync_set)) {
 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
@@ -6078,8 +6094,6 @@ int md_run(struct mddev *mddev)
 	bioset_exit(&mddev->sync_set);
 exit_bio_set:
 	bioset_exit(&mddev->bio_set);
-exit_active_io:
-	percpu_ref_exit(&mddev->active_io);
 	return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
@@ -6295,7 +6309,6 @@ static void __md_stop(struct mddev *mddev)
 	module_put(pers->owner);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
-	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
 	bioset_exit(&mddev->io_clone_set);
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -798,7 +798,8 @@ extern int md_integrity_register(struct mddev *mddev);
 extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 
-extern void mddev_init(struct mddev *mddev);
+extern int mddev_init(struct mddev *mddev);
+extern void mddev_destroy(struct mddev *mddev);
 struct mddev *md_alloc(dev_t dev, char *name);
 void mddev_put(struct mddev *mddev);
 extern int md_run(struct mddev *mddev);
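
A note on why PERCPU_REF_ALLOW_REINIT is the right flag here: roughly speaking,
mddev_suspend() kills 'active_io' and waits on sb_wait until it drains (which is
what the active_io_release() wake-up above serves), and mddev_resume() brings it
back, so the reference must survive repeated suspend/resume cycles and is only
torn down for good in mddev_destroy(). A minimal lifecycle sketch using only the
generic percpu_ref API (the demo_* names are made up, not md code):

#include <linux/percpu-refcount.h>
#include <linux/wait.h>
#include <linux/gfp.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

static void demo_release(struct percpu_ref *ref)
{
	/* last reference dropped after percpu_ref_kill(): let the waiter proceed */
	wake_up(&demo_wait);
}

static int demo_lifecycle(void)
{
	struct percpu_ref ref;
	int err;

	/* ALLOW_REINIT is what permits resurrecting the ref after a kill */
	err = percpu_ref_init(&ref, demo_release,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (err)
		return err;

	percpu_ref_kill(&ref);				 /* "suspend": refuse new users */
	wait_event(demo_wait, percpu_ref_is_zero(&ref)); /* drain in-flight users */
	percpu_ref_resurrect(&ref);			 /* "resume": accept users again */

	percpu_ref_exit(&ref);				 /* final teardown, cf. mddev_destroy() */
	return 0;
}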