Commit 3d8466ba authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Song Liu

md/raid10: use the atomic queue limit update APIs

Build the queue limits outside the queue and apply them using
queue_limits_set. To make the code more obvious, also split the queue
limits handling into separate helpers.
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarSong Liu <song@kernel.org>
Tested-by: default avatarSong Liu <song@kernel.org>
Signed-off-by: default avatarSong Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-9-hch@lst.de
parent f63f1735
...@@ -2106,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) ...@@ -2106,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
continue; continue;
} }
if (!mddev_is_dm(mddev)) err = mddev_stack_new_rdev(mddev, rdev);
disk_stack_limits(mddev->gendisk, rdev->bdev, if (err)
rdev->data_offset << 9); return err;
p->head_position = 0; p->head_position = 0;
p->recovery_disabled = mddev->recovery_disabled - 1; p->recovery_disabled = mddev->recovery_disabled - 1;
rdev->raid_disk = mirror; rdev->raid_disk = mirror;
...@@ -2125,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) ...@@ -2125,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
clear_bit(In_sync, &rdev->flags); clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags); set_bit(Replacement, &rdev->flags);
rdev->raid_disk = repl_slot; rdev->raid_disk = repl_slot;
err = 0; err = mddev_stack_new_rdev(mddev, rdev);
if (!mddev_is_dm(mddev)) if (err)
disk_stack_limits(mddev->gendisk, rdev->bdev, return err;
rdev->data_offset << 9);
conf->fullsync = 1; conf->fullsync = 1;
WRITE_ONCE(p->replacement, rdev); WRITE_ONCE(p->replacement, rdev);
} }
...@@ -3969,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev) ...@@ -3969,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err); return ERR_PTR(err);
} }
static void raid10_set_io_opt(struct r10conf *conf) static unsigned int raid10_nr_stripes(struct r10conf *conf)
{ {
int raid_disks = conf->geo.raid_disks; unsigned int raid_disks = conf->geo.raid_disks;
if (conf->geo.raid_disks % conf->geo.near_copies)
return raid_disks;
return raid_disks / conf->geo.near_copies;
}
if (!(conf->geo.raid_disks % conf->geo.near_copies)) static int raid10_set_queue_limits(struct mddev *mddev)
raid_disks /= conf->geo.near_copies; {
blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * struct r10conf *conf = mddev->private;
raid_disks); struct queue_limits lim;
blk_set_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
mddev_stack_rdev_limits(mddev, &lim);
return queue_limits_set(mddev->queue, &lim);
} }
static int raid10_run(struct mddev *mddev) static int raid10_run(struct mddev *mddev)
...@@ -3988,6 +3998,7 @@ static int raid10_run(struct mddev *mddev) ...@@ -3988,6 +3998,7 @@ static int raid10_run(struct mddev *mddev)
sector_t size; sector_t size;
sector_t min_offset_diff = 0; sector_t min_offset_diff = 0;
int first = 1; int first = 1;
int ret = -EIO;
if (mddev->private == NULL) { if (mddev->private == NULL) {
conf = setup_conf(mddev); conf = setup_conf(mddev);
...@@ -4014,12 +4025,6 @@ static int raid10_run(struct mddev *mddev) ...@@ -4014,12 +4025,6 @@ static int raid10_run(struct mddev *mddev)
} }
} }
if (!mddev_is_dm(conf->mddev)) {
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
raid10_set_io_opt(conf);
}
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
long long diff; long long diff;
...@@ -4048,14 +4053,16 @@ static int raid10_run(struct mddev *mddev) ...@@ -4048,14 +4053,16 @@ static int raid10_run(struct mddev *mddev)
if (first || diff < min_offset_diff) if (first || diff < min_offset_diff)
min_offset_diff = diff; min_offset_diff = diff;
if (!mddev_is_dm(mddev))
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
disk->head_position = 0; disk->head_position = 0;
first = 0; first = 0;
} }
if (!mddev_is_dm(conf->mddev)) {
ret = raid10_set_queue_limits(mddev);
if (ret)
goto out_free_conf;
}
/* need to check that every block has at least one working mirror */ /* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) { if (!enough(conf, -1)) {
pr_err("md/raid10:%s: not enough operational mirrors.\n", pr_err("md/raid10:%s: not enough operational mirrors.\n",
...@@ -4156,7 +4163,7 @@ static int raid10_run(struct mddev *mddev) ...@@ -4156,7 +4163,7 @@ static int raid10_run(struct mddev *mddev)
raid10_free_conf(conf); raid10_free_conf(conf);
mddev->private = NULL; mddev->private = NULL;
out: out:
return -EIO; return ret;
} }
static void raid10_free(struct mddev *mddev, void *priv) static void raid10_free(struct mddev *mddev, void *priv)
...@@ -4933,8 +4940,7 @@ static void end_reshape(struct r10conf *conf) ...@@ -4933,8 +4940,7 @@ static void end_reshape(struct r10conf *conf)
conf->reshape_safe = MaxSector; conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (!mddev_is_dm(conf->mddev)) mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
raid10_set_io_opt(conf);
conf->fullsync = 0; conf->fullsync = 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment