Commit 176df894 authored by Christoph Hellwig, committed by Song Liu

md: add a mddev_is_dm helper

Add a helper to check for a DM-mapped MD device instead of using
the obfuscated ->gendisk or ->queue NULL checks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Song Liu <song@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-4-hch@lst.de
parent 28be4fd3
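
For readers skimming the diff below, here is a minimal, self-contained C sketch of the pattern this commit introduces. It is not the kernel code itself: `struct mddev` is reduced to the single field the helper tests, and `example_caller()` is a hypothetical stand-in for the many call sites converted below.

```c
#include <stdbool.h>
#include <stdio.h>

struct gendisk;	/* opaque here; only the pointer's NULL-ness matters */

/* Reduced stand-in for the kernel's struct mddev: when an MD array is
 * driven by device-mapper (dm-raid), it owns no gendisk of its own. */
struct mddev {
	struct gendisk *gendisk;	/* NULL when driven by DM */
};

/* The helper this commit adds: one named predicate instead of
 * open-coded NULL checks on ->gendisk or ->queue. */
static inline bool mddev_is_dm(struct mddev *mddev)
{
	return !mddev->gendisk;
}

/* Hypothetical call site illustrating the conversion:
 *   before: if (mddev->gendisk) ...        (what does NULL mean here?)
 *   after:  if (!mddev_is_dm(mddev)) ...   (the condition states its intent)
 */
static void example_caller(struct mddev *mddev)
{
	if (mddev_is_dm(mddev))
		printf("DM-mapped array: no gendisk/queue to touch\n");
	else
		printf("native MD array: ->gendisk and ->queue are valid\n");
}

int main(void)
{
	static int dummy;	/* stands in for a real gendisk's storage */
	struct mddev native = { .gendisk = (struct gendisk *)&dummy };
	struct mddev dm_backed = { .gendisk = NULL };

	example_caller(&native);	/* prints the native-array branch */
	example_caller(&dm_backed);	/* prints the DM-mapped branch */
	return 0;
}
```

The kernel helper is the same one-line test (`return !mddev->gendisk;`); the commit's value is purely in readability, turning every `if (mddev->gendisk)` / `if (mddev->queue)` in the files below into a self-describing `!mddev_is_dm(mddev)`.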
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2410,7 +2410,7 @@ int md_integrity_register(struct mddev *mddev)
 
 	if (list_empty(&mddev->disks))
 		return 0; /* nothing to do */
-	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+	if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
 		return 0; /* shouldn't register, or already is */
 	rdev_for_each(rdev, mddev) {
 		/* skip spares and non-functional disks */
@@ -2463,7 +2463,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
 	struct blk_integrity *bi_mddev;
 
-	if (!mddev->gendisk)
+	if (mddev_is_dm(mddev))
 		return 0;
 
 	bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -5977,7 +5977,7 @@ int md_run(struct mddev *mddev)
 		invalidate_bdev(rdev->bdev);
 		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
 			mddev->ro = MD_RDONLY;
-			if (mddev->gendisk)
+			if (!mddev_is_dm(mddev))
 				set_disk_ro(mddev->gendisk, 1);
 		}
 
@@ -6139,7 +6139,7 @@ int md_run(struct mddev *mddev)
 		}
 	}
 
-	if (mddev->queue) {
+	if (!mddev_is_dm(mddev)) {
 		bool nonrot = true;
 
 		rdev_for_each(rdev, mddev) {
@@ -6404,7 +6404,7 @@ static void mddev_detach(struct mddev *mddev)
 		mddev->pers->quiesce(mddev, 0);
 	}
 	md_unregister_thread(mddev, &mddev->thread);
-	if (mddev->queue)
+	if (!mddev_is_dm(mddev))
 		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 }
 
@@ -7360,10 +7360,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	if (!rv) {
 		if (mddev_is_clustered(mddev))
 			md_cluster_ops->update_size(mddev, old_dev_sectors);
-		else if (mddev->queue) {
+		else if (!mddev_is_dm(mddev))
 			set_capacity_and_notify(mddev->gendisk,
 						mddev->array_sectors);
-		}
 	}
 	return rv;
 }
@@ -9177,7 +9176,7 @@ void md_do_sync(struct md_thread *thread)
 	    mddev->delta_disks > 0 &&
 	    mddev->pers->finish_reshape &&
 	    mddev->pers->size &&
-	    mddev->queue) {
+	    !mddev_is_dm(mddev)) {
 		mddev_lock_nointr(mddev);
 		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
 		mddev_unlock(mddev);
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -911,16 +911,24 @@ int do_md_run(struct mddev *mddev);
 
 extern const struct block_device_operations md_fops;
 
+/*
+ * MD devices can be used underneath by DM, in which case ->gendisk is NULL.
+ */
+static inline bool mddev_is_dm(struct mddev *mddev)
+{
+	return !mddev->gendisk;
+}
+
 static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
 		sector_t sector)
 {
-	if (mddev->gendisk)
+	if (!mddev_is_dm(mddev))
 		trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
 }
 
 #define mddev_add_trace_msg(mddev, fmt, args...)			\
 do {									\
-	if ((mddev)->gendisk)						\
+	if (!mddev_is_dm(mddev))					\
 		blk_add_trace_msg((mddev)->queue, fmt, ##args);		\
 } while (0)
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,7 +399,7 @@ static int raid0_run(struct mddev *mddev)
 		mddev->private = conf;
 	}
 	conf = mddev->private;
-	if (mddev->queue) {
+	if (!mddev_is_dm(mddev)) {
 		struct md_rdev *rdev;
 
 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1926,7 +1926,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	for (mirror = first; mirror <= last; mirror++) {
 		p = conf->mirrors + mirror;
 		if (!p->rdev) {
-			if (mddev->gendisk)
+			if (!mddev_is_dm(mddev))
 				disk_stack_limits(mddev->gendisk, rdev->bdev,
 						  rdev->data_offset << 9);
 
@@ -3227,14 +3227,11 @@ static int raid1_run(struct mddev *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	if (mddev->queue)
+	if (!mddev_is_dm(mddev)) {
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
-	rdev_for_each(rdev, mddev) {
-		if (!mddev->gendisk)
-			continue;
-		disk_stack_limits(mddev->gendisk, rdev->bdev,
-				  rdev->data_offset << 9);
+		rdev_for_each(rdev, mddev)
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 	}
 
 	mddev->degraded = 0;
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2106,7 +2106,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			continue;
 		}
 
-		if (mddev->gendisk)
+		if (!mddev_is_dm(mddev))
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
@@ -2126,7 +2126,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		set_bit(Replacement, &rdev->flags);
 		rdev->raid_disk = repl_slot;
 		err = 0;
-		if (mddev->gendisk)
+		if (!mddev_is_dm(mddev))
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 		conf->fullsync = 1;
@@ -4014,7 +4014,7 @@ static int raid10_run(struct mddev *mddev)
 		}
 	}
 
-	if (mddev->queue) {
+	if (!mddev_is_dm(conf->mddev)) {
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
 		raid10_set_io_opt(conf);
@@ -4048,7 +4048,7 @@ static int raid10_run(struct mddev *mddev)
 		if (first || diff < min_offset_diff)
 			min_offset_diff = diff;
 
-		if (mddev->gendisk)
+		if (!mddev_is_dm(mddev))
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
@@ -4933,7 +4933,7 @@ static void end_reshape(struct r10conf *conf)
 	conf->reshape_safe = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 
-	if (conf->mddev->queue)
+	if (!mddev_is_dm(conf->mddev))
 		raid10_set_io_opt(conf);
 	conf->fullsync = 0;
 }
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2416,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num)
 	size_t namelen = sizeof(conf->cache_name[0]);
 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
-	if (conf->mddev->gendisk)
+	if (mddev_is_dm(conf->mddev))
 		snprintf(conf->cache_name[0], namelen,
-			"raid%d-%s", conf->level, mdname(conf->mddev));
+			"raid%d-%p", conf->level, conf->mddev);
 	else
 		snprintf(conf->cache_name[0], namelen,
-			"raid%d-%p", conf->level, conf->mddev);
+			"raid%d-%s", conf->level, mdname(conf->mddev));
 	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
 	conf->active_name = 0;
@@ -4274,11 +4274,10 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 				set_bit(STRIPE_DELAYED, &sh->state);
 			}
 		}
-		if (rcw && conf->mddev->queue)
-			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
-					  (unsigned long long)sh->sector,
-					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
+		if (rcw && !mddev_is_dm(conf->mddev))
+			mddev_add_trace_msg(conf->mddev, "raid5 rcw %llu %d %d %d",
+				sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
 	}
 
 	if (rcw > disks && rmw > disks &&
@@ -5686,7 +5685,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 	}
 	release_inactive_stripe_list(conf, cb->temp_inactive_list,
 				     NR_STRIPE_HASH_LOCKS);
-	if (mddev->queue)
+	if (!mddev_is_dm(mddev))
 		trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }
@@ -7960,7 +7959,7 @@ static int raid5_run(struct mddev *mddev)
 			mdname(mddev));
 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-	if (mddev->queue) {
+	if (!mddev_is_dm(mddev)) {
 		int chunk_size;
 		/* read-ahead size must cover two whole stripes, which
 		 * is 2 * (datadisks) * chunksize where 'n' is the
@@ -8564,7 +8563,7 @@ static void end_reshape(struct r5conf *conf)
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
 
-		if (conf->mddev->queue)
+		if (!mddev_is_dm(conf->mddev))
 			raid5_set_io_opt(conf);
 	}
 }