Commit b1fc937a authored by Christoph Hellwig, committed by Jens Axboe

block: move the zoned flag into the features field

Move the zoned flag into the features field to reclaim a little
bit of space.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240617060532.127975-23-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8023e144
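
The conversion is mechanical throughout: instead of writing the dedicated bool that this patch removes from struct queue_limits, drivers now set or clear one bit in the shared features word. A minimal sketch of the idiom, distilled from the hunks below rather than taken verbatim from any one of them:

	/* before: dedicated bool member in struct queue_limits */
	lim->zoned = true;

	/* after: one bit in the shared features field */
	lim->features |= BLK_FEAT_ZONED;	/* declare the device zoned */
	lim->features &= ~BLK_FEAT_ZONED;	/* hide zoning from upper layers */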
diff --git a/block/blk-settings.c b/block/blk-settings.c
@@ -68,7 +68,7 @@ static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
 
 static int blk_validate_zoned_limits(struct queue_limits *lim)
 {
-	if (!lim->zoned) {
+	if (!(lim->features & BLK_FEAT_ZONED)) {
 		if (WARN_ON_ONCE(lim->max_open_zones) ||
 		    WARN_ON_ONCE(lim->max_active_zones) ||
 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
@@ -602,8 +602,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 						   b->max_secure_erase_sectors);
 	t->zone_write_granularity = max(t->zone_write_granularity,
 					b->zone_write_granularity);
-	t->zoned = max(t->zoned, b->zoned);
-	if (!t->zoned) {
+	if (!(t->features & BLK_FEAT_ZONED)) {
 		t->zone_write_granularity = 0;
 		t->max_zone_append_sectors = 0;
 	}
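
The old t->zoned = max(t->zoned, b->zoned); stacking line gets no direct replacement here: BLK_FEAT_ZONED is instead added to BLK_FEAT_INHERIT_MASK (see the include/linux/blkdev.h hunk below), so the bit now propagates through the generic feature inheritance that blk_stack_limits performs, roughly:

	/* sketch of the generic path that now carries the zoned bit;
	 * the exact line comes from earlier patches in this series */
	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);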
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
@@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
 		sector += dev->zone_size_sects;
 	}
 
-	lim->zoned = true;
+	lim->features |= BLK_FEAT_ZONED;
 	lim->chunk_sectors = dev->zone_size_sects;
 	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
 	lim->max_open_zones = dev->zone_max_open;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
@@ -2196,7 +2196,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
 			return -EOPNOTSUPP;
 
-		lim.zoned = true;
+		lim.features |= BLK_FEAT_ZONED;
 		lim.max_active_zones = p->max_active_zones;
 		lim.max_open_zones = p->max_open_zones;
 		lim.max_zone_append_sectors = p->max_zone_append_sectors;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
@@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
 
 	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
 
-	lim->zoned = true;
+	lim->features |= BLK_FEAT_ZONED;
 
 	virtio_cread(vdev, struct virtio_blk_config,
 		     zoned.max_open_zones, &v);
@@ -1546,7 +1546,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 	 * All steps that follow use the VQs therefore they need to be
 	 * placed after the virtio_device_ready() call above.
 	 */
-	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+	    (lim.features & BLK_FEAT_ZONED)) {
 		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
 		err = blk_revalidate_disk_zones(vblk->disk);
 		if (err)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
@@ -1605,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
 		ti->type->iterate_devices(ti, dm_set_device_limits,
 					  &ti_limits);
 
-		if (!zoned && ti_limits.zoned) {
+		if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
 			/*
 			 * After stacking all limits, validate all devices
 			 * in table support this zoned model and zone sectors.
 			 */
-			zoned = ti_limits.zoned;
+			zoned = (ti_limits.features & BLK_FEAT_ZONED);
 			zone_sectors = ti_limits.chunk_sectors;
 		}
@@ -1658,12 +1658,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
 	 * zoned model on host-managed zoned block devices.
 	 * BUT...
 	 */
-	if (limits->zoned) {
+	if (limits->features & BLK_FEAT_ZONED) {
 		/*
 		 * ...IF the above limits stacking determined a zoned model
 		 * validate that all of the table's devices conform to it.
 		 */
-		zoned = limits->zoned;
+		zoned = limits->features & BLK_FEAT_ZONED;
 		zone_sectors = limits->chunk_sectors;
 	}
 
 	if (validate_hardware_zoned(t, zoned, zone_sectors))
@@ -1834,7 +1834,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * For a zoned target, setup the zones related queue attributes
 	 * and resources necessary for zone append emulation if necessary.
 	 */
-	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+	    (limits->features & BLK_FEAT_ZONED)) {
 		r = dm_set_zones_restrictions(t, q, limits);
 		if (r)
 			return r;
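
One subtlety in the dm-table.c hunks above: zoned is declared as a bool in dm_calculate_queue_limits, while limits->features & BLK_FEAT_ZONED evaluates to the raw bit value (1u << 10). The assignment is still correct because conversion to bool collapses any nonzero value to true; spelled out explicitly it would read:

	/* equivalent form with the bool conversion made explicit */
	zoned = (limits->features & BLK_FEAT_ZONED) != 0;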
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
@@ -263,7 +263,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
 	if (nr_conv_zones >= ret) {
 		lim->max_open_zones = 0;
 		lim->max_active_zones = 0;
-		lim->zoned = false;
+		lim->features &= ~BLK_FEAT_ZONED;
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
 		disk->nr_zones = 0;
 		return 0;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
@@ -1009,7 +1009,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->max_sectors = chunk_sectors;
 
 	/* We are exposing a drive-managed zoned block device */
-	limits->zoned = false;
+	limits->features &= ~BLK_FEAT_ZONED;
 }
 
 /*
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
@@ -108,7 +108,7 @@ int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
 void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
 		struct nvme_zone_info *zi)
 {
-	lim->zoned = 1;
+	lim->features |= BLK_FEAT_ZONED;
 	lim->max_open_zones = zi->max_open_zones;
 	lim->max_active_zones = zi->max_active_zones;
 	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
@@ -601,7 +601,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
 	if (sdkp->device->type != TYPE_ZBC)
 		return 0;
 
-	lim->zoned = true;
+	lim->features |= BLK_FEAT_ZONED;
 
 	/*
 	 * Per ZBC and ZAC specifications, writes in sequential write required
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -313,6 +313,9 @@ enum {
 
 	/* supports I/O polling */
 	BLK_FEAT_POLL = (1u << 9),
+
+	/* is a zoned device */
+	BLK_FEAT_ZONED = (1u << 10),
 };
 
 /*
@@ -320,7 +323,7 @@ enum {
  */
 #define BLK_FEAT_INHERIT_MASK \
 	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
-	 BLK_FEAT_STABLE_WRITES)
+	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED)
 
 /* internal flags in queue_limits.flags */
 enum {
@@ -372,7 +375,6 @@ struct queue_limits {
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
 	unsigned char		raid_partial_stripes_expensive;
-	bool			zoned;
 	unsigned int		max_open_zones;
 	unsigned int		max_active_zones;
@@ -654,7 +656,8 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
 static inline bool blk_queue_is_zoned(struct request_queue *q)
 {
-	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
+	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+		(q->limits.features & BLK_FEAT_ZONED);
 }
 
 #ifdef CONFIG_BLK_DEV_ZONED
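
With the bool gone from struct queue_limits, blk_queue_is_zoned() remains the helper consumers should use, and it still compiles down to false when CONFIG_BLK_DEV_ZONED is disabled thanks to IS_ENABLED(). A hypothetical caller is unaffected by this patch:

	/* hypothetical consumer; unchanged by the flag-to-feature move */
	if (blk_queue_is_zoned(disk->queue))
		pr_info("%s: zoned block device\n", disk->disk_name);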