Commit a52758a3 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

block: move the zone_resetall flag to queue_limits

Move the zone_resetall flag into the queue_limits feature field so that
it can be set atomically with the queue frozen.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240617060532.127975-24-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b1fc937a
...@@ -91,7 +91,6 @@ static const char *const blk_queue_flag_name[] = { ...@@ -91,7 +91,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED), QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(PCI_P2PDMA), QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(SQ_SCHED), QUEUE_FLAG_NAME(SQ_SCHED),
......
...@@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev, ...@@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
sector += dev->zone_size_sects; sector += dev->zone_size_sects;
} }
lim->features |= BLK_FEAT_ZONED; lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->chunk_sectors = dev->zone_size_sects; lim->chunk_sectors = dev->zone_size_sects;
lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open; lim->max_open_zones = dev->zone_max_open;
...@@ -171,7 +171,6 @@ int null_register_zoned_dev(struct nullb *nullb) ...@@ -171,7 +171,6 @@ int null_register_zoned_dev(struct nullb *nullb)
struct request_queue *q = nullb->q; struct request_queue *q = nullb->q;
struct gendisk *disk = nullb->disk; struct gendisk *disk = nullb->disk;
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
disk->nr_zones = bdev_nr_zones(disk->part0); disk->nr_zones = bdev_nr_zones(disk->part0);
pr_info("%s: using %s zone append\n", pr_info("%s: using %s zone append\n",
......
...@@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) ...@@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub) static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{ {
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
} }
...@@ -2196,7 +2194,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) ...@@ -2196,7 +2194,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
return -EOPNOTSUPP; return -EOPNOTSUPP;
lim.features |= BLK_FEAT_ZONED; lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim.max_active_zones = p->max_active_zones; lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones; lim.max_open_zones = p->max_open_zones;
lim.max_zone_append_sectors = p->max_zone_append_sectors; lim.max_zone_append_sectors = p->max_zone_append_sectors;
......
...@@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, ...@@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
lim->features |= BLK_FEAT_ZONED; lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
virtio_cread(vdev, struct virtio_blk_config, virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v); zoned.max_open_zones, &v);
...@@ -1548,7 +1548,6 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -1548,7 +1548,6 @@ static int virtblk_probe(struct virtio_device *vdev)
*/ */
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(lim.features & BLK_FEAT_ZONED)) { (lim.features & BLK_FEAT_ZONED)) {
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
err = blk_revalidate_disk_zones(vblk->disk); err = blk_revalidate_disk_zones(vblk->disk);
if (err) if (err)
goto out_cleanup_disk; goto out_cleanup_disk;
......
...@@ -108,13 +108,12 @@ int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf, ...@@ -108,13 +108,12 @@ int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
struct nvme_zone_info *zi) struct nvme_zone_info *zi)
{ {
lim->features |= BLK_FEAT_ZONED; lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->max_open_zones = zi->max_open_zones; lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones; lim->max_active_zones = zi->max_active_zones;
lim->max_zone_append_sectors = ns->ctrl->max_zone_append; lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze = lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size); nvme_lba_to_sect(ns->head, zi->zone_size);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
} }
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
......
...@@ -592,8 +592,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) ...@@ -592,8 +592,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
u8 buf[SD_BUF_SIZE]) u8 buf[SD_BUF_SIZE])
{ {
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
unsigned int nr_zones; unsigned int nr_zones;
u32 zone_blocks = 0; u32 zone_blocks = 0;
int ret; int ret;
...@@ -601,7 +599,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, ...@@ -601,7 +599,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
if (sdkp->device->type != TYPE_ZBC) if (sdkp->device->type != TYPE_ZBC)
return 0; return 0;
lim->features |= BLK_FEAT_ZONED; lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
/* /*
* Per ZBC and ZAC specifications, writes in sequential write required * Per ZBC and ZAC specifications, writes in sequential write required
...@@ -630,7 +628,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, ...@@ -630,7 +628,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
sdkp->early_zone_info.zone_blocks = zone_blocks; sdkp->early_zone_info.zone_blocks = zone_blocks;
/* The drive satisfies the kernel restrictions: set it up */ /* The drive satisfies the kernel restrictions: set it up */
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
if (sdkp->zones_max_open == U32_MAX) if (sdkp->zones_max_open == U32_MAX)
lim->max_open_zones = 0; lim->max_open_zones = 0;
else else
......
...@@ -316,6 +316,9 @@ enum { ...@@ -316,6 +316,9 @@ enum {
/* is a zoned device */ /* is a zoned device */
BLK_FEAT_ZONED = (1u << 10), BLK_FEAT_ZONED = (1u << 10),
/* supports Zone Reset All */
BLK_FEAT_ZONE_RESETALL = (1u << 11),
}; };
/* /*
...@@ -586,7 +589,6 @@ struct request_queue { ...@@ -586,7 +589,6 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
...@@ -607,7 +609,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); ...@@ -607,7 +609,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL) #define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_zone_resetall(q) \ #define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) ((q)->limits.features & BLK_FEAT_ZONE_RESETALL)
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q) \ #define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment