Commit 3b62c140 authored by Christoph Hellwig, committed by Jens Axboe

xen-blkfront: use blk_mq_alloc_disk and blk_cleanup_disk

Use blk_mq_alloc_disk and blk_cleanup_disk to simplify the gendisk and
request_queue allocation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210602065345.355274-26-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 69387403
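
For readers unfamiliar with the new helpers, the sketch below shows the allocation/teardown pattern the driver switches to. It is illustrative only and not part of the commit: "struct sketch_dev", the sketch_* function names, and the queue_depth value are made-up assumptions; blk_mq_alloc_tag_set(), blk_mq_alloc_disk(), blk_cleanup_disk() and blk_mq_free_tag_set() are the real block-layer interfaces used in the diff.

/*
 * Minimal sketch of the allocation/teardown pattern used after this
 * commit. "struct sketch_dev" and the sketch_* functions are made up
 * for illustration; the blk-mq calls are the ones used in the diff.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/string.h>

struct sketch_dev {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*gd;
	struct request_queue	*rq;
};

static int sketch_alloc_disk(struct sketch_dev *dev,
			     const struct blk_mq_ops *ops)
{
	struct gendisk *gd;
	int err;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 128;	/* arbitrary value for the sketch */
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	err = blk_mq_alloc_tag_set(&dev->tag_set);
	if (err)
		return err;

	/*
	 * One call allocates both the gendisk and its request_queue and
	 * stores the driver data in q->queuedata; this replaces the old
	 * alloc_disk() + blk_mq_init_queue() pair.
	 */
	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(gd)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(gd);
	}

	dev->gd = gd;
	dev->rq = gd->queue;
	return 0;
}

static void sketch_free_disk(struct sketch_dev *dev)
{
	/* Tears down the gendisk and its request_queue together. */
	blk_cleanup_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
}

In the diff below, xlvbd_alloc_gendisk() and xlvbd_release_gendisk() make the same calls directly, which is what allows the old xlvbd_init_blk_queue() helper to be deleted.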
@@ -968,48 +968,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	blk_queue_dma_alignment(rq, 511);
 }
 
-static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size)
-{
-	struct request_queue *rq;
-	struct blkfront_info *info = gd->private_data;
-
-	memset(&info->tag_set, 0, sizeof(info->tag_set));
-	info->tag_set.ops = &blkfront_mq_ops;
-	info->tag_set.nr_hw_queues = info->nr_rings;
-	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
-		/*
-		 * When indirect descriptior is not supported, the I/O request
-		 * will be split between multiple request in the ring.
-		 * To avoid problems when sending the request, divide by
-		 * 2 the depth of the queue.
-		 */
-		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
-	} else
-		info->tag_set.queue_depth = BLK_RING_SIZE(info);
-	info->tag_set.numa_node = NUMA_NO_NODE;
-	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	info->tag_set.cmd_size = sizeof(struct blkif_req);
-	info->tag_set.driver_data = info;
-
-	if (blk_mq_alloc_tag_set(&info->tag_set))
-		return -EINVAL;
-
-	rq = blk_mq_init_queue(&info->tag_set);
-	if (IS_ERR(rq)) {
-		blk_mq_free_tag_set(&info->tag_set);
-		return PTR_ERR(rq);
-	}
-
-	rq->queuedata = info;
-	info->rq = gd->queue = rq;
-	info->gd = gd;
-	info->sector_size = sector_size;
-	info->physical_sector_size = physical_sector_size;
-	blkif_set_queue_limits(info);
-
-	return 0;
-}
-
 static const char *flush_info(struct blkfront_info *info)
 {
 	if (info->feature_flush && info->feature_fua)
@@ -1146,12 +1104,36 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	err = xlbd_reserve_minors(minor, nr_minors);
 	if (err)
-		goto out;
+		return err;
 	err = -ENODEV;
 
-	gd = alloc_disk(nr_minors);
-	if (gd == NULL)
-		goto release;
+	memset(&info->tag_set, 0, sizeof(info->tag_set));
+	info->tag_set.ops = &blkfront_mq_ops;
+	info->tag_set.nr_hw_queues = info->nr_rings;
+	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
+		/*
+		 * When indirect descriptior is not supported, the I/O request
+		 * will be split between multiple request in the ring.
+		 * To avoid problems when sending the request, divide by
+		 * 2 the depth of the queue.
+		 */
+		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
+	} else
+		info->tag_set.queue_depth = BLK_RING_SIZE(info);
+	info->tag_set.numa_node = NUMA_NO_NODE;
+	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	info->tag_set.cmd_size = sizeof(struct blkif_req);
+	info->tag_set.driver_data = info;
+
+	err = blk_mq_alloc_tag_set(&info->tag_set);
+	if (err)
+		goto out_release_minors;
+
+	gd = blk_mq_alloc_disk(&info->tag_set, info);
+	if (IS_ERR(gd)) {
+		err = PTR_ERR(gd);
+		goto out_free_tag_set;
+	}
 
 	strcpy(gd->disk_name, DEV_NAME);
 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1164,14 +1146,16 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	gd->major = XENVBD_MAJOR;
 	gd->first_minor = minor;
+	gd->minors = nr_minors;
 	gd->fops = &xlvbd_block_fops;
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
-		del_gendisk(gd);
-		goto release;
-	}
+	info->rq = gd->queue;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	xlvbd_flush(info);
@@ -1186,9 +1170,10 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	return 0;
 
-release:
+out_free_tag_set:
+	blk_mq_free_tag_set(&info->tag_set);
+out_release_minors:
 	xlbd_release_minors(minor, nr_minors);
-out:
 	return err;
 }
@@ -1217,12 +1202,9 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	nr_minors = info->gd->minors;
 	xlbd_release_minors(minor, nr_minors);
 
-	blk_cleanup_queue(info->rq);
-	blk_mq_free_tag_set(&info->tag_set);
-	info->rq = NULL;
-
-	put_disk(info->gd);
+	blk_cleanup_disk(info->gd);
 	info->gd = NULL;
+	blk_mq_free_tag_set(&info->tag_set);
 }
 
 /* Already hold rinfo->ring_lock. */