Commit 681cc5e8 authored by Mike Snitzer

dm: fix request-based DM to not bounce through indirect dm_submit_bio

It is unnecessary to force request-based DM to call into bio-based
dm_submit_bio (via indirect disk->fops->submit_bio) only to have it then
call blk_mq_submit_bio().

Fix this by establishing a request-based DM block_device_operations
(dm_rq_blk_dops, which doesn't have .submit_bio) and update
dm_setup_md_queue() to set md->disk->fops to it for
DM_TYPE_REQUEST_BASED.

Remove DM_TYPE_REQUEST_BASED conditional in dm_submit_bio and unexport
blk_mq_submit_bio.

Fixes: c62b37d9 ("block: move ->make_request_fn to struct block_device_operations")
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 9c37de29
...@@ -2265,7 +2265,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) ...@@ -2265,7 +2265,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
blk_queue_exit(q); blk_queue_exit(q);
return BLK_QC_T_NONE; return BLK_QC_T_NONE;
} }
EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx) unsigned int hctx_idx)
......
...@@ -1633,18 +1633,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio) ...@@ -1633,18 +1633,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
int srcu_idx; int srcu_idx;
struct dm_table *map; struct dm_table *map;
if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
/*
* We are called with a live reference on q_usage_counter, but
* that one will be released as soon as we return. Grab an
* extra one as blk_mq_submit_bio expects to be able to consume
* a reference (which lives until the request is freed in case a
* request is allocated).
*/
percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
return blk_mq_submit_bio(bio);
}
map = dm_get_live_table(md, &srcu_idx); map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) { if (unlikely(!map)) {
DMERR_LIMIT("%s: mapping table unavailable, erroring io", DMERR_LIMIT("%s: mapping table unavailable, erroring io",
...@@ -1727,6 +1715,7 @@ static int next_free_minor(int *minor) ...@@ -1727,6 +1715,7 @@ static int next_free_minor(int *minor)
} }
static const struct block_device_operations dm_blk_dops; static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops; static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work); static void dm_wq_work(struct work_struct *work);
...@@ -2113,9 +2102,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) ...@@ -2113,9 +2102,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) { switch (type) {
case DM_TYPE_REQUEST_BASED: case DM_TYPE_REQUEST_BASED:
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t); r = dm_mq_init_request_queue(md, t);
if (r) { if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device"); DMERR("Cannot initialize queue for request-based dm mapped device");
return r; return r;
} }
break; break;
...@@ -3095,6 +3085,15 @@ static const struct block_device_operations dm_blk_dops = { ...@@ -3095,6 +3085,15 @@ static const struct block_device_operations dm_blk_dops = {
.owner = THIS_MODULE .owner = THIS_MODULE
}; };
/*
 * block_device_operations for request-based (DM_TYPE_REQUEST_BASED) mapped
 * devices.  Note there is intentionally no .submit_bio: with it absent, the
 * block layer routes submitted bios straight to blk_mq_submit_bio() instead
 * of bouncing through the indirect disk->fops->submit_bio (dm_submit_bio),
 * per this commit's stated purpose.  Installed on md->disk->fops by
 * dm_setup_md_queue() for the DM_TYPE_REQUEST_BASED case.
 * NOTE(review): the remaining ops appear to mirror bio-based dm_blk_dops
 * (only its .owner line is visible here) — confirm against full dm.c.
 */
static const struct block_device_operations dm_rq_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
static const struct dax_operations dm_dax_ops = { static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access, .direct_access = dm_dax_direct_access,
.dax_supported = dm_dax_supported, .dax_supported = dm_dax_supported,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment