Commit 24113d48 authored by Mikulas Patocka, committed by Mike Snitzer

dm: avoid indirect call in __dm_make_request

Indirect calls are inefficient because of the retpolines that are used as a
Spectre mitigation. This patch replaces an indirect call with a conditional
branch (which can be predicted by the branch predictor).
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent cd19181b
...@@ -1696,10 +1696,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, ...@@ -1696,10 +1696,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
return ret; return ret;
} }
typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
process_bio_fn process_bio)
{ {
struct mapped_device *md = q->queuedata; struct mapped_device *md = q->queuedata;
blk_qc_t ret = BLK_QC_T_NONE; blk_qc_t ret = BLK_QC_T_NONE;
...@@ -1719,26 +1716,15 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, ...@@ -1719,26 +1716,15 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
return ret; return ret;
} }
ret = process_bio(md, map, bio); if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
ret = __process_bio(md, map, bio);
else
ret = __split_and_process_bio(md, map, bio);
dm_put_live_table(md, srcu_idx); dm_put_live_table(md, srcu_idx);
return ret; return ret;
} }
/*
* The request function that remaps the bio to one target and
* splits off any remainder.
*/
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
return __dm_make_request(q, bio, __split_and_process_bio);
}
static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
{
return __dm_make_request(q, bio, __process_bio);
}
static int dm_any_congested(void *congested_data, int bdi_bits) static int dm_any_congested(void *congested_data, int bdi_bits)
{ {
int r = bdi_bits; int r = bdi_bits;
...@@ -2229,12 +2215,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) ...@@ -2229,12 +2215,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break; break;
case DM_TYPE_BIO_BASED: case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
break;
case DM_TYPE_NVME_BIO_BASED: case DM_TYPE_NVME_BIO_BASED:
dm_init_normal_md_queue(md); dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request_nvme); blk_queue_make_request(md->queue, dm_make_request);
break; break;
case DM_TYPE_NONE: case DM_TYPE_NONE:
WARN_ON_ONCE(true); WARN_ON_ONCE(true);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment