Commit c91852ff authored by Mike Snitzer

dm: optimize dm_request_fn()

DM multipath is the only request-based DM target, and it only supports
tables with a single immutable target.  Leverage this fact in
dm_request_fn().
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 16f12266
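The change hoists the target lookup out of the dispatch loop: when the device has a single immutable target, md->immutable_target is read directly and the SRCU-protected live-table lookup (dm_get_live_table() + dm_table_find_target()) only runs as a fallback. Below is a minimal user-space sketch of that caching pattern. It is not kernel code: all names are hypothetical and a pthread rwlock stands in for SRCU.

#include <pthread.h>
#include <stddef.h>

struct target { int id; };

struct device {
	/* Set once at table load when the table holds a single immutable
	 * target, NULL otherwise; safe to read without locking thereafter. */
	struct target *immutable_target;
	/* Stand-in for the SRCU-protected live table; initialize with
	 * PTHREAD_RWLOCK_INITIALIZER. */
	pthread_rwlock_t table_lock;
	struct target *table[16];
};

/* Slow path: resolve the target under the lock, analogous to
 * dm_get_live_table() + dm_table_find_target() in the patch. */
static struct target *lookup_target_slow(struct device *dev, size_t pos)
{
	struct target *ti;

	pthread_rwlock_rdlock(&dev->table_lock);
	ti = dev->table[pos % 16];
	pthread_rwlock_unlock(&dev->table_lock);
	return ti;
}

static void dispatch_loop(struct device *dev)
{
	/* Fast path: one unlocked load; the lookup and its locking move
	 * out of the per-request loop entirely. */
	struct target *ti = dev->immutable_target;

	if (!ti)
		ti = lookup_target_slow(dev, 0);

	while (/* requests pending */ 0) {
		/* hand each request to ti without touching table_lock */
	}
}

The trade-off mirrors the patch: the unlocked read is only sound because the cached pointer is immutable for the device's lifetime; anything that can be reloaded must still go through the locked lookup.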
@@ -2071,12 +2071,18 @@ static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
 static void dm_request_fn(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	int srcu_idx;
-	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-	struct dm_target *ti;
+	struct dm_target *ti = md->immutable_target;
 	struct request *rq;
 	struct dm_rq_target_io *tio;
-	sector_t pos;
+	sector_t pos = 0;
+
+	if (unlikely(!ti)) {
+		int srcu_idx;
+		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+		ti = dm_table_find_target(map, pos);
+		dm_put_live_table(md, srcu_idx);
+	}
 
 	/*
 	 * For suspend, check blk_queue_stopped() and increment
@@ -2087,33 +2093,21 @@ static void dm_request_fn(struct request_queue *q)
 	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto out;
+			return;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
 		if (!(rq->cmd_flags & REQ_FLUSH))
 			pos = blk_rq_pos(rq);
 
-		ti = dm_table_find_target(map, pos);
-		if (!dm_target_is_valid(ti)) {
-			/*
-			 * Must perform setup, that rq_completed() requires,
-			 * before calling dm_kill_unmapped_request
-			 */
-			DMERR_LIMIT("request attempted access beyond the end of device");
-			dm_start_request(md, rq);
-			dm_kill_unmapped_request(rq, -EIO);
-			continue;
-		}
-
-		if (dm_request_peeked_before_merge_deadline(md) &&
-		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
-		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
-			goto delay_and_out;
-
-		if (ti->type->busy && ti->type->busy(ti))
-			goto delay_and_out;
+		if ((dm_request_peeked_before_merge_deadline(md) &&
+		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+		    (ti->type->busy && ti->type->busy(ti))) {
+			blk_delay_queue(q, HZ / 100);
+			return;
+		}
 
 		dm_start_request(md, rq);
 
 		tio = tio_from_request(rq);
@@ -2122,13 +2116,6 @@ static void dm_request_fn(struct request_queue *q)
 		queue_kthread_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
 	}
-
-	goto out;
-
-delay_and_out:
-	blk_delay_queue(q, HZ / 100);
-out:
-	dm_put_live_table(md, srcu_idx);
 }
 
 static int dm_any_congested(void *congested_data, int bdi_bits)