Commit 9a0e609e authored by Mike Snitzer

dm: only run the queue on completion if congested or no requests pending

On really fast storage it can be beneficial to delay running the
request_queue to allow the elevator more opportunity to merge requests.

Otherwise, it has been observed that requests are being sent to
q->request_fn much quicker than is ideal on IOPS-bound backends.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent ff36ab34
@@ -1024,10 +1024,13 @@ static void end_clone_bio(struct bio *clone, int error)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	int nr_requests_pending;
+
 	atomic_dec(&md->pending[rw]);

 	/* nudge anyone waiting on suspend queue */
-	if (!md_in_flight(md))
+	nr_requests_pending = md_in_flight(md);
+	if (!nr_requests_pending)
 		wake_up(&md->wait);

 	/*
@@ -1036,8 +1039,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
-	if (run_queue)
-		blk_run_queue_async(md->queue);
+	if (run_queue) {
+		if (!nr_requests_pending ||
+		    (nr_requests_pending >= md->queue->nr_congestion_on))
+			blk_run_queue_async(md->queue);
+	}

	/*
	 * dm_put() must be at the end of this function. See the comment above
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment