Commit ef9e3fac authored by Kiyoshi Ueda, committed by Jens Axboe

block: add lld busy state exporting interface

This patch adds a new interface, blk_lld_busy(), to check a low-level
driver's busy state from the block layer.
blk_lld_busy() calls down into the low-level driver to perform the
check, provided the driver has set q->lld_busy_fn via
blk_queue_lld_busy().

This resolves a performance problem with request stacking devices,
described below.

Some drivers, such as the SCSI mid layer, stop dispatching requests
when they detect a busy state on their low-level device
(host/target/device).  This allows other requests to stay in the
I/O scheduler's queue for a chance of merging.

Request stacking drivers like request-based dm should follow
the same logic.
However, there is no generic interface for the stacking driver to
check whether the underlying device(s) are busy.
If the request stacking driver dispatches and submits requests to
a busy underlying device, the requests will stay in the underlying
device's queue without a chance of merging.
This causes a performance problem under burst I/O load.

With this patch, the busy state of the underlying device is exported
via q->lld_busy_fn(), so the request stacking driver can check it
and stop dispatching requests while the device is busy.
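
For illustration, a request stacking driver's dispatch path could
consult blk_lld_busy() on the underlying queue before passing a
request down.  This is a minimal sketch, not code from this patch;
stacking_should_delay() is a hypothetical helper name:

#include <linux/blkdev.h>

/*
 * Hypothetical helper for a request stacking driver: returns true if
 * a request should be held back on the stacking driver's own queue,
 * where it still has a chance of being merged with later I/O.
 */
static bool stacking_should_delay(struct request_queue *underlying_q)
{
	/* blk_lld_busy() returns 1 when the low-level driver is busy */
	return blk_lld_busy(underlying_q) != 0;
}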

The underlying device driver must return the busy state appropriately:
    1: when the device driver can't process requests immediately.
    0: when the device driver can process requests immediately,
       including abnormal situations where the device driver needs
       to kill all requests.
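
On the provider side, a low-level driver would implement the
convention above in its lld_busy_fn and register it at queue setup
time.  A hedged sketch, assuming driver-private state reachable via
q->queuedata; my_dev, my_dev_lld_busy() and my_dev_setup_queue() are
illustrative names, not part of this patch:

#include <linux/blkdev.h>

/* Illustrative per-device state, not from this patch */
struct my_dev {
	struct request_queue *queue;
	atomic_t in_flight;
	int max_in_flight;
};

/* lld_busy_fn: return 1 if requests can't be processed immediately */
static int my_dev_lld_busy(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;

	return atomic_read(&dev->in_flight) >= dev->max_in_flight;
}

static void my_dev_setup_queue(struct my_dev *dev)
{
	dev->queue->queuedata = dev;
	blk_queue_lld_busy(dev->queue, my_dev_lld_busy);
}
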
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 336c3d8c
@@ -2100,6 +2100,34 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
	rq->rq_disk = bio->bi_bdev->bd_disk;
}
/**
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
* @q : the queue of the device being checked
*
* Description:
* Check if underlying low-level drivers of a device are busy.
* If the drivers want to export their busy state, they must set own
* exporting function using blk_queue_lld_busy() first.
*
* Basically, this function is used only by request stacking drivers
* to stop dispatching requests to underlying devices when underlying
* devices are busy. This behavior helps more I/O merging on the queue
* of the request stacking driver and prevents I/O throughput regression
* on burst I/O load.
*
* Return:
* 0 - Not busy (The request stacking driver should dispatch request)
* 1 - Busy (The request stacking driver should stop dispatching request)
*/
int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
...
@@ -89,6 +89,12 @@ void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
...
@@ -269,6 +269,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
@@ -325,6 +326,7 @@ struct request_queue
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;
	/*
	 * Dispatch queue sorting
@@ -699,6 +701,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
@@ -835,6 +838,7 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
...