Commit a68bbddb authored by Jens Axboe

block: add queue flag for SSD/non-rotational devices

We don't want to idle in AS/CFQ if the device doesn't have a seek
penalty. So add a QUEUE_FLAG_NONROT to indicate a non-rotational
device; low-level drivers should set this flag upon discovery of
an SSD or similar device type.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 9e49184c
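
For context, a low-level driver that knows its media has no seek penalty would mark the queue roughly as below. This is a minimal sketch, not part of the patch: struct my_dev, its is_ssd field, and my_driver_setup_queue() are illustrative names, while queue_flag_set_unlocked() and QUEUE_FLAG_NONROT are the helpers this kernel generation provides in blkdev.h.

#include <linux/blkdev.h>

/* Illustrative driver-private device type; not part of this patch. */
struct my_dev {
	bool is_ssd;	/* filled in from the hardware's identify data */
};

/*
 * Hypothetical probe-time hook: once the driver has determined that the
 * device is an SSD (or similar), flag the request queue as non-rotational
 * so AS/CFQ skip idling on it.
 */
static void my_driver_setup_queue(struct my_dev *dev, struct request_queue *q)
{
	if (dev->is_ssd)
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
}

With the flag set, blk_queue_nonrot(q) returns non-zero and the new checks added below in as_can_anticipate() and cfq_arm_slice_timer() bail out before arming an idle window.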
block/as-iosched.c
@@ -745,6 +745,12 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+	/*
+	 * SSD device without seek penalty, disable idling
+	 */
+	if (blk_queue_nonrot(ad->q))
+		return 0;
+
 	if (!ad->io_context)
 		/*
 		 * Last request submitted was a write
block/cfq-iosched.c
@@ -878,6 +878,12 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
+	/*
+	 * SSD device without seek penalty, disable idling
+	 */
+	if (blk_queue_nonrot(cfqd->queue))
+		return;
+
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfq_cfqq_slice_new(cfqq));
include/linux/blkdev.h
@@ -442,6 +442,7 @@ struct request_queue
 #define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
 #define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -547,6 +548,7 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)