Commit 3dde36dd authored by Corrado Zoccolo's avatar Corrado Zoccolo Committed by Jens Axboe

cfq-iosched: rework seeky detection

Current seeky detection is based on average seek length.
This is suboptimal, since the average will not distinguish between:
* a process doing medium sized seeks
* a process doing some sequential requests interleaved with larger seeks
and even a medium seek can take a lot of time, if the requested sector
happens to be behind the disk head in the rotation (50% probability).

Therefore, we change the seeky queue detection to work as follows:
* each request can be classified as sequential if it is very close to
  the current head position, i.e. it is likely in the disk cache (disks
  usually read more data than requested, and put it in cache for
  subsequent reads). Otherwise, the request is classified as seeky.
* a history window of the last 32 requests is kept, storing the
  classification result.
* A queue is marked as seeky if more than 1/8 of the last 32 requests
  were seeky.

This patch fixes a regression reported by Yanmin, on mmap 64k random
reads.
Reported-by: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 6fc2de06
...@@ -46,8 +46,8 @@ static const int cfq_hist_divisor = 4; ...@@ -46,8 +46,8 @@ static const int cfq_hist_divisor = 4;
#define CFQ_HW_QUEUE_MIN (5) #define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12 #define CFQ_SERVICE_SHIFT 12
#define CFQQ_SEEK_THR 8 * 1024 #define CFQQ_SEEK_THR (sector_t)(8 * 100)
#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
#define RQ_CIC(rq) \ #define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private) ((struct cfq_io_context *) (rq)->elevator_private)
...@@ -132,9 +132,7 @@ struct cfq_queue { ...@@ -132,9 +132,7 @@ struct cfq_queue {
pid_t pid; pid_t pid;
unsigned int seek_samples; u32 seek_history;
u64 seek_total;
sector_t seek_mean;
sector_t last_request_pos; sector_t last_request_pos;
struct cfq_rb_root *service_tree; struct cfq_rb_root *service_tree;
...@@ -1668,16 +1666,7 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, ...@@ -1668,16 +1666,7 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq, bool for_preempt) struct request *rq, bool for_preempt)
{ {
sector_t sdist = cfqq->seek_mean; return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
if (!sample_valid(cfqq->seek_samples))
sdist = CFQQ_SEEK_THR;
/* if seek_mean is big, using it as close criteria is meaningless */
if (sdist > CFQQ_SEEK_THR && !for_preempt)
sdist = CFQQ_SEEK_THR;
return cfq_dist_from_last(cfqd, rq) <= sdist;
} }
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
...@@ -2975,30 +2964,16 @@ static void ...@@ -2975,30 +2964,16 @@ static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq) struct request *rq)
{ {
sector_t sdist; sector_t sdist = 0;
u64 total; if (cfqq->last_request_pos) {
if (cfqq->last_request_pos < blk_rq_pos(rq))
if (!cfqq->last_request_pos) sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
sdist = 0; else
else if (cfqq->last_request_pos < blk_rq_pos(rq)) sdist = cfqq->last_request_pos - blk_rq_pos(rq);
sdist = blk_rq_pos(rq) - cfqq->last_request_pos; }
else
sdist = cfqq->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
* odd fragment, pagein, etc
*/
if (cfqq->seek_samples <= 60) /* second&third seek */
sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
else
sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8; cfqq->seek_history <<= 1;
cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8; cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
total = cfqq->seek_total + (cfqq->seek_samples/2);
do_div(total, cfqq->seek_samples);
cfqq->seek_mean = (sector_t)total;
} }
/* /*
...@@ -3023,8 +2998,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, ...@@ -3023,8 +2998,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_mark_cfqq_deep(cfqq); cfq_mark_cfqq_deep(cfqq);
if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples) (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
&& CFQQ_SEEKY(cfqq)))
enable_idle = 0; enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) { else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle) if (cic->ttime_mean > cfqd->cfq_slice_idle)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment