Commit b18ec803 authored by Mike Galbraith, committed by Linus Torvalds

[PATCH] sched: improve migration accuracy

Co-opt rq->timestamp_last_tick to maintain a cache_hot_time evaluation
reference timestamp at both tick and sched times to prevent said reference,
formerly rq->timestamp_last_tick, from being behind task->last_ran at
evaluation time, and to move said reference closer to current time on the
remote processor, intent being to improve cache hot evaluation and
timestamp adjustment accuracy for task migration.

Fix minor sched_time double accounting error which occurs when a task
passing through schedule() does not schedule off, and takes the next timer
tick.

[kenneth.w.chen@intel.com: cleanup]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Ken Chen <kenneth.w.chen@intel.com>
Cc: Don Mullis <dwm@meer.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 08c183f3
...@@ -225,7 +225,8 @@ struct rq { ...@@ -225,7 +225,8 @@ struct rq {
unsigned long nr_uninterruptible; unsigned long nr_uninterruptible;
unsigned long expired_timestamp; unsigned long expired_timestamp;
unsigned long long timestamp_last_tick; /* Cached timestamp set by update_cpu_clock() */
unsigned long long most_recent_timestamp;
struct task_struct *curr, *idle; struct task_struct *curr, *idle;
unsigned long next_balance; unsigned long next_balance;
struct mm_struct *prev_mm; struct mm_struct *prev_mm;
...@@ -944,8 +945,8 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local) ...@@ -944,8 +945,8 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
if (!local) { if (!local) {
/* Compensate for drifting sched_clock */ /* Compensate for drifting sched_clock */
struct rq *this_rq = this_rq(); struct rq *this_rq = this_rq();
now = (now - this_rq->timestamp_last_tick) now = (now - this_rq->most_recent_timestamp)
+ rq->timestamp_last_tick; + rq->most_recent_timestamp;
} }
#endif #endif
...@@ -1689,8 +1690,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) ...@@ -1689,8 +1690,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* Not the local CPU - must adjust timestamp. This should * Not the local CPU - must adjust timestamp. This should
* get optimised away in the !CONFIG_SMP case. * get optimised away in the !CONFIG_SMP case.
*/ */
p->timestamp = (p->timestamp - this_rq->timestamp_last_tick) p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
+ rq->timestamp_last_tick; + rq->most_recent_timestamp;
__activate_task(p, rq); __activate_task(p, rq);
if (TASK_PREEMPTS_CURR(p, rq)) if (TASK_PREEMPTS_CURR(p, rq))
resched_task(rq->curr); resched_task(rq->curr);
...@@ -2068,8 +2069,8 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array, ...@@ -2068,8 +2069,8 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array,
set_task_cpu(p, this_cpu); set_task_cpu(p, this_cpu);
inc_nr_running(p, this_rq); inc_nr_running(p, this_rq);
enqueue_task(p, this_array); enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
+ this_rq->timestamp_last_tick; + this_rq->most_recent_timestamp;
/* /*
* Note that idle threads have a prio of MAX_PRIO, for this test * Note that idle threads have a prio of MAX_PRIO, for this test
* to be always true for them. * to be always true for them.
...@@ -2105,10 +2106,15 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, ...@@ -2105,10 +2106,15 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
* 2) too many balance attempts have failed. * 2) too many balance attempts have failed.
*/ */
if (sd->nr_balance_failed > sd->cache_nice_tries) if (sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
if (task_hot(p, rq->most_recent_timestamp, sd))
schedstat_inc(sd, lb_hot_gained[idle]);
#endif
return 1; return 1;
}
if (task_hot(p, rq->timestamp_last_tick, sd)) if (task_hot(p, rq->most_recent_timestamp, sd))
return 0; return 0;
return 1; return 1;
} }
...@@ -2206,11 +2212,6 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, ...@@ -2206,11 +2212,6 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
goto skip_bitmap; goto skip_bitmap;
} }
#ifdef CONFIG_SCHEDSTATS
if (task_hot(tmp, busiest->timestamp_last_tick, sd))
schedstat_inc(sd, lb_hot_gained[idle]);
#endif
pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
pulled++; pulled++;
rem_load_move -= tmp->load_weight; rem_load_move -= tmp->load_weight;
...@@ -2971,7 +2972,8 @@ EXPORT_PER_CPU_SYMBOL(kstat); ...@@ -2971,7 +2972,8 @@ EXPORT_PER_CPU_SYMBOL(kstat);
static inline void static inline void
update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
{ {
p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); p->sched_time += now - p->last_ran;
p->last_ran = rq->most_recent_timestamp = now;
} }
/* /*
...@@ -2984,8 +2986,7 @@ unsigned long long current_sched_time(const struct task_struct *p) ...@@ -2984,8 +2986,7 @@ unsigned long long current_sched_time(const struct task_struct *p)
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
ns = max(p->timestamp, task_rq(p)->timestamp_last_tick); ns = p->sched_time + sched_clock() - p->last_ran;
ns = p->sched_time + sched_clock() - ns;
local_irq_restore(flags); local_irq_restore(flags);
return ns; return ns;
...@@ -3176,8 +3177,6 @@ void scheduler_tick(void) ...@@ -3176,8 +3177,6 @@ void scheduler_tick(void)
update_cpu_clock(p, rq, now); update_cpu_clock(p, rq, now);
rq->timestamp_last_tick = now;
if (p == rq->idle) if (p == rq->idle)
/* Task on the idle queue */ /* Task on the idle queue */
wake_priority_sleeper(rq); wake_priority_sleeper(rq);
...@@ -5032,8 +5031,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) ...@@ -5032,8 +5031,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
* afterwards, and pretending it was a local activate. * afterwards, and pretending it was a local activate.
* This way is cleaner and logically correct. * This way is cleaner and logically correct.
*/ */
p->timestamp = p->timestamp - rq_src->timestamp_last_tick p->timestamp = p->timestamp - rq_src->most_recent_timestamp
+ rq_dest->timestamp_last_tick; + rq_dest->most_recent_timestamp;
deactivate_task(p, rq_src); deactivate_task(p, rq_src);
__activate_task(p, rq_dest); __activate_task(p, rq_dest);
if (TASK_PREEMPTS_CURR(p, rq_dest)) if (TASK_PREEMPTS_CURR(p, rq_dest))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment