Commit a8a51d5e authored by Peter Zijlstra, committed by Ingo Molnar

sched: persistent average load per task

Remove the fall-back to SCHED_LOAD_SCALE by remembering the previous value of
cpu_avg_load_per_task(). This is useful because, under the hierarchical group
model, a task's weight can be much smaller than SCHED_LOAD_SCALE.
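
To see the effect, consider tasks that belong to a deeply nested group and
therefore carry very small weights. The userspace sketch below is an
illustration only, not kernel code: mock_rq, avg_old and avg_new are invented
names, and the weights are made up. It contrasts the old SCHED_LOAD_SCALE
fall-back with the new cached average:

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

/* Minimal stand-in for the few struct rq fields involved. */
struct mock_rq {
	unsigned long load_weight;       /* sum of runnable task weights */
	unsigned long nr_running;        /* number of runnable tasks */
	unsigned long avg_load_per_task; /* cached average (new scheme) */
};

/* Old scheme: fall back to SCHED_LOAD_SCALE on an empty runqueue. */
static unsigned long avg_old(const struct mock_rq *rq)
{
	return rq->nr_running ? rq->load_weight / rq->nr_running
			      : SCHED_LOAD_SCALE;
}

/* New scheme: remember the last computed average instead. */
static unsigned long avg_new(struct mock_rq *rq)
{
	if (rq->nr_running)
		rq->avg_load_per_task = rq->load_weight / rq->nr_running;
	return rq->avg_load_per_task;
}

int main(void)
{
	/* Two group tasks of weight 8 each: average is 8 either way. */
	struct mock_rq rq = { .load_weight = 16, .nr_running = 2 };
	printf("busy:  old=%lu new=%lu\n", avg_old(&rq), avg_new(&rq));

	/* Both tasks sleep; the runqueue is momentarily empty. */
	rq.load_weight = 0;
	rq.nr_running = 0;
	printf("empty: old=%lu new=%lu\n", avg_old(&rq), avg_new(&rq));
	return 0;
}

While tasks are runnable the two schemes agree; once the queue empties, the
old one jumps to 1024 while the new one keeps reporting the last real average
of 8.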
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 039a1c41
@@ -554,6 +554,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	unsigned long avg_load_per_task;
+
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
@@ -1427,9 +1429,18 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
@@ -2010,18 +2021,6 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
-/*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-	unsigned long n = rq->nr_running;
-
-	return n ? total / n : SCHED_LOAD_SCALE;
-}
-
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
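
Two details worth noting from the diff: the helper now lives above the
CONFIG_FAIR_GROUP_SCHED block, which is why the forward declaration can be
dropped, and it reads rq->load.weight directly rather than going through
weighted_cpuload(). Since rq->avg_load_per_task is only recomputed while
rq->nr_running is non-zero, an idle CPU reports the last observed average
instead of the often far-too-large SCHED_LOAD_SCALE.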