Commit afe06efd authored by Tim Chen, committed by Thomas Gleixner

sched: Extend scheduler's asym packing

We generalize the scheduler's asym packing to provide an ordering
of the cpus beyond just the cpu number.  This allows the
ASYM_PACKING scheduler machinery to move load to the preferred
cpus in a sched domain.  The preference is defined by the cpu
priority given by arch_asym_cpu_priority(cpu).
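
For illustration, a minimal sketch of how an architecture might
supply these priorities; the table name here is hypothetical and not
part of this patch (x86's ITMT support, added separately, does
something similar):

	/*
	 * Illustrative sketch only -- not part of this patch.  An
	 * architecture can override the __weak default (added in
	 * kernel/sched/fair.c below) to rank cpus by their real
	 * relative performance.  "arch_core_priority" is an assumed
	 * per-cpu table, e.g. filled from firmware data at boot.
	 */
	static int arch_core_priority[NR_CPUS];

	int arch_asym_cpu_priority(int cpu)
	{
		return arch_core_priority[cpu]; /* higher == preferred */
	}

Since the __weak default returns -cpu, sched_asym_prefer(0, 1) stays
true, so the historical "prefer the lower cpu number" behavior is
preserved on architectures that do not override it.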

We also record the most preferred cpu in a sched group when
we build the sched group's capacity, for fast lookup of the
preferred cpu during load balancing.
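
Conceptually, the build-time scan added to
init_sched_groups_capacity() (see the hunk below) reduces to picking
the maximum-priority cpu of each group; a condensed restatement:

	/* Same logic as the scan added below, in condensed form. */
	int cpu, max_cpu = -1;

	for_each_cpu(cpu, sched_group_cpus(sg))
		if (max_cpu < 0 || sched_asym_prefer(cpu, max_cpu))
			max_cpu = cpu;
	sg->asym_prefer_cpu = max_cpu;

Load balancing then reads the cached sg->asym_prefer_cpu instead of
recomputing it, replacing the old group_first_cpu() comparisons.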
Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-pm@vger.kernel.org
Cc: jolsa@redhat.com
Cc: rjw@rjwysocki.net
Cc: linux-acpi@vger.kernel.org
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: bp@suse.de
Link: http://lkml.kernel.org/r/0e73ae12737dfaafa46c07066cc7c5d3f1675e46.1479844244.git.tim.c.chen@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 2b4d5b25
@@ -1077,6 +1077,8 @@ static inline int cpu_numa_flags(void)
 }
 #endif
 
+extern int arch_asym_cpu_priority(int cpu);
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
...
@@ -6303,7 +6303,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	WARN_ON(!sg);
 
 	do {
+		int cpu, max_cpu = -1;
+
 		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+		if (!(sd->flags & SD_ASYM_PACKING))
+			goto next;
+
+		for_each_cpu(cpu, sched_group_cpus(sg)) {
+			if (max_cpu < 0)
+				max_cpu = cpu;
+			else if (sched_asym_prefer(cpu, max_cpu))
+				max_cpu = cpu;
+		}
+		sg->asym_prefer_cpu = max_cpu;
+
+next:
 		sg = sg->next;
 	} while (sg != sd->groups);
...
@@ -97,6 +97,16 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+#ifdef CONFIG_SMP
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+	return -cpu;
+}
+#endif
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
@@ -7388,16 +7398,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	if (env->idle == CPU_NOT_IDLE)
 		return true;
 	/*
-	 * ASYM_PACKING needs to move all the work to the lowest
-	 * numbered CPUs in the group, therefore mark all groups
-	 * higher than ourself as busy.
+	 * ASYM_PACKING needs to move all the work to the highest
+	 * priority CPUs in the group, therefore mark all groups
+	 * of lower priority than ourself as busy.
 	 */
-	if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+	if (sgs->sum_nr_running &&
+	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
 		if (!sds->busiest)
 			return true;
 
-		/* Prefer to move from highest possible cpu's work */
-		if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+		/* Prefer to move from lowest priority cpu's work */
+		if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+				      sg->asym_prefer_cpu))
 			return true;
 	}
@@ -7549,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 	if (!sds->busiest)
 		return 0;
 
-	busiest_cpu = group_first_cpu(sds->busiest);
-	if (env->dst_cpu > busiest_cpu)
+	busiest_cpu = sds->busiest->asym_prefer_cpu;
+	if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
 		return 0;
 
 	env->imbalance = DIV_ROUND_CLOSEST(
...@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env) ...@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
/* /*
* ASYM_PACKING needs to force migrate tasks from busy but * ASYM_PACKING needs to force migrate tasks from busy but
* higher numbered CPUs in order to pack all tasks in the * lower priority CPUs in order to pack all tasks in the
* lowest numbered CPUs. * highest priority CPUs.
*/ */
if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu) if ((sd->flags & SD_ASYM_PACKING) &&
sched_asym_prefer(env->dst_cpu, env->src_cpu))
return 1; return 1;
} }
@@ -8740,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
 	unsigned long now = jiffies;
 	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
-	int nr_busy, cpu = rq->cpu;
+	int nr_busy, i, cpu = rq->cpu;
 	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
@@ -8791,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
 	}
 
 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
-	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-				  sched_domain_span(sd)) < cpu)) {
-		kick = true;
-		goto unlock;
-	}
+	if (sd) {
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (i == cpu ||
+			    !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+				continue;
+
+			if (sched_asym_prefer(i, cpu)) {
+				kick = true;
+				goto unlock;
+			}
+		}
+	}
 
 unlock:
 	rcu_read_unlock();
 	return kick;
...
@@ -540,6 +540,11 @@ struct dl_rq {
 
 #ifdef CONFIG_SMP
 
+static inline bool sched_asym_prefer(int a, int b)
+{
+	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -908,6 +913,7 @@ struct sched_group {
 	unsigned int group_weight;
 	struct sched_group_capacity *sgc;
+	int asym_prefer_cpu;		/* cpu of highest priority in group */
 
 	/*
 	 * The CPUs this group covers.
...