Commit 63b2ca30 authored by Nicolas Pitre, committed by Ingo Molnar

sched: Let 'struct sched_group_power' care about CPU capacity

It is better not to think about compute capacity as being equivalent
to "CPU power".  The upcoming "power aware" scheduler work may create
confusion with the notion of energy consumption if "power" is used too
liberally.

Since struct sched_group_power is really about compute capacity of sched
groups, let's rename it to struct sched_group_capacity. Similarly sgp
becomes sgc. Related variables and functions dealing with groups are also
adjusted accordingly.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/n/tip-5yeix833vvgf2uyj5o36hpu9@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0fedc6c8
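
In short, the per-group state keeps its layout and semantics; only the naming moves from "power" to "capacity". Below is a minimal standalone sketch of the before/after naming, abridged to the renamed members shown in the diff; the _old/_new suffixes and the sched_group_sketch name are purely illustrative so both versions can coexist in one snippet, and plain C types stand in for the kernel's. It is not the kernel code itself.

#include <stdio.h>

/* Old naming: per-group compute capacity stored under "power" names. */
struct sched_group_power_old {
	unsigned int power;		/* the group's compute capacity */
	unsigned int power_orig;	/* its companion "original" value */
};

/* New naming: the same state, named for what it actually holds. */
struct sched_group_capacity_new {
	unsigned int capacity;		/* was: power      */
	unsigned int capacity_orig;	/* was: power_orig */
};

/* The back-pointer in struct sched_group renames from sgp to sgc. */
struct sched_group_sketch {
	struct sched_group_capacity_new *sgc;	/* was: struct sched_group_power *sgp */
};

int main(void)
{
	struct sched_group_capacity_new sgc = { .capacity = 1024, .capacity_orig = 1024 };
	struct sched_group_sketch sg = { .sgc = &sgc };

	/* Kernel call sites change from sg->sgp->power to sg->sgc->capacity. */
	printf("group capacity: %u\n", sg.sgc->capacity);
	return 0;
}
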
@@ -1013,7 +1013,7 @@ typedef const int (*sched_domain_flags_f)(void);
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
-	struct sched_group_power **__percpu sgp;
+	struct sched_group_capacity **__percpu sgc;
 };
 struct sched_domain_topology_level {
@@ -5221,14 +5221,13 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		}
 		/*
-		 * Even though we initialize ->power to something semi-sane,
-		 * we leave power_orig unset. This allows us to detect if
+		 * Even though we initialize ->capacity to something semi-sane,
+		 * we leave capacity_orig unset. This allows us to detect if
 		 * domain iteration is still funny without causing /0 traps.
 		 */
-		if (!group->sgp->power_orig) {
+		if (!group->sgc->capacity_orig) {
 			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: domain->cpu_power not "
-					"set\n");
+			printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
 			break;
 		}
@@ -5250,9 +5249,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 		printk(KERN_CONT " %s", str);
-		if (group->sgp->power != SCHED_POWER_SCALE) {
-			printk(KERN_CONT " (cpu_power = %d)",
-				group->sgp->power);
+		if (group->sgc->capacity != SCHED_POWER_SCALE) {
+			printk(KERN_CONT " (cpu_capacity = %d)",
+				group->sgc->capacity);
 		}
 		group = group->next;
@@ -5466,7 +5465,7 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
-static void free_sched_groups(struct sched_group *sg, int free_sgp)
+static void free_sched_groups(struct sched_group *sg, int free_sgc)
 {
 	struct sched_group *tmp, *first;
@@ -5477,8 +5476,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgp)
 	do {
 		tmp = sg->next;
-		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
-			kfree(sg->sgp);
+		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
+			kfree(sg->sgc);
 		kfree(sg);
 		sg = tmp;
@@ -5496,7 +5495,7 @@ static void free_sched_domain(struct rcu_head *rcu)
 	if (sd->flags & SD_OVERLAP) {
 		free_sched_groups(sd->groups, 1);
 	} else if (atomic_dec_and_test(&sd->groups->ref)) {
-		kfree(sd->groups->sgp);
+		kfree(sd->groups->sgc);
 		kfree(sd->groups);
 	}
 	kfree(sd);
@@ -5707,17 +5706,17 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		cpumask_or(covered, covered, sg_span);
-		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
-		if (atomic_inc_return(&sg->sgp->ref) == 1)
+		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
+		if (atomic_inc_return(&sg->sgc->ref) == 1)
 			build_group_mask(sd, sg);
 		/*
-		 * Initialize sgp->power such that even if we mess up the
+		 * Initialize sgc->capacity such that even if we mess up the
 		 * domains and no possible iteration will get us here, we won't
 		 * die on a /0 trap.
 		 */
-		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
-		sg->sgp->power_orig = sg->sgp->power;
+		sg->sgc->capacity = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+		sg->sgc->capacity_orig = sg->sgc->capacity;
 		/*
 		 * Make sure the first group of this domain contains the
@@ -5755,8 +5754,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
-		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
-		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+		atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
 	}
 	return cpu;
@@ -5819,16 +5818,16 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 }
 /*
- * Initialize sched groups cpu_power.
+ * Initialize sched groups cpu_capacity.
  *
- * cpu_power indicates the capacity of sched group, which is used while
+ * cpu_capacity indicates the capacity of sched group, which is used while
  * distributing the load between different sched groups in a sched domain.
- * Typically cpu_power for all the groups in a sched domain will be same unless
- * there are asymmetries in the topology. If there are asymmetries, group
- * having more cpu_power will pickup more load compared to the group having
- * less cpu_power.
+ * Typically cpu_capacity for all the groups in a sched domain will be same
+ * unless there are asymmetries in the topology. If there are asymmetries,
+ * group having more cpu_capacity will pickup more load compared to the
+ * group having less cpu_capacity.
  */
-static void init_sched_groups_power(int cpu, struct sched_domain *sd)
+static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 {
 	struct sched_group *sg = sd->groups;
@@ -5842,8 +5841,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	if (cpu != group_balance_cpu(sg))
 		return;
-	update_group_power(sd, cpu);
-	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
+	update_group_capacity(sd, cpu);
+	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 /*
@@ -5934,8 +5933,8 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
-		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
+	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
+		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
 }
 #ifdef CONFIG_NUMA
@@ -6337,14 +6336,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
-		sdd->sgp = alloc_percpu(struct sched_group_power *);
-		if (!sdd->sgp)
+		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
+		if (!sdd->sgc)
 			return -ENOMEM;
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
-			struct sched_group_power *sgp;
+			struct sched_group_capacity *sgc;
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -6362,12 +6361,12 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			*per_cpu_ptr(sdd->sg, j) = sg;
-			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
+			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
-			if (!sgp)
+			if (!sgc)
 				return -ENOMEM;
-			*per_cpu_ptr(sdd->sgp, j) = sgp;
+			*per_cpu_ptr(sdd->sgc, j) = sgc;
 		}
 	}
@@ -6394,15 +6393,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
-			if (sdd->sgp)
-				kfree(*per_cpu_ptr(sdd->sgp, j));
+			if (sdd->sgc)
+				kfree(*per_cpu_ptr(sdd->sgc, j));
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
 		free_percpu(sdd->sg);
 		sdd->sg = NULL;
-		free_percpu(sdd->sgp);
-		sdd->sgp = NULL;
+		free_percpu(sdd->sgc);
+		sdd->sgc = NULL;
 	}
 }
@@ -6479,7 +6478,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			claim_allocations(i, sd);
-			init_sched_groups_power(i, sd);
+			init_sched_groups_capacity(i, sd);
 		}
 	}
[The diff for one further file is collapsed and not shown here.]
@@ -728,15 +728,15 @@ DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
-struct sched_group_power {
+struct sched_group_capacity {
 	atomic_t ref;
 	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
+	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+	 * for a single CPU.
 	 */
-	unsigned int power, power_orig;
+	unsigned int capacity, capacity_orig;
 	unsigned long next_update;
-	int imbalance; /* XXX unrelated to power but shared group state */
+	int imbalance; /* XXX unrelated to capacity but shared group state */
 	/*
 	 * Number of busy cpus in this group.
 	 */
@@ -750,7 +750,7 @@ struct sched_group {
 	atomic_t ref;
 	unsigned int group_weight;
-	struct sched_group_power *sgp;
+	struct sched_group_capacity *sgc;
 	/*
 	 * The CPUs this group covers.
@@ -773,7 +773,7 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
  */
 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
 {
-	return to_cpumask(sg->sgp->cpumask);
+	return to_cpumask(sg->sgc->cpumask);
 }
 /**
@@ -1167,7 +1167,7 @@ extern const struct sched_class idle_sched_class;
 #ifdef CONFIG_SMP
-extern void update_group_power(struct sched_domain *sd, int cpu);
+extern void update_group_capacity(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq);
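
One detail worth noting from the build_overlap_sched_groups() hunk above: group capacity stays expressed in SCHED_POWER_SCALE units, with one fully available CPU contributing SCHED_POWER_SCALE (1024 in this kernel version), so a freshly built group spanning N CPUs starts at N * SCHED_POWER_SCALE before update_group_capacity() refines it. The tiny standalone sketch below only illustrates that default arithmetic; the function name default_group_capacity() is invented for the example and is not a kernel symbol.

#include <stdio.h>

#define SCHED_POWER_SCALE 1024U		/* 1 << 10 in this kernel version */

/* Default group capacity as set in build_overlap_sched_groups():
 * sg->sgc->capacity = SCHED_POWER_SCALE * cpumask_weight(sg_span); */
static unsigned int default_group_capacity(unsigned int nr_cpus_in_group)
{
	return SCHED_POWER_SCALE * nr_cpus_in_group;
}

int main(void)
{
	/* e.g. a sched group spanning 4 CPUs */
	printf("default capacity: %u\n", default_group_capacity(4));	/* 4096 */
	return 0;
}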