Commit 0c0e776a authored by Peter Zijlstra, committed by Ingo Molnar

sched/topology: Rewrite get_group()

We want to attain:

  sg_cpus() & sg_mask() == sg_mask()

for this to be so we must initialize sg_mask() to sg_cpus() for the
!overlap case (it's currently cpumask_setall()).
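
A minimal sketch (illustration only, not part of the commit) of checking
that invariant with the kernel's cpumask API; the helper name
check_sg_mask() and the caller-supplied scratch mask are hypothetical:

  /* Assert sg_cpus() & sg_mask() == sg_mask() for one group. */
  static void check_sg_mask(struct sched_group *sg, struct cpumask *tmp)
  {
          cpumask_and(tmp, sched_group_cpus(sg), sched_group_mask(sg));
          WARN_ON(!cpumask_equal(tmp, sched_group_mask(sg)));
  }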

Since the code makes my head hurt bad, rewrite it into a simpler form,
inspired by the now-fixed overlap code.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 35a566e6
@@ -833,23 +833,34 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  * [*] in other words, the first group of each domain is its child domain.
  */

-static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
+static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 {
         struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
         struct sched_domain *child = sd->child;
+        struct sched_group *sg;

         if (child)
                 cpu = cpumask_first(sched_domain_span(child));

-        if (sg) {
-                *sg = *per_cpu_ptr(sdd->sg, cpu);
-                (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+        sg = *per_cpu_ptr(sdd->sg, cpu);
+        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

-                /* For claim_allocations: */
-                atomic_set(&(*sg)->sgc->ref, 1);
+        /* For claim_allocations: */
+        atomic_inc(&sg->ref);
+        atomic_inc(&sg->sgc->ref);
+
+        if (child) {
+                cpumask_copy(sched_group_cpus(sg), sched_domain_span(child));
+                cpumask_copy(sched_group_mask(sg), sched_group_cpus(sg));
+        } else {
+                cpumask_set_cpu(cpu, sched_group_cpus(sg));
+                cpumask_set_cpu(cpu, sched_group_mask(sg));
         }

-        return cpu;
+        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_cpus(sg));
+        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+
+        return sg;
 }

 /*
@@ -868,34 +879,20 @@ build_sched_groups(struct sched_domain *sd, int cpu)
         struct cpumask *covered;
         int i;

-        get_group(cpu, sdd, &sd->groups);
-        atomic_inc(&sd->groups->ref);
-
-        if (cpu != cpumask_first(span))
-                return 0;
-
         lockdep_assert_held(&sched_domains_mutex);
         covered = sched_domains_tmpmask;

         cpumask_clear(covered);

-        for_each_cpu(i, span) {
+        for_each_cpu_wrap(i, span, cpu) {
                 struct sched_group *sg;
-                int group, j;

                 if (cpumask_test_cpu(i, covered))
                         continue;

-                group = get_group(i, sdd, &sg);
-                cpumask_setall(sched_group_mask(sg));
+                sg = get_group(i, sdd);

-                for_each_cpu(j, span) {
-                        if (get_group(j, sdd, NULL) != group)
-                                continue;
-
-                        cpumask_set_cpu(j, covered);
-                        cpumask_set_cpu(j, sched_group_cpus(sg));
-                }
+                cpumask_or(covered, covered, sched_group_cpus(sg));

                 if (!first)
                         first = sg;
@@ -904,6 +901,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
                 last = sg;
         }
         last->next = first;
+        sd->groups = first;

         return 0;
 }
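
Illustration only, not part of the commit: after the rewrite,
build_sched_groups() links the groups into a circular list and points
sd->groups at the first entry. A hedged sketch of walking that list to
check the per-group invariant and span coverage; verify_groups() and the
scratch masks "covered"/"tmp" are hypothetical, and the sketch assumes
the kernel/sched/sched.h helpers used above:

  static void verify_groups(struct sched_domain *sd,
                            struct cpumask *covered, struct cpumask *tmp)
  {
          struct sched_group *first = sd->groups, *sg = first;

          cpumask_clear(covered);
          do {
                  /* Per group: sg_cpus() & sg_mask() == sg_mask(). */
                  cpumask_and(tmp, sched_group_cpus(sg), sched_group_mask(sg));
                  WARN_ON(!cpumask_equal(tmp, sched_group_mask(sg)));

                  cpumask_or(covered, covered, sched_group_cpus(sg));
                  sg = sg->next;
          } while (sg != first);

          /* All groups together must cover the domain's span. */
          WARN_ON(!cpumask_equal(covered, sched_domain_span(sd)));
  }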