Commit 1676330e authored by Peter Zijlstra, committed by Ingo Molnar

sched/topology: Fix overlapping sched_group_capacity

When building the overlapping groups we need to attach a consistent
sched_group_capacity structure. That is, all 'identical' sched_group's
should have the _same_ sched_group_capacity.
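
For reference, the relevant relationship between the structures (a simplified
sketch based on this era's kernel/sched/sched.h, with most fields elided):

  struct sched_group_capacity {
  	atomic_t	ref;		/* shared by all 'identical' groups */
  	unsigned long	capacity;	/* updated during load balancing */
  	unsigned long	cpumask[0];	/* the group balance mask lives here */
  };

  struct sched_group {
  	struct sched_group		*next;	/* circular list */
  	atomic_t			ref;
  	struct sched_group_capacity	*sgc;	/* must be one shared object */
  	unsigned long			cpumask[0];	/* group span */
  };

If two CPUs build the 'same' group but pick different per-CPU SGC instances,
that sharing is broken.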

This can (once again) be demonstrated with a topology like:

  node   0   1   2   3
    0:  10  20  30  20
    1:  20  10  20  30
    2:  30  20  10  20
    3:  20  30  20  10

But we need at least 2 CPUs per node for this to show up; after all, if
there is only one CPU per node, our CPU @i is by definition the unique
CPU that reaches this domain (aka the balance-cpu).
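
The asymmetry matters because each node's first NUMA domain level spans the
nodes within distance 20, and those sets differ per node. A quick standalone
sketch (plain C; the cpu%4 node assignment is an assumption that matches the
spans in the logs below) makes the overlap visible:

  #include <stdio.h>

  /* The example topology's node distance table. */
  static const int dist[4][4] = {
  	{ 10, 20, 30, 20 },
  	{ 20, 10, 20, 30 },
  	{ 30, 20, 10, 20 },
  	{ 20, 30, 20, 10 },
  };

  int main(void)
  {
  	/*
  	 * Nodes within distance 20 of node n: the span of n's first NUMA
  	 * domain level. With CPUs c and c+4 on node c%4, node 0's reach
  	 * {0,1,3} is exactly the span=0-1,3-5,7 seen in the logs below.
  	 */
  	for (int n = 0; n < 4; n++) {
  		printf("node %d reaches:", n);
  		for (int m = 0; m < 4; m++) {
  			if (dist[n][m] <= 20)
  				printf(" %d", m);
  		}
  		printf("\n");
  	}
  	return 0;
  }

This prints: node 0 reaches 0 1 3, node 1 reaches 0 1 2, node 2 reaches
1 2 3, node 3 reaches 0 2 3; overlapping but unequal spans, which is what
lets different CPUs build the 'same' group.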

Given the above NUMA topo and 2 CPUs per node:

  [] CPU0 attaching sched-domain(s):
  []  domain-0: span=0,4 level=DIE
  []   groups: 0:{ span=0 }, 4:{ span=4 }
  []   domain-1: span=0-1,3-5,7 level=NUMA
  []    groups: 0:{ span=0,4 mask=0,4 cap=2048 }, 1:{ span=1,5 mask=1,5 cap=2048 }, 3:{ span=3,7 mask=3,7 cap=2048 }
  []    domain-2: span=0-7 level=NUMA
  []     groups: 0:{ span=0-1,3-5,7 mask=0,4 cap=6144 }, 2:{ span=1-3,5-7 mask=2,6 cap=6144 }
  [] CPU1 attaching sched-domain(s):
  []  domain-0: span=1,5 level=DIE
  []   groups: 1:{ span=1 }, 5:{ span=5 }
  []   domain-1: span=0-2,4-6 level=NUMA
  []    groups: 1:{ span=1,5 mask=1,5 cap=2048 }, 2:{ span=2,6 mask=2,6 cap=2048 }, 4:{ span=0,4 mask=0,4 cap=2048 }
  []    domain-2: span=0-7 level=NUMA
  []     groups: 1:{ span=0-2,4-6 mask=1,5 cap=6144 }, 3:{ span=0,2-4,6-7 mask=3,7 cap=6144 }

Observe how CPU0-domain1-group0 and CPU1-domain1-group4 are the
'same' group but have a different id (0 vs 4). The printed id is the CPU
whose per-CPU storage supplied the SGC, so the two views of this one group
reference two distinct sched_group_capacity structures, and capacity
updates made through one view never reach the other.

To fix this, use the group balance CPU to select the SGC. This means
we have to compute the full mask for each CPU and require a second
temporary mask to store the group mask in (it otherwise lives in the
SGC).
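
Concretely, assembling the hunks below, the fixed init_overlap_sched_group()
reads as follows (the capacity-initialization tail that the diff leaves
untouched is elided):

  static void init_overlap_sched_group(struct sched_domain *sd,
  				     struct sched_group *sg)
  {
  	struct cpumask *mask = sched_domains_tmpmask2;
  	struct sd_data *sdd = sd->private;
  	struct cpumask *sg_span;
  	int cpu;

  	/* Compute the full group balance mask ... */
  	build_group_mask(sd, sg, mask);

  	/*
  	 * ... and let the balance CPU, the first CPU in both the group
  	 * span and the mask, select the SGC. The mask depends only on
  	 * the group span, so every CPU that builds this 'identical'
  	 * group arrives at the same CPU and hence the same SGC.
  	 */
  	cpu = cpumask_first_and(sched_group_cpus(sg), mask);

  	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
  	if (atomic_inc_return(&sg->sgc->ref) == 1)
  		/* Only the first reference stores the mask in the SGC. */
  		cpumask_copy(sched_group_mask(sg), mask);

  	/* ... capacity initialization elided ... */
  }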

The fixed topology looks like:

  [] CPU0 attaching sched-domain(s):
  []  domain-0: span=0,4 level=DIE
  []   groups: 0:{ span=0 }, 4:{ span=4 }
  []   domain-1: span=0-1,3-5,7 level=NUMA
  []    groups: 0:{ span=0,4 mask=0,4 cap=2048 }, 1:{ span=1,5 mask=1,5 cap=2048 }, 3:{ span=3,7 mask=3,7 cap=2048 }
  []    domain-2: span=0-7 level=NUMA
  []     groups: 0:{ span=0-1,3-5,7 mask=0,4 cap=6144 }, 2:{ span=1-3,5-7 mask=2,6 cap=6144 }
  [] CPU1 attaching sched-domain(s):
  []  domain-0: span=1,5 level=DIE
  []   groups: 1:{ span=1 }, 5:{ span=5 }
  []   domain-1: span=0-2,4-6 level=NUMA
  []    groups: 1:{ span=1,5 mask=1,5 cap=2048 }, 2:{ span=2,6 mask=2,6 cap=2048 }, 0:{ span=0,4 mask=0,4 cap=2048 }
  []    domain-2: span=0-7 level=NUMA
  []     groups: 1:{ span=0-2,4-6 mask=1,5 cap=6144 }, 3:{ span=0,2-4,6-7 mask=3,7 cap=6144 }

Note how CPU1's domain-1 now lists its {0,4} group as 0:{ span=0,4 }: the
same id, and therefore the same shared SGC, as in CPU0's view.

Debugged-by: Lauro Ramos Venancio <lvenanci@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Fixes: e3589f6c ("sched: Allow for overlapping sched_domain spans")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 005f874d
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -10,6 +10,7 @@ DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
 cpumask_var_t sched_domains_tmpmask;
+cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
@@ -500,13 +501,16 @@ enum s_alloc {
  * Only CPUs that can arrive at this group should be considered to continue
  * balancing.
  */
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+static void
+build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
 {
 	const struct cpumask *sg_span = sched_group_cpus(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
 
+	cpumask_clear(mask);
+
 	for_each_cpu(i, sg_span) {
 		sibling = *per_cpu_ptr(sdd->sd, i);
 
@@ -522,11 +526,11 @@ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
 			continue;
 
-		cpumask_set_cpu(i, sched_group_mask(sg));
+		cpumask_set_cpu(i, mask);
 	}
 
 	/* We must not have empty masks here */
-	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+	WARN_ON_ONCE(cpumask_empty(mask));
 }
 
 /*
@@ -560,14 +564,19 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 }
 
 static void init_overlap_sched_group(struct sched_domain *sd,
-				     struct sched_group *sg, int cpu)
+				     struct sched_group *sg)
 {
+	struct cpumask *mask = sched_domains_tmpmask2;
 	struct sd_data *sdd = sd->private;
 	struct cpumask *sg_span;
+	int cpu;
+
+	build_group_mask(sd, sg, mask);
+	cpu = cpumask_first_and(sched_group_cpus(sg), mask);
 
 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 	if (atomic_inc_return(&sg->sgc->ref) == 1)
-		build_group_mask(sd, sg);
+		cpumask_copy(sched_group_mask(sg), mask);
 
 	/*
 	 * Initialize sgc->capacity such that even if we mess up the
@@ -619,7 +628,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		sg_span = sched_group_cpus(sg);
 		cpumask_or(covered, covered, sg_span);
 
-		init_overlap_sched_group(sd, sg, i);
+		init_overlap_sched_group(sd, sg);
 
 		if (!first)
 			first = sg;
@@ -1578,6 +1587,7 @@ int sched_init_domains(const struct cpumask *cpu_map)
 	int err;
 
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 	arch_update_cpu_topology();
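
For readability, build_group_mask() after the patch, stitched together from
the two hunks above; the context the diff skips between the hunks is marked
rather than reconstructed:

  static void
  build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
  {
  	const struct cpumask *sg_span = sched_group_cpus(sg);
  	struct sd_data *sdd = sd->private;
  	struct sched_domain *sibling;
  	int i;

  	cpumask_clear(mask);

  	for_each_cpu(i, sg_span) {
  		sibling = *per_cpu_ptr(sdd->sd, i);

  		/* ... context elided by the diff above ... */

  		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
  			continue;

  		cpumask_set_cpu(i, mask);
  	}

  	/* We must not have empty masks here */
  	WARN_ON_ONCE(cpumask_empty(mask));
  }

It now writes into the caller-supplied temporary mask (sched_domains_tmpmask2)
instead of directly into the SGC, so it can run before the SGC is chosen.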