Commit 16b26943 authored by Xunlei Pang, committed by Ingo Molnar

sched/deadline: Modify cpudl::free_cpus to reflect rd->online

Currently, cpudl::free_cpus contains all CPUs after init; see
cpudl_init(). When calling cpudl_find(), we have to intersect with
rd->span to avoid selecting a CPU outside the current root domain,
because cpus_allowed cannot be relied on when clustered scheduling
is configured via cpusets; see find_later_rq().

This patch adds cpudl_set_freecpu() and cpudl_clear_freecpu() to
update cpudl::free_cpus from rq_online_dl()/rq_offline_dl(), so the
rd->span intersection can be dropped when calling cpudl_find() from
find_later_rq().
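
The effect is easiest to see in a standalone model. The sketch below is
hypothetical userspace C, not kernel code: plain 64-bit bitmasks stand in
for struct cpumask, __builtin_ctzll() (GCC/Clang) stands in for
cpumask_any(), and every name in it (free_cpus, active_mask,
find_free_cpu(), ...) is an illustrative stand-in rather than an
identifier from this patch. It mimics how a free_cpus mask maintained at
rq online/offline time reduces the later-CPU lookup to a single
three-way mask intersection:

#include <stdint.h>
#include <stdio.h>

static uint64_t free_cpus;   /* maintained at rq online/offline time */
static uint64_t active_mask; /* stand-in for cpu_active_mask */

static void set_freecpu(int cpu)   { free_cpus |=  (1ULL << cpu); }
static void clear_freecpu(int cpu) { free_cpus &= ~(1ULL << cpu); }

/* Stand-in for the new cpudl_find() fast path: no rd->span term is
 * needed, because free_cpus already holds only CPUs that belong to
 * the current root domain. */
static int find_free_cpu(uint64_t cpus_allowed)
{
	uint64_t later = free_cpus & cpus_allowed & active_mask;

	return later ? __builtin_ctzll(later) : -1; /* "cpumask_any" */
}

int main(void)
{
	active_mask = 0xf;  /* CPUs 0-3 are active */
	set_freecpu(1);     /* as if rq_online_dl() ran on CPU 1 */
	set_freecpu(2);     /* ... and on CPU 2 */

	printf("%d\n", find_free_cpu(0x6)); /* allowed on CPUs 1,2 -> 1 */
	clear_freecpu(1);                   /* as if rq_offline_dl() ran */
	printf("%d\n", find_free_cpu(0x6)); /* -> 2 */
	return 0;
}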
Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1421642980-10045-1-git-send-email-pang.xunlei@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ff6f2d29
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -107,7 +107,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	int best_cpu = -1;
 	const struct sched_dl_entity *dl_se = &p->dl;
 
-	if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
+	if (later_mask &&
+	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
+	    cpumask_and(later_mask, later_mask, cpu_active_mask)) {
 		best_cpu = cpumask_any(later_mask);
 		goto out;
 	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
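
The rewritten test leans on cpumask_and() returning true only when the
resulting mask is non-empty, so the chain short-circuits: free_cpus is
first narrowed by the task's affinity, then by cpu_active_mask, and
best_cpu is picked only if a candidate survives both intersections.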
@@ -185,6 +187,26 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
+/*
+ * cpudl_set_freecpu - Set the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_set_freecpu(struct cpudl *cp, int cpu)
+{
+	cpumask_set_cpu(cpu, cp->free_cpus);
+}
+
+/*
+ * cpudl_clear_freecpu - Clear the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
+{
+	cpumask_clear_cpu(cpu, cp->free_cpus);
+}
+
 /*
  * cpudl_init - initialize the cpudl structure
  * @cp: the cpudl max-heap context
@@ -203,7 +225,7 @@ int cpudl_init(struct cpudl *cp)
 	if (!cp->elements)
 		return -ENOMEM;
 
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
 		kfree(cp->elements);
 		return -ENOMEM;
 	}
@@ -211,8 +233,6 @@ int cpudl_init(struct cpudl *cp)
 	for_each_possible_cpu(i)
 		cp->elements[i].idx = IDX_INVALID;
 
-	cpumask_setall(cp->free_cpus);
-
 	return 0;
 }
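
zalloc_cpumask_var() hands back an already-zeroed mask, so the
unconditional cpumask_setall() at init time is no longer wanted:
free_cpus starts empty, and a CPU joins it only once rq_online_dl()
runs for it.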
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -24,6 +24,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 		       struct cpumask *later_mask);
 void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
 int cpudl_init(struct cpudl *cp);
+void cpudl_set_freecpu(struct cpudl *cp, int cpu);
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
 void cpudl_cleanup(struct cpudl *cp);
 #endif /* CONFIG_SMP */
 
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1165,9 +1165,6 @@ static int find_later_rq(struct task_struct *task)
 	 * We have to consider system topology and task affinity
 	 * first, then we can look for a suitable cpu.
 	 */
-	cpumask_copy(later_mask, task_rq(task)->rd->span);
-	cpumask_and(later_mask, later_mask, cpu_active_mask);
-	cpumask_and(later_mask, later_mask, &task->cpus_allowed);
 	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
 			task, later_mask);
 	if (best_cpu == -1)
@@ -1562,6 +1559,7 @@ static void rq_online_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_set_overload(rq);
 
+	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 	if (rq->dl.dl_nr_running > 0)
 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
 }
@@ -1573,6 +1571,7 @@ static void rq_offline_dl(struct rq *rq)
 	dl_clear_overload(rq);
 
 	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
 void init_sched_dl_class(void)
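
Taken together, free_cpus now mirrors rd->online: a CPU is added in
rq_online_dl() and removed in rq_offline_dl(), which is what lets
find_later_rq() drop its explicit rd->span intersection; the affinity
and cpu_active_mask terms are now applied inside cpudl_find() itself.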