Commit acb5a9ba authored by Jan H. Schönherr, committed by Ingo Molnar

sched: Separate group-scheduling code more clearly

Clean up cfs/rt runqueue initialization by moving group scheduling
related code into the corresponding functions.

Also, keep group scheduling as an add-on, so that things are only done
additionally, i.e. remove the init_*_rq() calls from init_tg_*_entry().
(This removes a redundant initialization during sched_init().)

In the case of group scheduling, rt_rq->highest_prio.curr is now
initialized twice, but adding another #ifdef does not seem worth it.
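
For illustration, the per-CPU allocation paths now follow this call
order (a condensed sketch of the hunks below, not verbatim kernel code):

    /* alloc_fair_sched_group(): plain runqueue setup first,
     * group-scheduling fields added on top by init_tg_cfs_entry() */
    init_cfs_rq(cfs_rq);
    init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);

    /* alloc_rt_sched_group(): likewise, plus the rt_runtime copy
     * that used to live in init_tg_rt_entry() */
    init_rt_rq(rt_rq, cpu_rq(i));
    rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
    init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);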
Signed-off-by: Jan H. Schönherr <schnhrr@cs.tu-berlin.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1310661163-16606-1-git-send-email-schnhrr@cs.tu-berlin.de
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 26a148eb
kernel/sched.c

@@ -7859,17 +7859,10 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 	INIT_LIST_HEAD(&cfs_rq->tasks);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	cfs_rq->rq = rq;
-	/* allow initial update_cfs_load() to truncate */
-#ifdef CONFIG_SMP
-	cfs_rq->load_stamp = 1;
-#endif
-#endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -7889,13 +7882,9 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#ifdef CONFIG_SMP
 	rt_rq->highest_prio.next = MAX_RT_PRIO;
-#endif
-#endif
-#ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
@@ -7905,11 +7894,6 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	rt_rq->rt_nr_boosted = 0;
-	rt_rq->rq = rq;
-#endif
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7918,11 +7902,17 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *parent)
 {
 	struct rq *rq = cpu_rq(cpu);
-	tg->cfs_rq[cpu] = cfs_rq;
-	init_cfs_rq(cfs_rq, rq);
+
 	cfs_rq->tg = tg;
+	cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+	/* allow initial update_cfs_load() to truncate */
+	cfs_rq->load_stamp = 1;
+#endif
 
+	tg->cfs_rq[cpu] = cfs_rq;
 	tg->se[cpu] = se;
+
 	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
@@ -7945,12 +7935,14 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	tg->rt_rq[cpu] = rt_rq;
-	init_rt_rq(rt_rq, rq);
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->rt_nr_boosted = 0;
+	rt_rq->rq = rq;
 	rt_rq->tg = tg;
-	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	tg->rt_rq[cpu] = rt_rq;
 	tg->rt_se[cpu] = rt_se;
+
 	if (!rt_se)
 		return;
@@ -8032,7 +8024,7 @@ void __init sched_init(void)
 		rq->nr_running = 0;
 		rq->calc_load_active = 0;
 		rq->calc_load_update = jiffies + LOAD_FREQ;
-		init_cfs_rq(&rq->cfs, rq);
+		init_cfs_rq(&rq->cfs);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		root_task_group.shares = root_task_group_load;
@@ -8335,6 +8327,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!se)
 			goto err_free_rq;
 
+		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 	}
@@ -8425,6 +8418,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!rt_se)
 			goto err_free_rq;
 
+		init_rt_rq(rt_rq, cpu_rq(i));
+		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}