Commit 4a55bd5e authored by Peter Zijlstra, committed by Ingo Molnar

sched: fair-group: de-couple load-balancing from the rb-trees

De-couple load-balancing from the rb-trees, so that I can change their
organization.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ac884dec
...@@ -151,6 +151,9 @@ extern struct group_info init_groups; ...@@ -151,6 +151,9 @@ extern struct group_info init_groups;
.cpus_allowed = CPU_MASK_ALL, \ .cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \ .mm = NULL, \
.active_mm = &init_mm, \ .active_mm = &init_mm, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \ .rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \ .time_slice = HZ, \
......
...@@ -946,6 +946,7 @@ struct load_weight { ...@@ -946,6 +946,7 @@ struct load_weight {
struct sched_entity { struct sched_entity {
struct load_weight load; /* for load-balancing */ struct load_weight load; /* for load-balancing */
struct rb_node run_node; struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq; unsigned int on_rq;
u64 exec_start; u64 exec_start;
......
...@@ -384,8 +384,12 @@ struct cfs_rq { ...@@ -384,8 +384,12 @@ struct cfs_rq {
struct rb_root tasks_timeline; struct rb_root tasks_timeline;
struct rb_node *rb_leftmost; struct rb_node *rb_leftmost;
struct rb_node *rb_load_balance_curr;
/* 'curr' points to currently running entity on this cfs_rq. struct list_head tasks;
struct list_head *balance_iterator;
/*
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running). * It is set to NULL otherwise (i.e when none are currently running).
*/ */
struct sched_entity *curr, *next; struct sched_entity *curr, *next;
...@@ -2525,6 +2529,7 @@ static void __sched_fork(struct task_struct *p) ...@@ -2525,6 +2529,7 @@ static void __sched_fork(struct task_struct *p)
INIT_LIST_HEAD(&p->rt.run_list); INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0; p->se.on_rq = 0;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_PREEMPT_NOTIFIERS #ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers); INIT_HLIST_HEAD(&p->preempt_notifiers);
...@@ -7898,6 +7903,7 @@ int in_sched_functions(unsigned long addr) ...@@ -7898,6 +7903,7 @@ int in_sched_functions(unsigned long addr)
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{ {
cfs_rq->tasks_timeline = RB_ROOT; cfs_rq->tasks_timeline = RB_ROOT;
INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq; cfs_rq->rq = rq;
#endif #endif
......
...@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) ...@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
add_cfs_task_weight(cfs_rq, se->load.weight); add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq->nr_running++; cfs_rq->nr_running++;
se->on_rq = 1; se->on_rq = 1;
list_add(&se->group_node, &cfs_rq->tasks);
} }
static void static void
...@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) ...@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
add_cfs_task_weight(cfs_rq, -se->load.weight); add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq->nr_running--; cfs_rq->nr_running--;
se->on_rq = 0; se->on_rq = 0;
list_del_init(&se->group_node);
} }
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
...@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) ...@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
* the current task: * the current task:
*/ */
static struct task_struct * static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr) __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{ {
struct task_struct *p = NULL; struct task_struct *p = NULL;
struct sched_entity *se; struct sched_entity *se;
if (!curr) if (next == &cfs_rq->tasks)
return NULL; return NULL;
/* Skip over entities that are not tasks */ /* Skip over entities that are not tasks */
do { do {
se = rb_entry(curr, struct sched_entity, run_node); se = list_entry(next, struct sched_entity, group_node);
curr = rb_next(curr); next = next->next;
} while (curr && !entity_is_task(se)); } while (next != &cfs_rq->tasks && !entity_is_task(se));
cfs_rq->rb_load_balance_curr = curr; if (next == &cfs_rq->tasks)
return NULL;
cfs_rq->balance_iterator = next;
if (entity_is_task(se)) if (entity_is_task(se))
p = task_of(se); p = task_of(se);
...@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg) ...@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg)
{ {
struct cfs_rq *cfs_rq = arg; struct cfs_rq *cfs_rq = arg;
return __load_balance_iterator(cfs_rq, first_fair(cfs_rq)); return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
} }
static struct task_struct *load_balance_next_fair(void *arg) static struct task_struct *load_balance_next_fair(void *arg)
{ {
struct cfs_rq *cfs_rq = arg; struct cfs_rq *cfs_rq = arg;
return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr); return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
} }
static unsigned long static unsigned long
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment