Commit 8277434e authored by Paul Turner, committed by Ingo Molnar

sched: Allow for positional tg_tree walks

Extend walk_tg_tree to accept a positional argument:

static int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)

Existing semantics are preserved; the caller must hold rcu_lock() or a
sufficient analogue.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.677889157@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 671fd9da
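
For illustration, a caller of the new interface might look as follows. This
sketch is not part of the patch: the visitor tg_count_one() and struct
tg_count_data are hypothetical, while walk_tg_tree_from(), tg_nop() and
rcu_read_lock() are the symbols the patch itself uses.

	/*
	 * Hypothetical example, not part of the patch: count the
	 * task_groups in the sub-hierarchy rooted at @tg.
	 */
	struct tg_count_data {
		int nr_groups;
	};

	static int tg_count_one(struct task_group *tg, void *data)
	{
		struct tg_count_data *d = data;

		d->nr_groups++;
		return 0;	/* non-zero would abort the walk */
	}

	static int count_tg_subtree(struct task_group *tg)
	{
		struct tg_count_data data = { .nr_groups = 0 };

		/* The walker no longer takes rcu_read_lock() itself. */
		rcu_read_lock();
		walk_tg_tree_from(tg, tg_count_one, tg_nop, &data);
		rcu_read_unlock();

		return data.nr_groups;
	}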
@@ -1591,20 +1591,23 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
+ * Iterate task_group tree rooted at *from, calling @down when first entering a
+ * node and @up when leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+static int walk_tg_tree_from(struct task_group *from,
+			     tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
 	int ret;
 
-	rcu_read_lock();
-	parent = &root_task_group;
+	parent = from;
+
 down:
 	ret = (*down)(parent, data);
 	if (ret)
-		goto out_unlock;
+		goto out;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1613,19 +1616,29 @@ static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 		continue;
 	}
 	ret = (*up)(parent, data);
-	if (ret)
-		goto out_unlock;
+	if (ret || parent == from)
+		goto out;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
-out_unlock:
-	rcu_read_unlock();
+out:
 
 	return ret;
 }
 
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+	return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
 static int tg_nop(struct task_group *tg, void *data)
 {
 	return 0;
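
The walker uses the down/up labels and gotos to run a depth-first traversal
without recursion, keeping kernel stack usage constant: @down fires when a
node is first entered (pre-order) and @up when it is left for the final time
(post-order), and the new parent == from test terminates the upward phase at
the subtree root instead of relying on reaching root_task_group's NULL
parent. For readability only, a recursive equivalent of the same traversal,
as a sketch rather than kernel code (it would consume stack proportional to
the depth of the hierarchy):

	/*
	 * Recursive equivalent of walk_tg_tree_from(), for exposition
	 * only -- not part of the patch. Caller must hold rcu_read_lock().
	 */
	static int walk_tg_tree_from_recursive(struct task_group *from,
					       tg_visitor down, tg_visitor up,
					       void *data)
	{
		struct task_group *child;
		int ret;

		ret = (*down)(from, data);	/* pre-order visit */
		if (ret)
			return ret;

		list_for_each_entry_rcu(child, &from->children, siblings) {
			ret = walk_tg_tree_from_recursive(child, down, up, data);
			if (ret)
				return ret;
		}

		return (*up)(from, data);	/* post-order visit */
	}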
@@ -8870,13 +8883,19 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
+	int ret;
+
 	struct rt_schedulable_data data = {
 		.tg = tg,
 		.rt_period = period,
 		.rt_runtime = runtime,
 	};
 
-	return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 
 static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9333,6 +9352,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
+	int ret;
 	struct cfs_schedulable_data data = {
 		.tg = tg,
 		.period = period,
@@ -9344,7 +9364,11 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 		do_div(data.quota, NSEC_PER_USEC);
 	}
 
-	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
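
The remaining hunks are the flip side of the change: walk_tg_tree() no longer
takes rcu_read_lock() itself, so its existing callers __rt_schedulable() and
__cfs_schedulable() now bracket the walk explicitly. One payoff of hoisting
the lock, sketched below with hypothetical visitors check_one() and
check_other(), is that a single RCU read-side section can now cover several
walks:

	/* Hypothetical visitors, for illustration only. */
	static int check_one(struct task_group *tg, void *data)
	{
		return 0;	/* non-zero would abort the walk */
	}

	static int check_other(struct task_group *tg, void *data)
	{
		return 0;
	}

	static int check_two_things(struct task_group *tg, void *a, void *b)
	{
		int ret;

		/*
		 * With the old walk_tg_tree(), the lock was taken and
		 * dropped inside each call; now one section covers both.
		 */
		rcu_read_lock();
		ret = walk_tg_tree_from(tg, check_one, tg_nop, a);
		if (!ret)
			ret = walk_tg_tree_from(tg, check_other, tg_nop, b);
		rcu_read_unlock();

		return ret;
	}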