Commit 72ec7029 authored by Tejun Heo

cgroup: make task iterators deal with cgroup_subsys_state instead of cgroup

cgroup is in the process of converting to css (cgroup_subsys_state)
from cgroup as the principal subsystem interface handle.  This is
mostly to prepare for the unified hierarchy support where css's will
be created and destroyed dynamically but also helps cleaning up
subsystem implementations as css is usually what they are interested
in anyway.

This patch converts task iterators to deal with css instead of cgroup.
Note that under unified hierarchy, different sets of tasks will be
considered belonging to a given cgroup depending on the subsystem in
question and making the iterators deal with css instead of cgroup
provides them with enough information about the iteration.

While at it, fix several function comment formats in cpuset.c.

This patch doesn't introduce any behavior differences.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
parent e535837b
...@@ -880,21 +880,22 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, ...@@ -880,21 +880,22 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
(pos) = css_next_descendant_post((pos), (css))) (pos) = css_next_descendant_post((pos), (css)))
/* A cgroup_task_iter should be treated as an opaque object */ /* A css_task_iter should be treated as an opaque object */
struct cgroup_task_iter { struct css_task_iter {
struct cgroup *origin_cgrp; struct cgroup_subsys_state *origin_css;
struct list_head *cset_link; struct list_head *cset_link;
struct list_head *task; struct list_head *task;
}; };
void cgroup_task_iter_start(struct cgroup *cgrp, struct cgroup_task_iter *it); void css_task_iter_start(struct cgroup_subsys_state *css,
struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it); struct css_task_iter *it);
void cgroup_task_iter_end(struct cgroup_task_iter *it); struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
int cgroup_scan_tasks(struct cgroup *cgrp, int css_scan_tasks(struct cgroup_subsys_state *css,
bool (*test)(struct task_struct *, void *), bool (*test)(struct task_struct *, void *),
void (*process)(struct task_struct *, void *), void (*process)(struct task_struct *, void *),
void *data, struct ptr_heap *heap); void *data, struct ptr_heap *heap);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
......
This diff is collapsed.
...@@ -258,7 +258,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css) ...@@ -258,7 +258,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
{ {
struct freezer *freezer = css_freezer(css); struct freezer *freezer = css_freezer(css);
struct cgroup_subsys_state *pos; struct cgroup_subsys_state *pos;
struct cgroup_task_iter it; struct css_task_iter it;
struct task_struct *task; struct task_struct *task;
WARN_ON_ONCE(!rcu_read_lock_held()); WARN_ON_ONCE(!rcu_read_lock_held());
...@@ -279,9 +279,9 @@ static void update_if_frozen(struct cgroup_subsys_state *css) ...@@ -279,9 +279,9 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
} }
/* are all tasks frozen? */ /* are all tasks frozen? */
cgroup_task_iter_start(css->cgroup, &it); css_task_iter_start(css, &it);
while ((task = cgroup_task_iter_next(&it))) { while ((task = css_task_iter_next(&it))) {
if (freezing(task)) { if (freezing(task)) {
/* /*
* freezer_should_skip() indicates that the task * freezer_should_skip() indicates that the task
...@@ -296,7 +296,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css) ...@@ -296,7 +296,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
freezer->state |= CGROUP_FROZEN; freezer->state |= CGROUP_FROZEN;
out_iter_end: out_iter_end:
cgroup_task_iter_end(&it); css_task_iter_end(&it);
out_unlock: out_unlock:
spin_unlock_irq(&freezer->lock); spin_unlock_irq(&freezer->lock);
} }
...@@ -322,26 +322,24 @@ static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft, ...@@ -322,26 +322,24 @@ static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
static void freeze_cgroup(struct freezer *freezer) static void freeze_cgroup(struct freezer *freezer)
{ {
struct cgroup *cgroup = freezer->css.cgroup; struct css_task_iter it;
struct cgroup_task_iter it;
struct task_struct *task; struct task_struct *task;
cgroup_task_iter_start(cgroup, &it); css_task_iter_start(&freezer->css, &it);
while ((task = cgroup_task_iter_next(&it))) while ((task = css_task_iter_next(&it)))
freeze_task(task); freeze_task(task);
cgroup_task_iter_end(&it); css_task_iter_end(&it);
} }
static void unfreeze_cgroup(struct freezer *freezer) static void unfreeze_cgroup(struct freezer *freezer)
{ {
struct cgroup *cgroup = freezer->css.cgroup; struct css_task_iter it;
struct cgroup_task_iter it;
struct task_struct *task; struct task_struct *task;
cgroup_task_iter_start(cgroup, &it); css_task_iter_start(&freezer->css, &it);
while ((task = cgroup_task_iter_next(&it))) while ((task = css_task_iter_next(&it)))
__thaw_task(task); __thaw_task(task);
cgroup_task_iter_end(&it); css_task_iter_end(&it);
} }
/** /**
......
...@@ -832,8 +832,8 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs) ...@@ -832,8 +832,8 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
* @tsk: task to test * @tsk: task to test
* @data: cpuset to @tsk belongs to * @data: cpuset to @tsk belongs to
* *
* Called by cgroup_scan_tasks() for each task in a cgroup whose * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
* cpus_allowed mask needs to be changed. * mask needs to be changed.
* *
* We don't need to re-check for the cgroup/cpuset membership, since we're * We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point. * holding cpuset_mutex at this point.
...@@ -849,27 +849,26 @@ static void cpuset_change_cpumask(struct task_struct *tsk, void *data) ...@@ -849,27 +849,26 @@ static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
/** /**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
* *
* Called with cpuset_mutex held * Called with cpuset_mutex held
* *
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup, * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each. * calling callback functions for each.
* *
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL. * if @heap != NULL.
*/ */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{ {
cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_cpumask, cs, css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
heap);
} }
/* /*
* update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
* @root_cs: the root cpuset of the hierarchy * @root_cs: the root cpuset of the hierarchy
* @update_root: update root cpuset or not? * @update_root: update root cpuset or not?
* @heap: the heap used by cgroup_scan_tasks() * @heap: the heap used by css_scan_tasks()
* *
* This will update cpumasks of tasks in @root_cs and all other empty cpusets * This will update cpumasks of tasks in @root_cs and all other empty cpusets
* which take on cpumask of @root_cs. * which take on cpumask of @root_cs.
...@@ -1082,11 +1081,10 @@ static void *cpuset_being_rebound; ...@@ -1082,11 +1081,10 @@ static void *cpuset_being_rebound;
/** /**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
* *
* Called with cpuset_mutex held * Called with cpuset_mutex held. No return value. It's guaranteed that
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 * css_scan_tasks() always returns 0 if @heap != NULL.
* if @heap != NULL.
*/ */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{ {
...@@ -1109,8 +1107,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) ...@@ -1109,8 +1107,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* It's ok if we rebind the same mm twice; mpol_rebind_mm() * It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes. * is idempotent. Also migrate pages in each mm to new nodes.
*/ */
cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_nodemask, &arg, css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
heap);
/* /*
* All the tasks' nodemasks have been updated, update * All the tasks' nodemasks have been updated, update
...@@ -1126,7 +1123,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) ...@@ -1126,7 +1123,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
* @cs: the root cpuset of the hierarchy * @cs: the root cpuset of the hierarchy
* @update_root: update the root cpuset or not? * @update_root: update the root cpuset or not?
* @heap: the heap used by cgroup_scan_tasks() * @heap: the heap used by css_scan_tasks()
* *
* This will update nodemasks of tasks in @root_cs and all other empty cpusets * This will update nodemasks of tasks in @root_cs and all other empty cpusets
* which take on nodemask of @root_cs. * which take on nodemask of @root_cs.
...@@ -1254,12 +1251,12 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) ...@@ -1254,12 +1251,12 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
return 0; return 0;
} }
/* /**
* cpuset_change_flag - make a task's spread flags the same as its cpuset's * cpuset_change_flag - make a task's spread flags the same as its cpuset's
* @tsk: task to be updated * @tsk: task to be updated
* @data: cpuset to @tsk belongs to * @data: cpuset to @tsk belongs to
* *
* Called by cgroup_scan_tasks() for each task in a cgroup. * Called by css_scan_tasks() for each task in a cgroup.
* *
* We don't need to re-check for the cgroup/cpuset membership, since we're * We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point. * holding cpuset_mutex at this point.
...@@ -1271,22 +1268,22 @@ static void cpuset_change_flag(struct task_struct *tsk, void *data) ...@@ -1271,22 +1268,22 @@ static void cpuset_change_flag(struct task_struct *tsk, void *data)
cpuset_update_task_spread_flag(cs, tsk); cpuset_update_task_spread_flag(cs, tsk);
} }
/* /**
* update_tasks_flags - update the spread flags of tasks in the cpuset. * update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed * @cs: the cpuset in which each task's spread flags needs to be changed
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
* *
* Called with cpuset_mutex held * Called with cpuset_mutex held
* *
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup, * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each. * calling callback functions for each.
* *
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL. * if @heap != NULL.
*/ */
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{ {
cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_flag, cs, heap); css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
} }
/* /*
......
...@@ -1799,12 +1799,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, ...@@ -1799,12 +1799,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1; totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
for_each_mem_cgroup_tree(iter, memcg) { for_each_mem_cgroup_tree(iter, memcg) {
struct cgroup *cgroup = iter->css.cgroup; struct css_task_iter it;
struct cgroup_task_iter it;
struct task_struct *task; struct task_struct *task;
cgroup_task_iter_start(cgroup, &it); css_task_iter_start(&iter->css, &it);
while ((task = cgroup_task_iter_next(&it))) { while ((task = css_task_iter_next(&it))) {
switch (oom_scan_process_thread(task, totalpages, NULL, switch (oom_scan_process_thread(task, totalpages, NULL,
false)) { false)) {
case OOM_SCAN_SELECT: case OOM_SCAN_SELECT:
...@@ -1817,7 +1816,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, ...@@ -1817,7 +1816,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
case OOM_SCAN_CONTINUE: case OOM_SCAN_CONTINUE:
continue; continue;
case OOM_SCAN_ABORT: case OOM_SCAN_ABORT:
cgroup_task_iter_end(&it); css_task_iter_end(&it);
mem_cgroup_iter_break(memcg, iter); mem_cgroup_iter_break(memcg, iter);
if (chosen) if (chosen)
put_task_struct(chosen); put_task_struct(chosen);
...@@ -1834,7 +1833,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, ...@@ -1834,7 +1833,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
get_task_struct(chosen); get_task_struct(chosen);
} }
} }
cgroup_task_iter_end(&it); css_task_iter_end(&it);
} }
if (!chosen) if (!chosen)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment