Commit d99c8727 authored by Tejun Heo

cgroup: make cgroup_taskset deal with cgroup_subsys_state instead of cgroup

cgroup is in the process of converting to css (cgroup_subsys_state)
from cgroup as the principal subsystem interface handle.  This is
mostly to prepare for the unified hierarchy support where css's will
be created and destroyed dynamically, but it also helps clean up
subsystem implementations, as css is usually what they are interested
in anyway.

cgroup_taskset which is used by the subsystem attach methods is the
last cgroup subsystem API which isn't using css as the handle.  Update
cgroup_taskset_cur_cgroup() to cgroup_taskset_cur_css() and
cgroup_taskset_for_each() to take @skip_css instead of @skip_cgrp.
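
For illustration, a minimal sketch of a subsystem ->can_attach() callback
using the updated interface (the "foo" subsystem and its rejection rule are
hypothetical, not part of this patch):

	static int foo_can_attach(struct cgroup_subsys_state *css,
				  struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		/* @css itself is now the skip handle, not css->cgroup */
		cgroup_taskset_for_each(task, css, tset) {
			if (task->flags & PF_NO_SETAFFINITY)
				return -EINVAL;
		}
		return 0;
	}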

The conversions are pretty mechanical.  One exception is
cpuset::cgroup_cs(), which lost its last user and got removed.
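
For example, cpuset_attach() now looks up the old cpuset through the css
returned by cgroup_taskset_cur_css() instead of going through cgroup_cs():

	/* before */
	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
	struct cpuset *oldcs = cgroup_cs(oldcgrp);

	/* after */
	struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
							cpuset_subsys_id);
	struct cpuset *oldcs = css_cs(oldcss);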

This patch shouldn't introduce any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
parent 81eeaf04
@@ -891,7 +891,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, css->cgroup, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
...
@@ -562,20 +562,22 @@ int cgroup_task_count(const struct cgroup *cgrp);
 struct cgroup_taskset;
 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+						   int subsys_id);
 int cgroup_taskset_size(struct cgroup_taskset *tset);
 
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
- * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
+ * @skip_css: skip if task's css matches this, %NULL to iterate through all
  * @tset: taskset to iterate
  */
-#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
+#define cgroup_taskset_for_each(task, skip_css, tset)			\
 	for ((task) = cgroup_taskset_first((tset)); (task);		\
 	     (task) = cgroup_taskset_next((tset)))			\
-		if (!(skip_cgrp) ||					\
-		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
+		if (!(skip_css) ||					\
+		    cgroup_taskset_cur_css((tset),			\
+			(skip_css)->ss->subsys_id) != (skip_css))
 
 /*
  * Control Group subsystem type.
...
@@ -1907,18 +1907,20 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
 EXPORT_SYMBOL_GPL(cgroup_taskset_next);
 
 /**
- * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * cgroup_taskset_cur_css - return the matching css for the current task
  * @tset: taskset of interest
+ * @subsys_id: the ID of the target subsystem
  *
- * Return the cgroup for the current (last returned) task of @tset.  This
- * function must be preceded by either cgroup_taskset_first() or
- * cgroup_taskset_next().
+ * Return the css for the current (last returned) task of @tset for
+ * subsystem specified by @subsys_id.  This function must be preceded by
+ * either cgroup_taskset_first() or cgroup_taskset_next().
  */
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+						   int subsys_id)
 {
-	return tset->cur_cgrp;
+	return cgroup_css(tset->cur_cgrp, subsys_id);
 }
-EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);
 
 /**
  * cgroup_taskset_size - return the number of tasks in taskset
...
@@ -189,7 +189,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css->cgroup, tset) {
+	cgroup_taskset_for_each(task, new_css, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
...
@@ -119,12 +119,6 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 	return css ? container_of(css, struct cpuset, css) : NULL;
 }
 
-/* Retrieve the cpuset for a cgroup */
-static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
-{
-	return css_cs(cgroup_css(cgrp, cpuset_subsys_id));
-}
-
 /* Retrieve the cpuset for a task */
 static inline struct cpuset *task_cs(struct task_struct *task)
 {
@@ -1459,7 +1453,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
-	cgroup_taskset_for_each(task, css->cgroup, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu
@@ -1511,9 +1505,10 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
-	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
+	struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
+							cpuset_subsys_id);
 	struct cpuset *cs = css_cs(css);
-	struct cpuset *oldcs = cgroup_cs(oldcgrp);
+	struct cpuset *oldcs = css_cs(oldcss);
 	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 
@@ -1527,7 +1522,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-	cgroup_taskset_for_each(task, css->cgroup, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
...
@@ -7816,7 +7816,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css->cgroup, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
...
@@ -7135,7 +7135,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css->cgroup, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
@@ -7153,7 +7153,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css->cgroup, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		sched_move_task(task);
 }
...
@@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p;
 	void *v;
 
-	cgroup_taskset_for_each(p, css->cgroup, tset) {
+	cgroup_taskset_for_each(p, css, tset) {
 		task_lock(p);
 		v = (void *)(unsigned long)task_netprioidx(p);
 		iterate_fd(p->files, 0, update_netprio, v);
...
@@ -74,7 +74,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p;
 	void *v;
 
-	cgroup_taskset_for_each(p, css->cgroup, tset) {
+	cgroup_taskset_for_each(p, css, tset) {
 		task_lock(p);
 		v = (void *)(unsigned long)task_cls_classid(p);
 		iterate_fd(p->files, 0, update_classid, v);
...