Commit 1c6727af authored by Tejun Heo

cgroup: implement for_each_css()

There are enough places where css's of a cgroup are iterated, which
currently uses for_each_root_subsys() + explicit cgroup_css().  This
patch implements for_each_css() and replaces the above combination
with it.

This patch doesn't introduce any behavior changes.

v2: Updated to apply cleanly on top of v2 of "cgroup: fix css leaks on
    online_css() failure"
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent c81c925a
...@@ -242,6 +242,21 @@ static int notify_on_release(const struct cgroup *cgrp) ...@@ -242,6 +242,21 @@ static int notify_on_release(const struct cgroup *cgrp)
return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
} }
/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor (struct cgroup_subsys_state *)
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Walks every subsystem slot of @cgrp, skipping slots whose css is NULL
 * (subsystem not attached to this hierarchy), and runs the loop body with
 * @css set to each non-NULL css in turn.
 *
 * The empty "if (!...) { } else" construction exists so that the macro
 * expands to a single statement that safely takes the caller's loop body
 * after "else" — this avoids the dangling-else ambiguity a bare
 * "if (css = ...)" wrapper would create, while still skipping NULL slots.
 *
 * Should be called under cgroup_mutex; rcu_dereference_check() accepts the
 * dereference either in an RCU read-side critical section or with
 * cgroup_mutex held (lockdep_is_held()).
 */
#define for_each_css(css, ssid, cgrp) \
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
if (!((css) = rcu_dereference_check( \
(cgrp)->subsys[(ssid)], \
lockdep_is_held(&cgroup_mutex)))) { } \
else
/** /**
* for_each_subsys - iterate all loaded cgroup subsystems * for_each_subsys - iterate all loaded cgroup subsystems
* @ss: the iteration cursor * @ss: the iteration cursor
...@@ -1942,8 +1957,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, ...@@ -1942,8 +1957,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
bool threadgroup) bool threadgroup)
{ {
int retval, i, group_size; int retval, i, group_size;
struct cgroup_subsys *ss, *failed_ss = NULL;
struct cgroupfs_root *root = cgrp->root; struct cgroupfs_root *root = cgrp->root;
struct cgroup_subsys_state *css, *failed_css = NULL;
/* threadgroup list cursor and array */ /* threadgroup list cursor and array */
struct task_struct *leader = tsk; struct task_struct *leader = tsk;
struct task_and_cgroup *tc; struct task_and_cgroup *tc;
...@@ -2016,13 +2031,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, ...@@ -2016,13 +2031,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
/* /*
* step 1: check that we can legitimately attach to the cgroup. * step 1: check that we can legitimately attach to the cgroup.
*/ */
for_each_root_subsys(root, ss) { for_each_css(css, i, cgrp) {
struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); if (css->ss->can_attach) {
retval = css->ss->can_attach(css, &tset);
if (ss->can_attach) {
retval = ss->can_attach(css, &tset);
if (retval) { if (retval) {
failed_ss = ss; failed_css = css;
goto out_cancel_attach; goto out_cancel_attach;
} }
} }
...@@ -2058,12 +2071,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, ...@@ -2058,12 +2071,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
/* /*
* step 4: do subsystem attach callbacks. * step 4: do subsystem attach callbacks.
*/ */
for_each_root_subsys(root, ss) { for_each_css(css, i, cgrp)
struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); if (css->ss->attach)
css->ss->attach(css, &tset);
if (ss->attach)
ss->attach(css, &tset);
}
/* /*
* step 5: success! and cleanup * step 5: success! and cleanup
...@@ -2080,13 +2090,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, ...@@ -2080,13 +2090,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
} }
out_cancel_attach: out_cancel_attach:
if (retval) { if (retval) {
for_each_root_subsys(root, ss) { for_each_css(css, i, cgrp) {
struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); if (css == failed_css)
if (ss == failed_ss)
break; break;
if (ss->cancel_attach) if (css->ss->cancel_attach)
ss->cancel_attach(css, &tset); css->ss->cancel_attach(css, &tset);
} }
} }
out_free_group_list: out_free_group_list:
...@@ -4375,9 +4383,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) ...@@ -4375,9 +4383,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex) __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{ {
struct dentry *d = cgrp->dentry; struct dentry *d = cgrp->dentry;
struct cgroup_subsys *ss; struct cgroup_subsys_state *css;
struct cgroup *child; struct cgroup *child;
bool empty; bool empty;
int ssid;
lockdep_assert_held(&d->d_inode->i_mutex); lockdep_assert_held(&d->d_inode->i_mutex);
lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&cgroup_mutex);
...@@ -4413,12 +4422,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) ...@@ -4413,12 +4422,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* will be invoked to perform the rest of destruction once the * will be invoked to perform the rest of destruction once the
* percpu refs of all css's are confirmed to be killed. * percpu refs of all css's are confirmed to be killed.
*/ */
for_each_root_subsys(cgrp->root, ss) { for_each_css(css, ssid, cgrp)
struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); kill_css(css);
if (css)
kill_css(css);
}
/* /*
* Mark @cgrp dead. This prevents further task migration and child * Mark @cgrp dead. This prevents further task migration and child
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment