Commit d5c419b6 authored by Tejun Heo

cgroup: move cgroup->sibling and ->children into cgroup_subsys_state

We're moving towards using cgroup_subsys_states as the fundamental
structural blocks.  Let's move cgroup->sibling and ->children into
cgroup_subsys_state.  This is pure move without functional change and
only cgroup->self's fields are actually used.  Other csses will make
use of the fields later.

While at it, update init_and_link_css() so that it zeroes the whole
css before initializing it and remove explicit zeroing of ->flags.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent d51f39b0
...@@ -68,6 +68,10 @@ struct cgroup_subsys_state { ...@@ -68,6 +68,10 @@ struct cgroup_subsys_state {
/* PI: the parent css */ /* PI: the parent css */
struct cgroup_subsys_state *parent; struct cgroup_subsys_state *parent;
/* siblings list anchored at the parent's ->children */
struct list_head sibling;
struct list_head children;
/* /*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The * PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id(). * matching css can be looked up using css_from_id().
...@@ -171,13 +175,6 @@ struct cgroup { ...@@ -171,13 +175,6 @@ struct cgroup {
*/ */
int populated_cnt; int populated_cnt;
/*
* We link our 'sibling' struct into our parent's 'children'.
* Our children link their 'sibling' into our 'children'.
*/
struct list_head sibling; /* my parent's children */
struct list_head children; /* my children */
struct kernfs_node *kn; /* cgroup kernfs entry */ struct kernfs_node *kn; /* cgroup kernfs entry */
struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
......
...@@ -378,7 +378,7 @@ static int notify_on_release(const struct cgroup *cgrp) ...@@ -378,7 +378,7 @@ static int notify_on_release(const struct cgroup *cgrp)
/* iterate over child cgrps, lock should be held throughout iteration */ /* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp) \ #define cgroup_for_each_live_child(child, cgrp) \
list_for_each_entry((child), &(cgrp)->children, sibling) \ list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
if (({ lockdep_assert_held(&cgroup_mutex); \ if (({ lockdep_assert_held(&cgroup_mutex); \
cgroup_is_dead(child); })) \ cgroup_is_dead(child); })) \
; \ ; \
...@@ -870,7 +870,7 @@ static void cgroup_destroy_root(struct cgroup_root *root) ...@@ -870,7 +870,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
mutex_lock(&cgroup_mutex); mutex_lock(&cgroup_mutex);
BUG_ON(atomic_read(&root->nr_cgrps)); BUG_ON(atomic_read(&root->nr_cgrps));
BUG_ON(!list_empty(&cgrp->children)); BUG_ON(!list_empty(&cgrp->self.children));
/* Rebind all subsystems back to the default hierarchy */ /* Rebind all subsystems back to the default hierarchy */
rebind_subsystems(&cgrp_dfl_root, root->subsys_mask); rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);
...@@ -1432,7 +1432,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) ...@@ -1432,7 +1432,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
} }
/* remounting is not allowed for populated hierarchies */ /* remounting is not allowed for populated hierarchies */
if (!list_empty(&root->cgrp.children)) { if (!list_empty(&root->cgrp.self.children)) {
ret = -EBUSY; ret = -EBUSY;
goto out_unlock; goto out_unlock;
} }
...@@ -1512,8 +1512,8 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) ...@@ -1512,8 +1512,8 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
struct cgroup_subsys *ss; struct cgroup_subsys *ss;
int ssid; int ssid;
INIT_LIST_HEAD(&cgrp->sibling); INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->children); INIT_LIST_HEAD(&cgrp->self.children);
INIT_LIST_HEAD(&cgrp->cset_links); INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->release_list);
INIT_LIST_HEAD(&cgrp->pidlists); INIT_LIST_HEAD(&cgrp->pidlists);
...@@ -1612,7 +1612,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask) ...@@ -1612,7 +1612,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
link_css_set(&tmp_links, cset, root_cgrp); link_css_set(&tmp_links, cset, root_cgrp);
up_write(&css_set_rwsem); up_write(&css_set_rwsem);
BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1); BUG_ON(atomic_read(&root->nr_cgrps) != 1);
kernfs_activate(root_cgrp->kn); kernfs_activate(root_cgrp->kn);
...@@ -3128,11 +3128,11 @@ css_next_child(struct cgroup_subsys_state *pos_css, ...@@ -3128,11 +3128,11 @@ css_next_child(struct cgroup_subsys_state *pos_css,
* cgroup is removed or iteration and removal race. * cgroup is removed or iteration and removal race.
*/ */
if (!pos) { if (!pos) {
next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling); next = list_entry_rcu(cgrp->self.children.next, struct cgroup, self.sibling);
} else if (likely(!cgroup_is_dead(pos))) { } else if (likely(!cgroup_is_dead(pos))) {
next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); next = list_entry_rcu(pos->self.sibling.next, struct cgroup, self.sibling);
} else { } else {
list_for_each_entry_rcu(next, &cgrp->children, sibling) list_for_each_entry_rcu(next, &cgrp->self.children, self.sibling)
if (next->serial_nr > pos->serial_nr) if (next->serial_nr > pos->serial_nr)
break; break;
} }
...@@ -3142,12 +3142,12 @@ css_next_child(struct cgroup_subsys_state *pos_css, ...@@ -3142,12 +3142,12 @@ css_next_child(struct cgroup_subsys_state *pos_css,
* the next sibling; however, it might have @ss disabled. If so, * the next sibling; however, it might have @ss disabled. If so,
* fast-forward to the next enabled one. * fast-forward to the next enabled one.
*/ */
while (&next->sibling != &cgrp->children) { while (&next->self.sibling != &cgrp->self.children) {
struct cgroup_subsys_state *next_css = cgroup_css(next, parent_css->ss); struct cgroup_subsys_state *next_css = cgroup_css(next, parent_css->ss);
if (next_css) if (next_css)
return next_css; return next_css;
next = list_entry_rcu(next->sibling.next, struct cgroup, sibling); next = list_entry_rcu(next->self.sibling.next, struct cgroup, self.sibling);
} }
return NULL; return NULL;
} }
...@@ -3283,7 +3283,7 @@ static bool cgroup_has_live_children(struct cgroup *cgrp) ...@@ -3283,7 +3283,7 @@ static bool cgroup_has_live_children(struct cgroup *cgrp)
struct cgroup *child; struct cgroup *child;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(child, &cgrp->children, sibling) { list_for_each_entry_rcu(child, &cgrp->self.children, self.sibling) {
if (!cgroup_is_dead(child)) { if (!cgroup_is_dead(child)) {
rcu_read_unlock(); rcu_read_unlock();
return true; return true;
...@@ -4144,7 +4144,7 @@ static void css_release_work_fn(struct work_struct *work) ...@@ -4144,7 +4144,7 @@ static void css_release_work_fn(struct work_struct *work)
} else { } else {
/* cgroup release path */ /* cgroup release path */
mutex_lock(&cgroup_mutex); mutex_lock(&cgroup_mutex);
list_del_rcu(&cgrp->sibling); list_del_rcu(&cgrp->self.sibling);
mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_mutex);
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
...@@ -4168,9 +4168,11 @@ static void init_and_link_css(struct cgroup_subsys_state *css, ...@@ -4168,9 +4168,11 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
{ {
cgroup_get(cgrp); cgroup_get(cgrp);
memset(css, 0, sizeof(*css));
css->cgroup = cgrp; css->cgroup = cgrp;
css->ss = ss; css->ss = ss;
css->flags = 0; INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
if (cgroup_parent(cgrp)) { if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss); css->parent = cgroup_css(cgroup_parent(cgrp), ss);
...@@ -4344,7 +4346,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, ...@@ -4344,7 +4346,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
cgrp->serial_nr = cgroup_serial_nr_next++; cgrp->serial_nr = cgroup_serial_nr_next++;
/* allocation complete, commit to creation */ /* allocation complete, commit to creation */
list_add_tail_rcu(&cgrp->sibling, &cgroup_parent(cgrp)->children); list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
atomic_inc(&root->nr_cgrps); atomic_inc(&root->nr_cgrps);
cgroup_get(parent); cgroup_get(parent);
...@@ -4507,9 +4509,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) ...@@ -4507,9 +4509,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
return -EBUSY; return -EBUSY;
/* /*
* Make sure there's no live children. We can't test ->children * Make sure there's no live children. We can't test emptiness of
* emptiness as dead children linger on it while being destroyed; * ->self.children as dead children linger on it while being
* otherwise, "rmdir parent/child parent" may fail with -EBUSY. * drained; otherwise, "rmdir parent/child parent" may fail.
*/ */
if (cgroup_has_live_children(cgrp)) if (cgroup_has_live_children(cgrp))
return -EBUSY; return -EBUSY;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment