Commit 89c5509b authored by Tejun Heo

cgroup: separate out put_css_set_locked() and remove put_css_set_taskexit()

put_css_set() is performed in two steps: it first tries to drop the
reference without grabbing css_set_rwsem, provided doing so wouldn't make
the count hit zero.  If that fails, it drops the reference after
write-locking css_set_rwsem.  This patch separates out the second phase
into put_css_set_locked(), which should be called with css_set_rwsem
locked.
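
For illustration only, here is a minimal user-space sketch of that
two-phase pattern (hypothetical names such as put_ref_set() and set_lock;
C11 atomics and a pthread rwlock stand in for atomic_t and css_set_rwsem —
this is not the kernel code): the unlocked fast path drops a reference only
when it cannot reach zero, and everything else falls through to the
write-locked slow path.

/* Hypothetical user-space analogue of the split; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER; /* stands in for css_set_rwsem */

struct ref_set {
        atomic_int refcount;
        /* ... links that readers walk while holding set_lock ... */
};

/* Second phase: caller already holds set_lock for writing. */
static void put_ref_set_locked(struct ref_set *s)
{
        if (atomic_fetch_sub(&s->refcount, 1) != 1)
                return;
        /* count hit zero: unlink and free while no reader can observe it */
        free(s);
}

/* First phase: drop a reference without the lock unless it would hit zero. */
static void put_ref_set(struct ref_set *s)
{
        int old = atomic_load(&s->refcount);

        /* analogue of atomic_add_unless(&cset->refcount, -1, 1) */
        while (old > 1)
                if (atomic_compare_exchange_weak(&s->refcount, &old, old - 1))
                        return; /* dropped a reference without touching the lock */

        pthread_rwlock_wrlock(&set_lock);
        put_ref_set_locked(s);
        pthread_rwlock_unlock(&set_lock);
}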

Also, put_css_set_taskexit() is dropped and put_css_set() is made to
take @taskexit.  There are only a handful of users of these functions;
there's no point in providing different variants.

put_css_set_locked() will be used by later changes.  This patch doesn't
introduce any functional changes.
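
As a rough guess at why the locked variant is worth exposing on its own
(the later changes are not part of this patch), a hypothetical caller that
already holds css_set_rwsem for writing could drop several references in a
single critical section, calling put_css_set_locked() directly instead of
re-taking the lock per put:

/* Hypothetical caller, not from this patch: puts many csets under one write lock. */
static void put_many_css_sets(struct css_set **csets, int n, bool taskexit)
{
        int i;

        down_write(&css_set_rwsem);
        for (i = 0; i < n; i++)
                put_css_set_locked(csets[i], taskexit);
        up_write(&css_set_rwsem);
}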
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent 889ed9ce
@@ -369,22 +369,14 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
         return key;
 }
 
-static void __put_css_set(struct css_set *cset, int taskexit)
+static void put_css_set_locked(struct css_set *cset, bool taskexit)
 {
         struct cgrp_cset_link *link, *tmp_link;
 
-        /*
-         * Ensure that the refcount doesn't hit zero while any readers
-         * can see it. Similar to atomic_dec_and_lock(), but for an
-         * rwlock
-         */
-        if (atomic_add_unless(&cset->refcount, -1, 1))
-                return;
-        down_write(&css_set_rwsem);
-        if (!atomic_dec_and_test(&cset->refcount)) {
-                up_write(&css_set_rwsem);
+        lockdep_assert_held(&css_set_rwsem);
+
+        if (!atomic_dec_and_test(&cset->refcount))
                 return;
-        }
 
         /* This css_set is dead. unlink it and release cgroup refcounts */
         hash_del(&cset->hlist);
@@ -406,10 +398,24 @@ static void __put_css_set(struct css_set *cset, int taskexit)
                 kfree(link);
         }
 
-        up_write(&css_set_rwsem);
         kfree_rcu(cset, rcu_head);
 }
 
+static void put_css_set(struct css_set *cset, bool taskexit)
+{
+        /*
+         * Ensure that the refcount doesn't hit zero while any readers
+         * can see it. Similar to atomic_dec_and_lock(), but for an
+         * rwlock
+         */
+        if (atomic_add_unless(&cset->refcount, -1, 1))
+                return;
+
+        down_write(&css_set_rwsem);
+        put_css_set_locked(cset, taskexit);
+        up_write(&css_set_rwsem);
+}
+
 /*
  * refcounted get/put for css_set objects
  */
@@ -418,16 +424,6 @@ static inline void get_css_set(struct css_set *cset)
         atomic_inc(&cset->refcount);
 }
 
-static inline void put_css_set(struct css_set *cset)
-{
-        __put_css_set(cset, 0);
-}
-
-static inline void put_css_set_taskexit(struct css_set *cset)
-{
-        __put_css_set(cset, 1);
-}
-
 /**
  * compare_css_sets - helper function for find_existing_css_set().
  * @cset: candidate css_set being tested
@@ -1752,7 +1748,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
          * we're safe to drop it here; it will be freed under RCU.
          */
         set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
-        put_css_set(old_cset);
+        put_css_set(old_cset, false);
 }
 
 /**
@@ -1898,7 +1894,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                         tc = flex_array_get(group, i);
                         if (!tc->cset)
                                 break;
-                        put_css_set(tc->cset);
+                        put_css_set(tc->cset, false);
                 }
         }
 out_cancel_attach:
@@ -3715,7 +3711,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         /*
          * css_set_rwsem synchronizes access to ->cset_links and prevents
-         * @cgrp from being removed while __put_css_set() is in progress.
+         * @cgrp from being removed while put_css_set() is in progress.
          */
         down_read(&css_set_rwsem);
         empty = list_empty(&cgrp->cset_links);
@@ -4267,7 +4263,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
         }
         task_unlock(tsk);
 
-        put_css_set_taskexit(cset);
+        put_css_set(cset, true);
 }
 
 static void check_for_release(struct cgroup *cgrp)