Commit bcee19f4 authored by Linus Torvalds

Merge branch 'for-4.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup fixes from Tejun Heo:
 "The threadgroup locking changes which went in during 4.2 devel cycle
  added write locking of a percpu_rwsem in cgroup task migration path;
  unfortunately, that involved expedited rcu syncing which turned out to
  be too slow and heavy for certain workloads.  The patchset which is
  dependent on this one didn't get committed during that devel cycle, so
  these two patches can be reverted safely.

  Oleg reworked percpu_rwsem for 4.4 so that the writer path is a lot
  lighter.  The reported issue goes away with Oleg's reworked
  percpu_rwsem and I'll reapply these patches on the for-4.4 branch so
  that they can land together with Oleg's changes"

* 'for-4.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  Revert "sched, cgroup: replace signal_struct->group_rwsem with a global percpu_rwsem"
  Revert "cgroup: simplify threadgroup locking"
parents ac2fc4b9 0c986253
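
What the revert restores, in miniature: each process carries its own rw_semaphore in signal_struct, fork and exit take it for read through threadgroup_change_begin/end(), and cgroup migration takes it for write through threadgroup_lock(). The global percpu_rwsem being reverted made the read side nearly free but pushed every write through the expedited rcu syncing the message above calls too slow and heavy. A minimal userspace sketch of the restored pattern, using pthreads and illustrative names rather than the kernel primitives:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for signal_struct: one rwsem per threadgroup, as the revert
 * restores, instead of one global percpu_rwsem shared by every process. */
struct threadgroup {
	pthread_rwlock_t group_rwsem;
};

/* fork/exit-style paths: shared (read) side, contention stays per-process */
static void change_begin(struct threadgroup *tg)
{
	pthread_rwlock_rdlock(&tg->group_rwsem);
}

static void change_end(struct threadgroup *tg)
{
	pthread_rwlock_unlock(&tg->group_rwsem);
}

/* migration-style path: exclusive (write) side, blocks only this group */
static void threadgroup_lock(struct threadgroup *tg)
{
	pthread_rwlock_wrlock(&tg->group_rwsem);
}

static void threadgroup_unlock(struct threadgroup *tg)
{
	pthread_rwlock_unlock(&tg->group_rwsem);
}

int main(void)
{
	struct threadgroup tg;
	pthread_rwlock_init(&tg.group_rwsem, NULL);

	change_begin(&tg);	/* a "fork" enters the group */
	change_end(&tg);

	threadgroup_lock(&tg);	/* a "migration" excludes fork/exit briefly */
	threadgroup_unlock(&tg);

	pthread_rwlock_destroy(&tg.group_rwsem);
	puts("per-threadgroup rwsem pattern exercised");
	return 0;
}

The design point is locality: a writer here contends only with fork/exit in the one process being migrated, not with every process in the system.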
@@ -473,31 +473,8 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 };

-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-	percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end().  Counterpart of
- * cgroup_threadgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-	percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);

 #else	/* CONFIG_CGROUPS */
...
@@ -25,6 +25,13 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;

+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig)						\
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk)						\
 	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
 	INIT_PREV_CPUTIME(sig)						\
 	.cred_guard_mutex =						\
		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
+	INIT_GROUP_RWSEM(sig)						\
 }

 extern struct nsproxy init_nsproxy;
...
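
The INIT_GROUP_RWSEM() macro restored above is the usual kernel idiom for config-conditional members of a statically initialized struct: under CONFIG_CGROUPS it expands to a designated initializer, otherwise to nothing, so INIT_SIGNALS stays a valid initializer either way. A small standalone illustration of the idiom, with hypothetical names and a pthread rwlock standing in for the kernel rwsem:

#include <pthread.h>
#include <stdio.h>

#define FEATURE_ON 1	/* hypothetical config switch, like CONFIG_CGROUPS */

struct sig {
	int refcnt;
#if FEATURE_ON
	pthread_rwlock_t group_lock;
#endif
};

#if FEATURE_ON
/* expands to a designated initializer when the feature is compiled in */
#define INIT_GROUP_LOCK(name)	.group_lock = PTHREAD_RWLOCK_INITIALIZER,
#else
/* expands to nothing, keeping the aggregate initializer valid */
#define INIT_GROUP_LOCK(name)
#endif

static struct sig init_sig = {
	.refcnt = 1,
	INIT_GROUP_LOCK(init_sig)	/* trailing comma comes from the macro */
};

int main(void)
{
	printf("init_sig.refcnt = %d\n", init_sig.refcnt);
	return 0;
}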
@@ -762,6 +762,18 @@ struct signal_struct {
 	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting, more specifically, setting of
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require
+	 * threadgroup to remain stable should use threadgroup_[un]lock()
+	 * which also takes care of exec path.  Currently, cgroup is the
+	 * only user.
+	 */
+	struct rw_semaphore group_rwsem;
+#endif

 	oom_flags_t oom_flags;
 	short oom_score_adj;		/* OOM kill score adjustment */
...
@@ -46,7 +46,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
-#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);

-struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
 #define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	return cset;
 }

+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the
+ * threadgroup needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->group_rwsem);
+}
+
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
 	struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	lockdep_assert_held(&css_set_rwsem);

 	/*
-	 * We are synchronized through cgroup_threadgroup_rwsem against
-	 * PF_EXITING setting such that we can't race against cgroup_exit()
-	 * changing the css_set to init_css_set and dropping the old one.
+	 * We are synchronized through threadgroup_lock() against PF_EXITING
+	 * setting such that we can't race against cgroup_exit() changing the
+	 * css_set to init_css_set and dropping the old one.
 	 */
 	WARN_ON_ONCE(tsk->flags & PF_EXITING);

 	old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
  * @src_cset and add it to @preloaded_csets, which should later be cleaned
  * up by cgroup_migrate_finish().
  *
- * This function may be called without holding cgroup_threadgroup_rwsem
- * even if the target is a process.  Threads may be created and destroyed
- * but as long as cgroup_mutex is not dropped, no new css_set can be put
- * into play and the preloaded css_sets are guaranteed to cover all
- * migrations.
+ * This function may be called without holding threadgroup_lock even if the
+ * target is a process.  Threads may be created and destroyed but as long
+ * as cgroup_mutex is not dropped, no new css_set can be put into play and
+ * the preloaded css_sets are guaranteed to cover all migrations.
  */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@ static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
  * @threadgroup: whether @leader points to the whole process or a single task
  *
  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding cgroup_threadgroup_rwsem.  The
+ * process, the caller must be holding threadgroup_lock of @leader.  The
  * caller is also responsible for invoking cgroup_migrate_add_src() and
  * cgroup_migrate_prepare_dst() on the targets before invoking this
  * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
  * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
- * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
+ * Call holding cgroup_mutex and threadgroup_lock of @leader.
  */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
@@ -2460,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	if (!cgrp)
 		return -ENODEV;

-	percpu_down_write(&cgroup_threadgroup_rwsem);
+retry_find_task:
 	rcu_read_lock();
 	if (pid) {
 		tsk = find_task_by_vpid(pid);
 		if (!tsk) {
+			rcu_read_unlock();
 			ret = -ESRCH;
-			goto out_unlock_rcu;
+			goto out_unlock_cgroup;
 		}
 	} else {
 		tsk = current;
@@ -2482,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	 */
 	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
-		goto out_unlock_rcu;
+		rcu_read_unlock();
+		goto out_unlock_cgroup;
 	}

 	get_task_struct(tsk);
 	rcu_read_unlock();

+	threadgroup_lock(tsk);
+	if (threadgroup) {
+		if (!thread_group_leader(tsk)) {
+			/*
+			 * a race with de_thread from another thread's exec()
+			 * may strip us of our leadership, if this happens,
+			 * there is no choice but to throw this task away and
+			 * try again; this is
+			 * "double-double-toil-and-trouble-check locking".
+			 */
+			threadgroup_unlock(tsk);
+			put_task_struct(tsk);
+			goto retry_find_task;
+		}
+	}
+
 	ret = cgroup_procs_write_permission(tsk, cgrp, of);
 	if (!ret)
 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

-	put_task_struct(tsk);
-	goto out_unlock_threadgroup;
-
-out_unlock_rcu:
-	rcu_read_unlock();
-out_unlock_threadgroup:
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	threadgroup_unlock(tsk);
+
+	put_task_struct(tsk);
+out_unlock_cgroup:
 	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 }
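
The retry_find_task loop restored above is a lock-then-revalidate pattern: the task is looked up under RCU without the threadgroup lock, so leadership can change (via de_thread()) before threadgroup_lock() is acquired, and the thread_group_leader() test must be repeated under the lock, retrying from the lookup on failure. A compact sketch of the same pattern with hypothetical names, using a pthread mutex in place of the kernel rwsem:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task_like {
	pthread_mutex_t lock;
	bool is_leader;		/* may be flipped concurrently, as de_thread() does */
};

static struct task_like table_entry = {
	PTHREAD_MUTEX_INITIALIZER, true
};

/* stand-in for the unlocked (RCU-style) lookup; its result may go stale */
static struct task_like *lookup_task(void)
{
	return &table_entry;
}

static struct task_like *acquire_leader(void)
{
	struct task_like *t;
retry:
	t = lookup_task();	/* found without the lock */
	if (!t)
		return NULL;
	pthread_mutex_lock(&t->lock);
	if (!t->is_leader) {
		/* raced with a leadership change: drop everything and retry */
		pthread_mutex_unlock(&t->lock);
		goto retry;
	}
	return t;		/* returned locked and revalidated */
}

int main(void)
{
	struct task_like *t = acquire_leader();
	if (t) {
		puts("leader acquired under lock");
		pthread_mutex_unlock(&t->lock);
	}
	return 0;
}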
@@ -2643,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)

 	lockdep_assert_held(&cgroup_mutex);

-	percpu_down_write(&cgroup_threadgroup_rwsem);
-
 	/* look up all csses currently attached to @cgrp's subtree */
 	down_read(&css_set_rwsem);
 	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2700,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 			goto out_finish;
 		last_task = task;

+		threadgroup_lock(task);
+		/* raced against de_thread() from another thread? */
+		if (!thread_group_leader(task)) {
+			threadgroup_unlock(task);
+			put_task_struct(task);
+			continue;
+		}
+
 		ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
+		threadgroup_unlock(task);
 		put_task_struct(task);

 		if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2711,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)

 out_finish:
 	cgroup_migrate_finish(&preloaded_csets);
-	percpu_up_write(&cgroup_threadgroup_rwsem);
 	return ret;
 }

@@ -5024,7 +5083,6 @@ int __init cgroup_init(void)
 	unsigned long key;
 	int ssid, err;

-	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
...
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	tty_audit_fork(sig);
 	sched_autogroup_fork(sig);

+#ifdef CONFIG_CGROUPS
+	init_rwsem(&sig->group_rwsem);
+#endif
+
 	sig->oom_score_adj = current->signal->oom_score_adj;
 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
...