Commit 5258f386 authored by Mike Galbraith, committed by Ingo Molnar

sched/autogroup: Fix crash on reboot when autogroup is disabled

Due to these two commits:

  8323f26c sched: Fix race in task_group()
  800d4d30 sched, autogroup: Stop going ahead if autogroup is disabled

... autogroup scheduling's dynamic knobs are wrecked.

With both patches applied, all you have to do to crash a box is
disable autogroup during boot up, then reboot.. boom, NULL pointer
dereference due to 800d4d30 not allowing autogroup to move things,
and 8323f26c making that the only way to switch runqueues.

Remove most of the (dysfunctional) knobs and turn the remaining
sched_autogroup_enabled knob read-only.

If the user fiddles with cgroups hereafter, once tasks
are moved, autogroup won't mess with them again unless
they call setsid().

No knobs, no glitz, nada, just a cute little thing folks can
turn on if they don't want to muck about with cgroups and/or
systemd.
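
As a rough illustration of the setsid() behaviour described above, here is a
minimal userspace sketch (not part of this patch): a wrapper that starts a
command in its own session, so the kernel attaches it to a fresh autogroup
when the feature is enabled, much like util-linux setsid(1) does.

  #include <stdio.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(int argc, char **argv)
  {
          pid_t pid;

          if (argc < 2) {
                  fprintf(stderr, "usage: %s command [args...]\n", argv[0]);
                  return 1;
          }

          pid = fork();
          if (pid < 0) {
                  perror("fork");
                  return 1;
          }
          if (pid == 0) {
                  /*
                   * A new session is the one remaining trigger for
                   * autogroup placement: setsid() ends up calling
                   * sched_autogroup_create_attach() in the kernel.
                   */
                  if (setsid() < 0)
                          perror("setsid");
                  execvp(argv[1], &argv[1]);
                  perror("execvp");
                  _exit(127);
          }
          return waitpid(pid, NULL, 0) < 0;
  }
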
Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: Xiaotian Feng <xtfeng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xiaotian Feng <dannyfeng@tencent.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: <stable@vger.kernel.org> # v3.6
Link: http://lkml.kernel.org/r/1351451963.4999.8.camel@maggy.simpson.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8ed92e51
@@ -1271,81 +1271,6 @@ static const struct file_operations proc_pid_sched_operations = {
 #endif
-#ifdef CONFIG_SCHED_AUTOGROUP
-/*
- * Print out autogroup related information:
- */
-static int sched_autogroup_show(struct seq_file *m, void *v)
-{
-        struct inode *inode = m->private;
-        struct task_struct *p;
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-        proc_sched_autogroup_show_task(p, m);
-        put_task_struct(p);
-        return 0;
-}
-static ssize_t
-sched_autogroup_write(struct file *file, const char __user *buf,
-            size_t count, loff_t *offset)
-{
-        struct inode *inode = file->f_path.dentry->d_inode;
-        struct task_struct *p;
-        char buffer[PROC_NUMBUF];
-        int nice;
-        int err;
-        memset(buffer, 0, sizeof(buffer));
-        if (count > sizeof(buffer) - 1)
-                count = sizeof(buffer) - 1;
-        if (copy_from_user(buffer, buf, count))
-                return -EFAULT;
-        err = kstrtoint(strstrip(buffer), 0, &nice);
-        if (err < 0)
-                return err;
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-        err = proc_sched_autogroup_set_nice(p, nice);
-        if (err)
-                count = err;
-        put_task_struct(p);
-        return count;
-}
-static int sched_autogroup_open(struct inode *inode, struct file *filp)
-{
-        int ret;
-        ret = single_open(filp, sched_autogroup_show, NULL);
-        if (!ret) {
-                struct seq_file *m = filp->private_data;
-                m->private = inode;
-        }
-        return ret;
-}
-static const struct file_operations proc_pid_sched_autogroup_operations = {
-        .open           = sched_autogroup_open,
-        .read           = seq_read,
-        .write          = sched_autogroup_write,
-        .llseek         = seq_lseek,
-        .release        = single_release,
-};
-#endif /* CONFIG_SCHED_AUTOGROUP */
 static ssize_t comm_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *offset)
 {
@@ -3035,9 +2960,6 @@ static const struct pid_entry tgid_base_stuff[] = {
         INF("limits",     S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
         REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
-#endif
-#ifdef CONFIG_SCHED_AUTOGROUP
-        REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
         REG("comm",       S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
......
@@ -110,6 +110,9 @@ static inline struct autogroup *autogroup_create(void)
 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return false;
         if (tg != &root_task_group)
                 return false;
@@ -143,15 +146,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
         p->signal->autogroup = autogroup_kref_get(ag);
-        if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-                goto out;
         t = p;
         do {
                 sched_move_task(t);
         } while_each_thread(p, t);
-out:
         unlock_task_sighand(p, &flags);
         autogroup_kref_put(prev);
 }
@@ -159,8 +158,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
-        struct autogroup *ag = autogroup_create();
+        struct autogroup *ag;
+        if (!sysctl_sched_autogroup_enabled)
+                return;
+        ag = autogroup_create();
         autogroup_move_group(p, ag);
         /* drop extra reference added by autogroup_create() */
         autogroup_kref_put(ag);
@@ -176,11 +178,15 @@ EXPORT_SYMBOL(sched_autogroup_detach);
 void sched_autogroup_fork(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
         sig->autogroup = autogroup_task_get(current);
 }
 void sched_autogroup_exit(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
         autogroup_kref_put(sig->autogroup);
 }
@@ -193,58 +199,6 @@ static int __init setup_autogroup(char *str)
 __setup("noautogroup", setup_autogroup);
-#ifdef CONFIG_PROC_FS
-int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
-{
-        static unsigned long next = INITIAL_JIFFIES;
-        struct autogroup *ag;
-        int err;
-        if (nice < -20 || nice > 19)
-                return -EINVAL;
-        err = security_task_setnice(current, nice);
-        if (err)
-                return err;
-        if (nice < 0 && !can_nice(current, nice))
-                return -EPERM;
-        /* this is a heavy operation taking global locks.. */
-        if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
-                return -EAGAIN;
-        next = HZ / 10 + jiffies;
-        ag = autogroup_task_get(p);
-        down_write(&ag->lock);
-        err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
-        if (!err)
-                ag->nice = nice;
-        up_write(&ag->lock);
-        autogroup_kref_put(ag);
-        return err;
-}
-void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
-{
-        struct autogroup *ag = autogroup_task_get(p);
-        if (!task_group_is_autogroup(ag->tg))
-                goto out;
-        down_read(&ag->lock);
-        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
-        up_read(&ag->lock);
-out:
-        autogroup_kref_put(ag);
-}
-#endif /* CONFIG_PROC_FS */
 #ifdef CONFIG_SCHED_DEBUG
 int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
......
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 struct autogroup {
-        /*
-         * reference doesn't mean how many thread attach to this
-         * autogroup now. It just stands for the number of task
-         * could use this autogroup.
-         */
         struct kref             kref;
         struct task_group       *tg;
         struct rw_semaphore     lock;
@@ -29,9 +24,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-        if (enabled && task_wants_autogroup(p, tg))
+        if (task_wants_autogroup(p, tg))
                 return p->signal->autogroup->tg;
         return tg;
......
@@ -363,10 +363,8 @@ static struct ctl_table kern_table[] = {
                 .procname       = "sched_autogroup_enabled",
                 .data           = &sysctl_sched_autogroup_enabled,
                 .maxlen         = sizeof(unsigned int),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec_minmax,
-                .extra1         = &zero,
-                .extra2         = &one,
+                .mode           = 0444,
+                .proc_handler   = proc_dointvec,
         },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
......
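
With the sysctl above now mode 0444, the knob can still be read at run time
but only set at boot (e.g. via the "noautogroup" kernel parameter handled by
setup_autogroup() in the hunk above). A small check program, purely
illustrative and not part of this patch:

  #include <stdio.h>

  int main(void)
  {
          /* The knob is exported read-only after this change. */
          FILE *f = fopen("/proc/sys/kernel/sched_autogroup_enabled", "r");
          int enabled = -1;

          if (!f) {
                  perror("sched_autogroup_enabled");
                  return 1;
          }
          if (fscanf(f, "%d", &enabled) != 1)
                  enabled = -1;
          fclose(f);
          printf("sched_autogroup_enabled = %d\n", enabled);
          return enabled < 0;
  }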