Commit 4ef0c5c6 authored by Zhang Qiao, committed by Peter Zijlstra

kernel/sched: Fix sched_fork() accessing an invalid sched_task_group

There is a small race between copy_process() and sched_fork() where
child->sched_task_group can point to an already freed task_group.

	parent doing fork()      | someone moving the parent
				 | to another cgroup
  -------------------------------+-------------------------------
  copy_process()
      + dup_task_struct()<1>
				  parent moves to another cgroup,
				  and the old cgroup is freed. <2>
      + sched_fork()
	+ __set_task_cpu()<3>
	+ task_fork_fair()
	  + sched_slice()<4>

In the worst case, this bug leads to a use-after-free and causes the
panic shown below:

  (1) the parent copies its sched_task_group to the child at <1>;

  (2) someone moves the parent to another cgroup and frees the old
      cgroup at <2>;

  (3) the sched_task_group and cfs_rq that belong to the old cgroup
      are accessed at <3> and <4>, which causes the panic:

  [] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
  [] PGD 8000001fa0a86067 P4D 8000001fa0a86067 PUD 2029955067 PMD 0
  [] Oops: 0000 [#1] SMP PTI
  [] CPU: 7 PID: 648398 Comm: ebizzy Kdump: loaded Tainted: G           OE    --------- -  - 4.18.0.x86_64+ #1
  [] RIP: 0010:sched_slice+0x84/0xc0

  [] Call Trace:
  []  task_fork_fair+0x81/0x120
  []  sched_fork+0x132/0x240
  []  copy_process.part.5+0x675/0x20e0
  []  ? __handle_mm_fault+0x63f/0x690
  []  _do_fork+0xcd/0x3b0
  []  do_syscall_64+0x5d/0x1d0
  []  entry_SYSCALL_64_after_hwframe+0x65/0xca
  [] RIP: 0033:0x7f04418cd7e1

Between cgroup_can_fork() and cgroup_post_fork() the cgroup
membership, and therefore the sched_task_group, cannot change. So set
the child's sched_task_group in sched_post_fork() and move task_fork()
and __set_task_cpu() (which access the sched_task_group) from
sched_fork() to sched_post_fork().
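
With the fix, the relevant ordering in copy_process() looks roughly
like this (a simplified sketch, showing only the calls involved here):

  copy_process()
      + dup_task_struct()           child inherits parent's pointers
      + sched_fork()                no longer touches sched_task_group
      + cgroup_can_fork()           pins the css_set in args->cset
      + sched_post_fork(p, args)    reads the task_group from args->cset
      + cgroup_post_fork(p, args)   child attached, cset released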

Fixes: 8323f26c ("sched: Fix race in task_group")
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lkml.kernel.org/r/20210915064030.2231-1-zhangqiao22@huawei.com
parent f9ec6fea
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -54,7 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p);
+extern void sched_post_fork(struct task_struct *p,
+			    struct kernel_clone_args *kargs);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2405,7 +2405,7 @@ static __latent_entropy struct task_struct *copy_process(
 	write_unlock_irq(&tasklist_lock);
 
 	proc_fork_connector(p);
-	sched_post_fork(p);
+	sched_post_fork(p, args);
 	cgroup_post_fork(p, args);
 	perf_event_fork(p);
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4343,8 +4343,6 @@ int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
  */
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
-	unsigned long flags;
-
 	__sched_fork(clone_flags, p);
 	/*
 	 * We mark the process as NEW here. This guarantees that
@@ -4390,24 +4388,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	init_entity_runnable_average(&p->se);
 
-	/*
-	 * The child is not yet in the pid-hash so no cgroup attach races,
-	 * and the cgroup is pinned to this child due to cgroup_fork()
-	 * is ran before sched_fork().
-	 *
-	 * Silence PROVE_RCU.
-	 */
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	rseq_migrate(p);
-	/*
-	 * We're setting the CPU for the first time, we don't migrate,
-	 * so use __set_task_cpu().
-	 */
-	__set_task_cpu(p, smp_processor_id());
-	if (p->sched_class->task_fork)
-		p->sched_class->task_fork(p);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4423,8 +4403,29 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_post_fork(struct task_struct *p)
+void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
+	unsigned long flags;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *tg;
+#endif
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+#ifdef CONFIG_CGROUP_SCHED
+	tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+			  struct task_group, css);
+	p->sched_task_group = autogroup_task_group(p, tg);
+#endif
+	rseq_migrate(p);
+	/*
+	 * We're setting the CPU for the first time, we don't migrate,
+	 * so use __set_task_cpu().
+	 */
+	__set_task_cpu(p, smp_processor_id());
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
 	uclamp_post_fork(p);
 }
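
The container_of() step above works because struct task_group embeds
its cgroup_subsys_state as the css member, so the member pointer held
in kargs->cset->subsys[cpu_cgrp_id] can be converted back to the
enclosing task_group. A minimal, self-contained userspace sketch of
the same pattern follows; struct css, the id field, and the simplified
container_of() here are illustrative stand-ins, not the kernel
definitions:

	#include <stddef.h>
	#include <stdio.h>

	/* simplified equivalent of the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct css { int id; };                 /* stand-in for cgroup_subsys_state */
	struct task_group { struct css css; };  /* task_group embeds its css */

	int main(void)
	{
		struct task_group tg = { .css = { .id = 1 } };
		struct css *cssp = &tg.css;     /* like cset->subsys[cpu_cgrp_id] */

		/* recover the enclosing task_group from the embedded css */
		struct task_group *back = container_of(cssp, struct task_group, css);

		printf("%s\n", back == &tg ? "recovered" : "broken");
		return 0;
	}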