Commit 15faafc6 authored by Peter Zijlstra

sched,init: Fix DEBUG_PREEMPT vs early boot

Extend 8fb12156 ("init: Pin init task to the boot CPU, initially")
to cover the new PF_NO_SETAFFINITY requirement.
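
For context: since the commit named in the Fixes: tag below, the DEBUG_PREEMPT
check in lib/smp_processor_id.c treats a task as safely pinned only when
is_percpu_thread() returns true, which requires PF_NO_SETAFFINITY on top of
nr_cpus_allowed == 1. A rough, paraphrased sketch of that helper (from
include/linux/sched.h, shown here for illustration only, not part of this
patch):

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	/* Pinned to exactly one CPU, and the affinity cannot be changed. */
	return (current->flags & PF_NO_SETAFFINITY) &&
	       (current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}

Pinning init to the boot CPU alone therefore no longer silences the early-boot
warnings; the task also has to carry PF_NO_SETAFFINITY until sched_init_smp()
lets it roam.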

While there, move wait_for_completion(&kthreadd_done) into kernel_init()
to make it absolutely clear it is the very first thing done by the init
thread.

Fixes: 570a752b ("lib/smp_processor_id: Use is_percpu_thread() instead of nr_cpus_allowed")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Borislav Petkov <bp@alien8.de>
Link: https://lkml.kernel.org/r/YLS4mbKUrA3Gnb4t@hirez.programming.kicks-ass.net
parent 7b419f47

--- a/init/main.c
+++ b/init/main.c
@@ -692,6 +692,7 @@ noinline void __ref rest_init(void)
 	 */
 	rcu_read_lock();
 	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+	tsk->flags |= PF_NO_SETAFFINITY;
 	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
 	rcu_read_unlock();
 
@@ -1440,6 +1441,11 @@ static int __ref kernel_init(void *unused)
 {
 	int ret;
 
+	/*
+	 * Wait until kthreadd is all set-up.
+	 */
+	wait_for_completion(&kthreadd_done);
+
 	kernel_init_freeable();
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
@@ -1520,11 +1526,6 @@ void __init console_on_rootfs(void)
 
 static noinline void __init kernel_init_freeable(void)
 {
-	/*
-	 * Wait until kthreadd is all set-up.
-	 */
-	wait_for_completion(&kthreadd_done);
-
 	/* Now the scheduler is fully set up and can do blocking allocations */
 	gfp_allowed_mask = __GFP_BITS_MASK;
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8862,6 +8862,7 @@ void __init sched_init_smp(void)
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
 		BUG();
+	current->flags &= ~PF_NO_SETAFFINITY;
 
 	sched_init_granularity();
 	init_sched_rt_class();
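
Taken together: rest_init() marks the freshly created init task
PF_NO_SETAFFINITY while it is pinned to the boot CPU, and sched_init_smp()
clears the flag again right after moving init to a housekeeping CPU. The
DEBUG_PREEMPT path this satisfies looks roughly like the following (an
illustrative paraphrase of check_preemption_disabled() in
lib/smp_processor_id.c after 570a752b, not the verbatim kernel source):

unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
	int this_cpu = raw_smp_processor_id();

	if (likely(preempt_count()))
		goto out;	/* preemption is disabled, CPU cannot change */

	if (irqs_disabled())
		goto out;	/* interrupts off, no migration either */

	/*
	 * A task that is pinned for good -- a per-CPU kthread, or the early
	 * init task with this patch applied -- may use the CPU number safely.
	 */
	if (is_percpu_thread())
		goto out;

	/* Otherwise complain (message paraphrased) and dump a backtrace. */
	printk(KERN_ERR "BUG: using %s%s() in preemptible code: %s/%d\n",
	       what1, what2, current->comm, current->pid);
	dump_stack();

out:
	return this_cpu;
}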