Commit c5e06207 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] Hot-plug CPU Boot Changes

This patch alters the boot sequence to "plug in" each CPU, one at a
time.  You need the patch for each architecture, as well.  The
interface used to be "smp_boot_cpus()", "smp_commence()", and each
arch implemented the "maxcpus" boot arg itself.  With this patch,
it is:
  smp_prepare_cpus(maxcpus): probe for cpus and set up cpu_possible(cpu).
  __cpu_up(cpu): called *after* initcalls, for each cpu where
     cpu_possible(cpu) is true.
  smp_cpus_done(maxcpus): called after every cpu has been brought up
parent e1eec525
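
For orientation before the diff, here is a minimal sketch of what an architecture port of the three new hooks could look like. This is illustrative only and not part of the patch: fictional_probe_cpu(), fictional_mark_cpu_possible() and fictional_kick_cpu() are invented stand-ins for platform code, and how cpu_possible() is backed varies per architecture.

	/* Illustrative sketch only -- not from this patch. */
	void __init smp_prepare_cpus(unsigned int max_cpus)
	{
		unsigned int i;

		/* Probe the hardware; record which CPUs may be booted
		 * later, up to the "maxcpus=" limit. */
		for (i = 0; i < NR_CPUS && i < max_cpus; i++)
			if (fictional_probe_cpu(i))
				fictional_mark_cpu_possible(i);
	}

	/* Called by cpu_up() after initcalls, once per possible CPU. */
	int __devinit __cpu_up(unsigned int cpu)
	{
		if (fictional_kick_cpu(cpu) != 0)
			return -EIO;	/* it never left its holding pattern */

		/* Wait until the new CPU marks itself online;
		 * cpu_up() BUG()s if we return 0 without that. */
		while (!cpu_online(cpu))
			yield();
		return 0;
	}

	void __init smp_cpus_done(unsigned int max_cpus)
	{
		/* Final polishing, e.g. print a bogomips summary. */
	}
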
include/linux/notifier.h
@@ -60,5 +60,7 @@ extern int notifier_call_chain(struct notifier_block **n, unsigned long val, voi
 #define NETLINK_URELEASE	0x0001	/* Unicast netlink socket released */
+#define CPU_ONLINE		0x0002	/* CPU (unsigned)v coming up */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NOTIFIER_H */
include/linux/sched.h
@@ -150,7 +150,6 @@ extern void update_process_times(int user);
 extern void update_one_process(struct task_struct *p, unsigned long user,
 			       unsigned long system, int cpu);
 extern void scheduler_tick(int user_tick, int system);
-extern void migration_init(void);
 extern unsigned long cache_decay_ticks;
include/linux/smp.h
@@ -32,19 +32,19 @@ extern void FASTCALL(smp_send_reschedule(int cpu));
 /*
- * Boot processor call to load the other CPU's
+ * Prepare machine for booting other CPUs.
  */
-extern void smp_boot_cpus(void);
+extern void smp_prepare_cpus(unsigned int max_cpus);

 /*
- * Processor call in. Must hold processors until ..
+ * Bring a CPU up
  */
-extern void smp_callin(void);
+extern int __cpu_up(unsigned int cpunum);

 /*
- * Multiprocessors may now schedule
+ * Final polishing of CPUs
  */
-extern void smp_commence(void);
+extern void smp_cpus_done(unsigned int max_cpus);

 /*
  * Call a function on all other processors

@@ -71,6 +71,13 @@ extern volatile int smp_msg_id;
 #define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU*/
 #define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */

+struct notifier_block;
+
+/* Need to know about CPUs going up/down? */
+extern int register_cpu_notifier(struct notifier_block *nb);
+extern void unregister_cpu_notifier(struct notifier_block *nb);
+
+int cpu_up(unsigned int cpu);
 #else /* !SMP */

 /*

@@ -93,6 +100,10 @@ static inline void smp_send_reschedule_all(void) { }
 #define per_cpu(var, cpu)	var
 #define this_cpu(var)		var

+/* Need to know about CPUs going up/down? */
+#define register_cpu_notifier(nb)	0
+#define unregister_cpu_notifier(nb)	do { } while(0)
+
 #endif /* !SMP */

 #define get_cpu()	({ preempt_disable(); smp_processor_id(); })
init/main.c
@@ -95,6 +95,35 @@ int rows, cols;
 char *execute_command;

+/* Setup configured maximum number of CPUs to activate */
+static unsigned int max_cpus = UINT_MAX;
+
+/*
+ * Setup routine for controlling SMP activation
+ *
+ * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
+ * activation entirely (the MPS table probe still happens, though).
+ *
+ * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
+ * greater than 0, limits the maximum number of CPUs activated in
+ * SMP mode to <NUM>.
+ */
+static int __init nosmp(char *str)
+{
+	max_cpus = 0;
+	return 1;
+}
+
+__setup("nosmp", nosmp);
+
+static int __init maxcpus(char *str)
+{
+	get_option(&str, &max_cpus);
+	return 1;
+}
+
+__setup("maxcpus=", maxcpus);
+
 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
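
As a usage note, the two handlers above give the following command-line behaviour (illustrative invocations):

	linux nosmp		# max_cpus = 0: no secondary CPUs are brought up
	linux maxcpus=0		# same effect as "nosmp"
	linux maxcpus=2		# smp_init() stops once 2 CPUs are online
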
@@ -275,6 +304,7 @@ static void __init smp_init(void)
 #endif

 static inline void setup_per_cpu_areas(void) { }
+static inline void smp_prepare_cpus(unsigned int maxcpus) { }

 #else
@@ -305,11 +335,27 @@ static void __init setup_per_cpu_areas(void)
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
+	unsigned int i;
+
+	/* FIXME: This should be done in userspace --RR */
+	for (i = 0; i < NR_CPUS; i++) {
+		if (num_online_cpus() >= max_cpus)
+			break;
+		if (cpu_possible(i) && !cpu_online(i)) {
+			printk("Bringing up %i\n", i);
+			cpu_up(i);
+		}
+	}
+
+	/* Any cleanup work */
+	printk("CPUS done %u\n", max_cpus);
+	smp_cpus_done(max_cpus);
+#if 0
 	/* Get other processors into their bootup holding patterns. */
 	smp_boot_cpus();
 	smp_threads_ready=1;
 	smp_commence();
+#endif
 }

 #endif
@@ -405,14 +451,12 @@ asmlinkage void __init start_kernel(void)
 	check_bugs();
 	printk("POSIX conformance testing by UNIFIX\n");

-	init_idle(current, smp_processor_id());
-
 	/*
 	 * We count on the initial thread going ok
 	 * Like idlers init is an unlocked kernel thread, which will
 	 * make syscalls (and thus be locked).
 	 */
-	smp_init();
+	init_idle(current, smp_processor_id());

 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
@@ -443,12 +487,6 @@ static void __init do_initcalls(void)
  */
 static void __init do_basic_setup(void)
 {
-	/*
-	 * Let the per-CPU migration threads start up:
-	 */
-#if CONFIG_SMP
-	migration_init();
-#endif
 	/*
 	 * Tell the world that we're going to be the grim
 	 * reaper of innocent orphaned children.
@@ -493,7 +531,10 @@ static int init(void * unused)
 	static char * argv_sh[] = { "sh", NULL, };

 	lock_kernel();
+	/* Sets up cpus_possible() */
+	smp_prepare_cpus(max_cpus);
 	do_basic_setup();
+	smp_init();

 	prepare_namespace();
kernel/Makefile
@@ -17,6 +17,7 @@ obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o context.o futex.o platform.o

+obj-$(CONFIG_SMP) += cpu.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += ksyms.o
 obj-$(CONFIG_PM) += pm.o
kernel/cpu.c (new file)
+/* CPU control.
+ * (C) 2001 Rusty Russell
+ * This code is licenced under the GPL.
+ */
+#include <linux/proc_fs.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <asm/semaphore.h>
+
+/* This protects CPUs going up and down... */
+DECLARE_MUTEX(cpucontrol);
+
+static struct notifier_block *cpu_chain = NULL;
+
+/* Need to know about CPUs going up/down? */
+int register_cpu_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&cpu_chain, nb);
+}
+
+void unregister_cpu_notifier(struct notifier_block *nb)
+{
+	notifier_chain_unregister(&cpu_chain, nb);
+}
+
+int __devinit cpu_up(unsigned int cpu)
+{
+	int ret;
+
+	if ((ret = down_interruptible(&cpucontrol)) != 0)
+		return ret;
+
+	if (cpu_online(cpu)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Arch-specific enabling code. */
+	ret = __cpu_up(cpu);
+	if (ret != 0)
+		goto out;
+	if (!cpu_online(cpu))
+		BUG();
+
+	/* Now call notifier in preparation. */
+	printk("CPU %u IS NOW UP!\n", cpu);
+	notifier_call_chain(&cpu_chain, CPU_ONLINE, (void *)cpu);
+
+ out:
+	up(&cpucontrol);
+	return ret;
+}
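
Distilled from the migration-thread and ksoftirqd conversions further down, a hypothetical subscriber to this new notifier interface looks like the following sketch (my_cpu_callback, my_cpu_nfb and my_subsys_init are invented names, not from the patch):

	/* Hypothetical example of the register_cpu_notifier() pattern. */
	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		if (action == CPU_ONLINE)
			printk("cpu %lu came online\n", (unsigned long)hcpu);
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_nfb = { &my_cpu_callback, NULL, 0 };

	static int __init my_subsys_init(void)
	{
		/* Handle the already-running boot CPU by hand, then
		 * register for CPUs that come up later. */
		my_cpu_callback(&my_cpu_nfb, CPU_ONLINE,
				(void *)smp_processor_id());
		register_cpu_notifier(&my_cpu_nfb);
		return 0;
	}
	__initcall(my_subsys_init);
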
kernel/sched.c
@@ -27,6 +27,8 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>

 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
@@ -1777,9 +1779,11 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	migration_req_t req;
 	runqueue_t *rq;

+#if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
 	new_mask &= cpu_online_map;
 	if (!new_mask)
 		BUG();
+#endif

 	preempt_disable();
 	rq = task_rq_lock(p, &flags);
@@ -1812,8 +1816,6 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	preempt_enable();
 }

-static __initdata int master_migration_thread;
-
 static int migration_thread(void * bind_cpu)
 {
 	int cpu = (int) (long) bind_cpu;
@@ -1825,15 +1827,7 @@ static int migration_thread(void * bind_cpu)
 	sigfillset(&current->blocked);
 	set_fs(KERNEL_DS);

-	/*
-	 * The first migration thread is started on the boot CPU, it
-	 * migrates the other migration threads to their destination CPUs.
-	 */
-	if (cpu != master_migration_thread) {
-		while (!cpu_rq(master_migration_thread)->migration_thread)
-			yield();
-		set_cpus_allowed(current, 1UL << cpu);
-	}
+	set_cpus_allowed(current, 1UL << cpu);

 	printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());

 	ret = setscheduler(0, SCHED_FIFO, &param);
@@ -1890,29 +1884,33 @@
 	}
 }

-void __init migration_init(void)
+static int migration_call(struct notifier_block *nfb,
+			  unsigned long action,
+			  void *hcpu)
 {
-	int cpu;
-
-	master_migration_thread = smp_processor_id();
-	current->cpus_allowed = 1UL << master_migration_thread;
-
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
-		if (kernel_thread(migration_thread, (void *) (long) cpu,
-				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
-			BUG();
+	switch (action) {
+	case CPU_ONLINE:
+		printk("Starting migration thread for cpu %li\n",
+		       (long)hcpu);
+		kernel_thread(migration_thread, hcpu,
+			      CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+		break;
 	}
-	current->cpus_allowed = -1L;
+	return NOTIFY_OK;
+}

-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
-		while (!cpu_rq(cpu)->migration_thread)
-			schedule_timeout(2);
-	}
-}
+static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };
+
+int __init migration_init(void)
+{
+	/* Start one for boot CPU. */
+	migration_call(&migration_notifier, CPU_ONLINE,
+		       (void *)smp_processor_id());
+
+	register_cpu_notifier(&migration_notifier);
+	return 0;
+}
+__initcall(migration_init);
 #endif

 extern void init_timervecs(void);
kernel/softirq.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/tqueue.h>
 #include <linux/percpu.h>
+#include <linux/notifier.h>

 /*
   - No shared variables, all the data are CPU local.

@@ -387,20 +388,32 @@ static int ksoftirqd(void * __bind_cpu)
 	}
 }

-static __init int spawn_ksoftirqd(void)
+static int __devinit cpu_callback(struct notifier_block *nfb,
+				  unsigned long action,
+				  void *hcpu)
 {
-	int cpu;
+	int hotcpu = (unsigned long)hcpu;

-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
-		if (kernel_thread(ksoftirqd, (void *) (long) cpu,
-				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
-			printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
-		else
-			while (!ksoftirqd_task(cpu))
-				yield();
-	}
+	if (action == CPU_ONLINE) {
+		if (kernel_thread(ksoftirqd, hcpu,
+				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) {
+			printk("ksoftirqd for %i failed\n", hotcpu);
+			return NOTIFY_BAD;
+		}
+
+		while (!ksoftirqd_task(hotcpu))
+			yield();
+		return NOTIFY_OK;
+	}
+	return NOTIFY_BAD;
+}
+
+static struct notifier_block cpu_nfb = { &cpu_callback, NULL, 0 };
+
+static __init int spawn_ksoftirqd(void)
+{
+	cpu_callback(&cpu_nfb, CPU_ONLINE, (void *)smp_processor_id());
+	register_cpu_notifier(&cpu_nfb);
 	return 0;
 }