Commit 58ce9508 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] Hot-plug CPU Boot Rewrite for i386

This modifies the i386 boot sequence to "plug in" CPUs one at a
time.  This is the minimal change to make it work (the CPUs are
brought up as normal during the "smp_prepare_cpus()" probe phase).
parent c5e06207
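
The patch replaces the old smp_boot_cpus()/smp_commence() sequence with three architecture hooks: smp_prepare_cpus(), __cpu_up() and smp_cpus_done(). For orientation, here is a minimal sketch of how generic boot code is expected to drive them; the generic caller is not part of this diff, so the function name and the exact loop below are assumptions:

static void __init smp_init_sketch(unsigned int max_cpus)
{
        unsigned int cpu;

        /* probe phase: on i386 this still boots every AP (see smp_boot_cpus below) */
        smp_prepare_cpus(max_cpus);

        /* "plug in" each possible CPU, one at a time */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_possible(cpu) && !cpu_online(cpu))
                        __cpu_up(cpu);

        /* architecture finalisation, e.g. zap_low_mappings() on i386 */
        smp_cpus_done(max_cpus);
}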
@@ -796,9 +796,9 @@ void __setup_APIC_LVTT(unsigned int clocks)
 	apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
 }
 
-void setup_APIC_timer(void * data)
+static void setup_APIC_timer(unsigned int clocks)
 {
-	unsigned int clocks = (unsigned int) data, slice, t0, t1;
+	unsigned int slice, t0, t1;
 	unsigned long flags;
 	int delta;
@@ -924,7 +924,7 @@ static unsigned int calibration_result;
 
 int dont_use_local_apic_timer __initdata = 0;
 
-void __init setup_APIC_clocks (void)
+void __init setup_boot_APIC_clock(void)
 {
 	/* Disabled by DMI scan or kernel option? */
 	if (dont_use_local_apic_timer)
@@ -939,12 +939,16 @@ void __init setup_APIC_clocks (void)
 	/*
 	 * Now set up the timer for real.
 	 */
-	setup_APIC_timer((void *)calibration_result);
+	setup_APIC_timer(calibration_result);
 	local_irq_enable();
+}
 
-	/* and update all other cpus */
-	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
-
+void __init setup_secondary_APIC_clock(void)
+{
+	local_irq_disable(); /* FIXME: Do we need this? --RR */
+	setup_APIC_timer(calibration_result);
+	local_irq_enable();
 }
 
 void __init disable_APIC_timer(void)
@@ -1177,7 +1181,7 @@ int __init APIC_init_uniprocessor (void)
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
 #endif
-	setup_APIC_clocks();
+	setup_boot_APIC_clock();
 
 	return 0;
 }
@@ -1589,7 +1589,7 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
 	p = buf;
 
-	if ((num_online_cpus() == 1) &&
+	if ((num_possible_cpus() == 1) &&
 	    !(error = apm_get_power_status(&bx, &cx, &dx))) {
 		ac_line_status = (bx >> 8) & 0xff;
 		battery_status = bx & 0xff;
@@ -1720,7 +1720,7 @@ static int apm(void *unused)
 		}
 	}
 
-	if (debug && (num_online_cpus() == 1)) {
+	if (debug && (num_possible_cpus() == 1)) {
 		error = apm_get_power_status(&bx, &cx, &dx);
 		if (error)
 			printk(KERN_INFO "apm: power status not available\n");
@@ -1764,7 +1764,7 @@ static int apm(void *unused)
 	pm_power_off = apm_power_off;
 	register_sysrq_key('o', &sysrq_poweroff_op);
 
-	if (num_online_cpus() == 1) {
+	if (num_possible_cpus() == 1) {
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
 		console_blank_hook = apm_console_blank;
 #endif
@@ -1907,9 +1907,7 @@ static int __init apm_init(void)
 		printk(KERN_NOTICE "apm: disabled on user request.\n");
 		return -ENODEV;
 	}
-	/* FIXME: When boot code changes, this will need to be
-	   deactivated when/if a CPU comes up --RR */
-	if ((num_online_cpus() > 1) && !power_off) {
+	if ((num_possible_cpus() > 1) && !power_off) {
 		printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n");
 		return -ENODEV;
 	}
@@ -1966,9 +1964,7 @@ static int __init apm_init(void)
 	kernel_thread(apm, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD);
 
-	/* FIXME: When boot code changes, this will need to be
-	   deactivated when/if a CPU comes up --RR */
-	if (num_online_cpus() > 1) {
+	if (num_possible_cpus() > 1) {
 		printk(KERN_NOTICE
 		       "apm: disabled - APM is not SMP safe (power off active).\n");
 		return 0;
......
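
The apm.c hunks above all make the same substitution: num_online_cpus() becomes num_possible_cpus(). Under the new boot order only the boot CPU is online when apm_init() runs, so counting online CPUs would wrongly classify every box as uniprocessor; the safety checks must instead ask whether another CPU could ever come up. num_possible_cpus() itself is not defined anywhere in this diff; by analogy with the cpu_possible() macro and num_booting_cpus() added below, a plausible (assumed) i386 definition would be:

static inline unsigned int num_possible_cpus(void)
{
        /* one bit per CPU that may ever be brought up (assumed definition) */
        return hweight32(phys_cpu_present_map);
}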
@@ -1055,7 +1055,7 @@ static void set_mtrr_smp (unsigned int reg, unsigned long base,
 	wait_barrier_cache_disable = TRUE;
 	wait_barrier_execute = TRUE;
 	wait_barrier_cache_enable = TRUE;
-	atomic_set (&undone_count, num_online_cpus() - 1);
+	atomic_set (&undone_count, num_booting_cpus() - 1);
 	/* Start the ball rolling on other CPUs */
 	if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
 		panic ("mtrr: timed out waiting for other CPUs\n");
@@ -1064,14 +1064,14 @@ static void set_mtrr_smp (unsigned int reg, unsigned long base,
 	/* Wait for all other CPUs to flush and disable their caches */
 	while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
 
 	/* Set up for completion wait and then release other CPUs to change MTRRs*/
-	atomic_set (&undone_count, num_online_cpus() - 1);
+	atomic_set (&undone_count, num_booting_cpus() - 1);
 	wait_barrier_cache_disable = FALSE;
 	set_mtrr_cache_disable (&ctxt);
 
 	/* Wait for all other CPUs to flush and disable their caches */
 	while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
 
 	/* Set up for completion wait and then release other CPUs to change MTRRs*/
-	atomic_set (&undone_count, num_online_cpus() - 1);
+	atomic_set (&undone_count, num_booting_cpus() - 1);
 	wait_barrier_execute = FALSE;
 	(*set_mtrr_up) (reg, base, size, type, FALSE);
 	/* Now wait for other CPUs to complete the function */
......
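
In mtrr.c the rendezvous count switches to num_booting_cpus() - 1: CPUs that have been called out service the smp_call_function() IPI during boot even though they are not yet marked online, so counting online CPUs would leave the barrier waiting on the wrong number. A condensed sketch of the barrier pattern set_mtrr_smp() relies on; undone_count, ipi_handler and data are the file-local names from the surrounding code, and the wrapper itself is illustrative:

static void mtrr_rendezvous_sketch(void)
{
        /* every booting CPU except ourselves must check in */
        atomic_set(&undone_count, num_booting_cpus() - 1);
        if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");
        /* spin until all participants have decremented the counter */
        while (atomic_read(&undone_count) > 0) {
                rep_nop();      /* "pause" hint, easy on a hyperthreaded sibling */
                barrier();
        }
}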
@@ -82,6 +82,8 @@ int __init check_nmi_watchdog (void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
+	/* FIXME: Only boot CPU is online at this stage.  Check CPUs
+	   as they come up. */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		if (!cpu_online(cpu))
 			continue;
......
@@ -31,7 +31,7 @@
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Martin J. Bligh		:	Added support for multi-quad systems
 *	Dave Jones		:	Report invalid combinations of Athlon CPUs.
-*/
+*	Rusty Russell		:	Hacked into shape for new "hotplug" boot process. */
 
 #include <linux/config.h>
 #include <linux/init.h>
@@ -53,9 +53,6 @@
 /* Set if we find a B stepping CPU */
 static int __initdata smp_b_stepping;
 
-/* Setup configured maximum number of CPUs to activate */
-static int __initdata max_cpus = NR_CPUS;
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
@@ -64,7 +61,8 @@ int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
 unsigned long cpu_online_map;
 static volatile unsigned long cpu_callin_map;
-static volatile unsigned long cpu_callout_map;
+volatile unsigned long cpu_callout_map;
+static unsigned long smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -72,33 +70,6 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
-/*
- * Setup routine for controlling SMP activation
- *
- * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
- * activation entirely (the MPS table probe still happens, though).
- *
- * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
- * greater than 0, limits the maximum number of CPUs activated in
- * SMP mode to <NUM>.
- */
-static int __init nosmp(char *str)
-{
-	max_cpus = 0;
-	return 1;
-}
-
-__setup("nosmp", nosmp);
-
-static int __init maxcpus(char *str)
-{
-	get_option(&str, &max_cpus);
-	return 1;
-}
-
-__setup("maxcpus=", maxcpus);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -139,7 +110,7 @@ void __init smp_alloc_memory(void)
 * a given CPU
 */
-void __init smp_store_cpu_info(int id)
+static void __init smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = cpu_data + id;
@@ -192,29 +163,6 @@ void __init smp_store_cpu_info(int id)
 		;
 }
 
-/*
- * Architecture specific routine called by the kernel just before init is
- * fired off. This allows the BP to have everything in order [we hope].
- * At the end of this all the APs will hit the system scheduling and off
- * we go. Each AP will load the system gdt's and jump through the kernel
- * init into idle(). At this point the scheduler will one day take over
- * and give them jobs to do. smp_callin is a standard routine
- * we use to track CPUs as they power up.
- */
-
-static atomic_t smp_commenced = ATOMIC_INIT(0);
-
-void __init smp_commence(void)
-{
-	/*
-	 * Lets the callins below out of their loop.
-	 */
-	Dprintk("Setting commenced=1, go go go\n");
-	wmb();
-	atomic_set(&smp_commenced,1);
-}
-
 /*
 * TSC synchronization.
 *
@@ -268,7 +216,7 @@ static void __init synchronize_tsc_bp (void)
 	unsigned long one_usec;
 	int buggy = 0;
 
-	printk("checking TSC synchronization across CPUs: ");
+	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());
 
 	one_usec = ((1<<30)/fast_gettimeoffset_quotient)*(1<<2);
@@ -289,7 +237,7 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * all APs synchronize but they loop on '== num_cpus'
 		 */
-		while (atomic_read(&tsc_count_start) != num_online_cpus()-1)
+		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
 			mb();
 		atomic_set(&tsc_count_stop, 0);
 		wmb();
@@ -308,7 +256,7 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * Wait for all APs to leave the synchronization point:
 		 */
-		while (atomic_read(&tsc_count_stop) != num_online_cpus()-1)
+		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
 			mb();
 		atomic_set(&tsc_count_start, 0);
 		wmb();
@@ -317,16 +265,16 @@ static void __init synchronize_tsc_bp (void)
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i)) {
+		if (test_bit(i, &cpu_callout_map)) {
 			t0 = tsc_values[i];
 			sum += t0;
 		}
 	}
-	avg = div64(sum, num_online_cpus());
+	avg = div64(sum, num_booting_cpus());
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
+		if (!test_bit(i, &cpu_callout_map))
 			continue;
 		delta = tsc_values[i] - avg;
 		if (delta < 0)
@@ -359,7 +307,7 @@ static void __init synchronize_tsc_ap (void)
 	int i;
 
 	/*
-	 * num_online_cpus is not necessarily known at the time
+	 * Not every cpu is online at the time
 	 * this gets called, so we first wait for the BP to
 	 * finish SMP initialization:
 	 */
@@ -367,7 +315,7 @@ static void __init synchronize_tsc_ap (void)
 	for (i = 0; i < NR_LOOPS; i++) {
 		atomic_inc(&tsc_count_start);
-		while (atomic_read(&tsc_count_start) != num_online_cpus())
+		while (atomic_read(&tsc_count_start) != num_booting_cpus())
 			mb();
 		rdtscll(tsc_values[smp_processor_id()]);
@@ -375,7 +323,7 @@ static void __init synchronize_tsc_ap (void)
 		write_tsc(0, 0);
 		atomic_inc(&tsc_count_stop);
-		while (atomic_read(&tsc_count_stop) != num_online_cpus()) mb();
+		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
 	}
 }
 #undef NR_LOOPS
@@ -403,7 +351,7 @@ void __init smp_callin(void)
 	 */
 	phys_id = GET_APIC_ID(apic_read(APIC_ID));
 	cpuid = smp_processor_id();
-	if (test_and_set_bit(cpuid, &cpu_online_map)) {
+	if (test_bit(cpuid, &cpu_callin_map)) {
 		printk("huh, phys CPU#%d, CPU#%d already present??\n",
 					phys_id, cpuid);
 		BUG();
@@ -501,15 +449,17 @@ int __init start_secondary(void *unused)
 	 */
 	cpu_init();
 	smp_callin();
-	while (!atomic_read(&smp_commenced))
+	while (!test_bit(smp_processor_id(), &smp_commenced_mask))
 		rep_nop();
+	setup_secondary_APIC_clock();
 	enable_APIC_timer();
 	/*
 	 * low-memory mappings have been cleared, flush them from
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
+	set_bit(smp_processor_id(), &cpu_online_map);
+	wmb();
 	return cpu_idle();
 }
@@ -943,7 +893,6 @@ static void __init do_boot_cpu (int apicid)
 			unmap_cpu_to_boot_apicid(cpu, apicid);
 			clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
 			clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-			clear_bit(cpu, &cpu_online_map);  /* was set in smp_callin() */
 			cpucount--;
 		}
@@ -1015,7 +964,7 @@ void *xquad_portio;
 
 int cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 
-void __init smp_boot_cpus(void)
+static void __init smp_boot_cpus(unsigned int max_cpus)
 {
 	int apicid, cpu, bit;
@@ -1057,6 +1006,7 @@ void __init smp_boot_cpus(void)
 	 * We have the boot CPU online for sure.
 	 */
 	set_bit(0, &cpu_online_map);
+	set_bit(0, &cpu_callout_map);
 	boot_cpu_logical_apicid = logical_smp_processor_id();
 	map_cpu_to_boot_apicid(0, boot_cpu_apicid);
@@ -1072,11 +1022,11 @@ void __init smp_boot_cpus(void)
 #ifndef CONFIG_VISWS
 		io_apic_irqs = 0;
 #endif
-		cpu_online_map = phys_cpu_present_map = 1;
+		phys_cpu_present_map = 1;
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 					   " Using dummy APIC emulation.\n");
-		goto smp_done;
+		return;
 	}
 
 	/*
@@ -1101,8 +1051,8 @@ void __init smp_boot_cpus(void)
 #ifndef CONFIG_VISWS
 		io_apic_irqs = 0;
 #endif
-		cpu_online_map = phys_cpu_present_map = 1;
-		goto smp_done;
+		phys_cpu_present_map = 1;
+		return;
 	}
 
 	verify_local_APIC();
@@ -1116,8 +1066,8 @@ void __init smp_boot_cpus(void)
 #ifndef CONFIG_VISWS
 		io_apic_irqs = 0;
 #endif
-		cpu_online_map = phys_cpu_present_map = 1;
-		goto smp_done;
+		phys_cpu_present_map = 1;
+		return;
 	}
 
 	connect_bsp_APIC();
@@ -1189,7 +1139,7 @@ void __init smp_boot_cpus(void)
 	} else {
 		unsigned long bogosum = 0;
 		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (cpu_online_map & (1<<cpu))
+			if (cpu_callout_map & (1<<cpu))
 				bogosum += cpu_data[cpu].loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
@@ -1212,10 +1162,10 @@ void __init smp_boot_cpus(void)
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		int i;
 
-		if (!cpu_online(cpu)) continue;
+		if (!test_bit(cpu, &cpu_callout_map)) continue;
 
 		for (i = 0; i < NR_CPUS; i++) {
-			if (i == cpu || !cpu_online(i))
+			if (i == cpu || !test_bit(i, &cpu_callout_map))
 				continue;
 			if (phys_proc_id[cpu] == phys_proc_id[i]) {
 				cpu_sibling_map[cpu] = i;
@@ -1239,17 +1189,40 @@ void __init smp_boot_cpus(void)
 	setup_IO_APIC();
 #endif
 
-	/*
-	 * Set up all local APIC timers in the system:
-	 */
-	setup_APIC_clocks();
+	setup_boot_APIC_clock();
 
 	/*
 	 * Synchronize the TSC with the AP
 	 */
 	if (cpu_has_tsc && cpucount)
 		synchronize_tsc_bp();
+}
+
+/* These are wrappers to interface to the new boot process.  Someone
+   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	smp_boot_cpus(max_cpus);
+}
 
-smp_done:
+int __devinit __cpu_up(unsigned int cpu)
+{
+	/* This only works at boot for x86.  See "rewrite" above. */
+	if (test_bit(cpu, &smp_commenced_mask))
+		return -ENOSYS;
+
+	/* In case one didn't come up */
+	if (!test_bit(cpu, &cpu_callin_map))
+		return -EIO;
+
+	/* Unleash the CPU! */
+	set_bit(cpu, &smp_commenced_mask);
+	while (!test_bit(cpu, &cpu_online_map))
+		mb();
+	return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
 	zap_low_mappings();
 }
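
The new boot-processor/AP handshake is split across __cpu_up() and start_secondary() above; here are the two halves condensed side by side for readability. The helper names are illustrative, the bodies are lifted from the hunks:

/* Boot-processor half, from __cpu_up() */
static void handshake_bp_sketch(unsigned int cpu)
{
        set_bit(cpu, &smp_commenced_mask);      /* unleash the parked AP */
        while (!test_bit(cpu, &cpu_online_map))
                mb();                           /* wait until it marks itself online */
}

/* AP half, from start_secondary() */
static void handshake_ap_sketch(void)
{
        while (!test_bit(smp_processor_id(), &smp_commenced_mask))
                rep_nop();                      /* parked until the BP commences us */
        set_bit(smp_processor_id(), &cpu_online_map);
        wmb();                                  /* publish the online bit before idling */
}

Note the ordering guarantee this buys: a CPU never appears in cpu_online_map before the boot processor has explicitly released it.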
@@ -76,7 +76,8 @@ extern void init_bsp_APIC (void);
 extern void setup_local_APIC (void);
 extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
-extern void setup_APIC_clocks (void);
+extern void setup_boot_APIC_clock (void);
+extern void setup_secondary_APIC_clock (void);
 extern void setup_apic_nmi_watchdog (void);
 extern inline void nmi_watchdog_tick (struct pt_regs * regs);
 extern int APIC_init_uniprocessor (void);
......
@@ -78,13 +78,6 @@ extern volatile int physical_apicid_to_cpu[MAX_APICID];
 extern volatile int cpu_to_logical_apicid[NR_CPUS];
 extern volatile int logical_apicid_to_cpu[MAX_APICID];
 
-/*
- * General functions that each host system must provide.
- */
-
-extern void smp_boot_cpus(void);
-extern void smp_store_cpu_info(int id);	/* Store per CPU info (like the initial udelay numbers */
-
 /*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
@@ -92,6 +85,7 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial
 */
 #define smp_processor_id() (current_thread_info()->cpu)
 
+#define cpu_possible(cpu) (phys_cpu_present_map & (1<<(cpu)))
 #define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
 
 extern inline unsigned int num_online_cpus(void)
@@ -119,6 +113,13 @@ static __inline int logical_smp_processor_id(void)
 	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
 }
 
+extern volatile unsigned long cpu_callout_map;
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+	return hweight32(cpu_callout_map);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define NO_PROC_ID	0xFF		/* No processor magic marker */
......
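
Read together, the bitmasks in this patch give each secondary CPU a four-step life cycle; the following comment block only restates what the hunks above already do:

/*
 * cpu_callout_map     - set by the BP in do_boot_cpu():   "you may boot"
 * cpu_callin_map      - set by the AP in smp_callin():    "I am alive"
 * smp_commenced_mask  - set by the BP in __cpu_up():      "start running"
 * cpu_online_map      - set by the AP in start_secondary(), last of all
 *
 * num_booting_cpus() counts callout bits, so boot-time code can count
 * CPUs that are up but not yet online.
 */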