Commit d643d8e6 authored by David S. Miller

SPARC64: Port to new cpu hotplug startup sequence.

parent 1225fb90
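This ports sparc64 from the old self-contained smp_boot_cpus()/smp_commence() bringup to the three hooks the new generic hotplug startup code calls into each architecture: smp_prepare_cpus() probes and launches the secondary cpus, __cpu_up() releases one cpu and waits for it to come online, and smp_cpus_done() runs once everything is up. Roughly, the generic boot path drives the hooks as in the sketch below; smp_init_sketch and the exact loop shape are an illustrative paraphrase, not code from this commit:

	/* Paraphrased sketch of the generic caller, not verbatim kernel code. */
	static void __init smp_init_sketch(unsigned int max_cpus)
	{
		unsigned int cpu;

		smp_prepare_cpus(max_cpus);	/* arch hook: launch secondaries */

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (num_online_cpus() >= max_cpus)
				break;
			if (cpu_possible(cpu) && !cpu_online(cpu))
				__cpu_up(cpu);	/* arch hook: commence this cpu */
		}

		smp_cpus_done(max_cpus);	/* arch hook: all cpus are up */
	}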
@@ -50,24 +50,12 @@ static int smp_activated;
 
 /* Kernel spinlock */
 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
-volatile int smp_processors_ready = 0;
 atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
 unsigned long cpu_online_map = 0;
-int smp_threads_ready = 0;
+atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0);
+unsigned long phys_cpu_present_map = 0;
+static unsigned long smp_commenced_mask;
+static unsigned long cpu_callout_map;
 
-void __init smp_setup(char *str, int *ints)
-{
-	/* XXX implement me XXX */
-}
-
-static int max_cpus = NR_CPUS;
-static int __init maxcpus(char *str)
-{
-	get_option(&str, &max_cpus);
-	return 1;
-}
-
-__setup("maxcpus=", maxcpus);
 
 void smp_info(struct seq_file *m)
 {
@@ -121,10 +109,6 @@ void __init smp_store_cpu_info(int id)
 		cpu_data[id].irq_worklists[i] = 0;
 }
 
-void __init smp_commence(void)
-{
-}
-
 static void smp_setup_percpu_timer(void);
 
 static volatile unsigned long callin_flag = 0;
@@ -216,8 +200,11 @@ void __init smp_callin(void)
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
-	while (!smp_threads_ready)
+	while (!test_bit(cpuid, &smp_commenced_mask))
 		membar("#LoadLoad");
+
+	set_bit(cpuid, &cpu_online_map);
+	atomic_inc(&sparc64_num_cpus_online);
 }
 
 void cpu_panic(void)
@@ -236,9 +223,7 @@ extern unsigned long sparc64_cpu_startup;
  */
 static struct thread_info *cpu_new_thread = NULL;
 
-static void smp_tune_scheduling(void);
-
-void __init smp_boot_cpus(void)
+static void __init smp_boot_cpus(unsigned int max_cpus)
 {
 	int cpucount = 0, i;
@@ -246,10 +231,8 @@ void __init smp_boot_cpus(void)
 	local_irq_enable();
 	smp_store_cpu_info(boot_cpu_id);
 
-	if (linux_num_cpus == 1) {
-		smp_tune_scheduling();
+	if (linux_num_cpus == 1)
 		return;
-	}
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (i == boot_cpu_id)
@@ -257,7 +240,7 @@ void __init smp_boot_cpus(void)
 		if ((cpucount + 1) == max_cpus)
 			goto ignorecpu;
 
-		if (cpu_online(i)) {
+		if (test_bit(i, &phys_cpu_present_map)) {
 			unsigned long entry =
 				(unsigned long)(&sparc64_cpu_startup);
 			unsigned long cookie =
@@ -281,6 +264,7 @@ void __init smp_boot_cpus(void)
 				if (linux_cpus[no].mid == i)
 					break;
 
 			cpu_new_thread = p->thread_info;
+			set_bit(i, &cpu_callout_map);
 			prom_startcpu(linux_cpus[no].prom_node,
 				      entry, cookie);
 			for (timeout = 0; timeout < 5000000; timeout++) {
@@ -289,28 +273,21 @@ void __init smp_boot_cpus(void)
 				udelay(100);
 			}
 			if (callin_flag) {
-				atomic_inc(&sparc64_num_cpus_online);
 				prom_cpu_nodes[i] = linux_cpus[no].prom_node;
 				prom_printf("OK\n");
 			} else {
 				cpucount--;
 				printk("Processor %d is stuck.\n", i);
 				prom_printf("FAILED\n");
+				clear_bit(i, &cpu_callout_map);
 			}
-			if (!callin_flag) {
 ignorecpu:
-				clear_bit(i, &cpu_online_map);
-			}
 		}
 	}
 	cpu_new_thread = NULL;
 	if (cpucount == 0) {
 		if (max_cpus != 1)
 			printk("Error: only one processor found.\n");
-		memset(&cpu_online_map, 0, sizeof(cpu_online_map));
-		set_bit(smp_processor_id(), &cpu_online_map);
-		atomic_set(&sparc64_num_cpus_online, 1);
 	} else {
 		unsigned long bogosum = 0;
@@ -325,14 +302,6 @@ void __init smp_boot_cpus(void)
 			   (bogosum/(5000/HZ))%100);
 		smp_activated = 1;
 	}
-
-	/* We want to run this with all the other cpus spinning
-	 * in the kernel.
-	 */
-	smp_tune_scheduling();
-
-	smp_processors_ready = 1;
-	membar("#StoreStore | #StoreLoad");
 }
 
 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
@@ -532,7 +501,6 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, unsigned long mask)
 {
-	if (smp_processors_ready) {
 	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 
 	mask &= cpu_online_map;
@@ -542,9 +510,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 		spitfire_xcall_deliver(data0, data1, data2, mask);
 	else
 		cheetah_xcall_deliver(data0, data1, data2, mask);
 	/* NOTE: Caller runs local copy on master. */
-	}
 }
 
 /* Send cross call to all processors except self. */
@@ -660,7 +626,6 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
 
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
-	if (smp_processors_ready) {
 	unsigned long mask = 1UL << cpu;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -691,14 +656,11 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
 	}
-	}
 }
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
-	if (smp_processors_ready) {
-		unsigned long mask =
-			cpu_online_map & ~(1UL << smp_processor_id());
+	unsigned long mask = cpu_online_map & ~(1UL << smp_processor_id());
 	u64 data0;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -725,24 +687,20 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 #endif
 
 flush_self:
 	__local_flush_dcache_page(page);
-	}
 }
 
 void smp_receive_signal(int cpu)
 {
-	if (smp_processors_ready) {
 	unsigned long mask = 1UL << cpu;
 
 	if ((cpu_online_map & mask) != 0) {
-		u64 data0 =
-			(((u64)&xcall_receive_signal) & 0xffffffff);
+		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
 
 		if (tlb_type == spitfire)
 			spitfire_xcall_deliver(data0, 0, 0, mask);
 		else
 			cheetah_xcall_deliver(data0, 0, 0, mask);
 	}
-	}
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
@@ -934,7 +892,6 @@ static unsigned long penguins_are_doing_time;
 
 void smp_capture(void)
 {
-	if (smp_processors_ready) {
 	int result = __atomic_add(1, &smp_capture_depth);
 
 	membar("#StoreStore | #LoadStore");
@@ -955,12 +912,10 @@ void smp_capture(void)
 		printk("done\n");
 #endif
 	}
-	}
 }
 
 void smp_release(void)
 {
-	if (smp_processors_ready) {
 	if (atomic_dec_and_test(&smp_capture_depth)) {
 #ifdef CAPTURE_DEBUG
 		printk("CPU[%d]: Giving pardon to "
@@ -971,7 +926,6 @@ void smp_release(void)
 		membar("#StoreStore | #StoreLoad");
 		atomic_dec(&smp_capture_registry);
 	}
-	}
 }
 
 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
@@ -1006,7 +960,6 @@ extern unsigned long xcall_promstop;
 
 void smp_promstop_others(void)
 {
-	if (smp_processors_ready)
 	smp_cross_call(&xcall_promstop, 0, 0, 0);
 }
@@ -1178,13 +1131,17 @@ void __init smp_tick_init(void)
 	atomic_set(&sparc64_num_cpus_online, 1);
 	memset(&cpu_online_map, 0, sizeof(cpu_online_map));
-	for (i = 0; i < linux_num_cpus; i++) {
-		if (linux_cpus[i].mid < NR_CPUS)
-			set_bit(linux_cpus[i].mid, &cpu_online_map);
-	}
+	set_bit(boot_cpu_id, &cpu_online_map);
 	prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
+
+	for (i = 0; i < linux_num_cpus; i++) {
+		if (linux_cpus[i].mid < NR_CPUS) {
+			set_bit(linux_cpus[i].mid,
+				&phys_cpu_present_map);
+			atomic_inc(&sparc64_num_cpus_possible);
+		}
+	}
 }
 
 cycles_t cacheflush_time;
@@ -1312,3 +1269,24 @@ int setup_profiling_timer(unsigned int multiplier)
 
 	return 0;
 }
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	smp_boot_cpus(max_cpus);
+}
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+	set_bit(cpu, &smp_commenced_mask);
+	while (!test_bit(cpu, &cpu_online_map))
+		mb();
+	return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	/* We want to run this with all the other cpus spinning
+	 * in the kernel.
+	 */
+	smp_tune_scheduling();
+}
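Together the new entry points form a two-sided handshake: the boot processor flags the target cpu in smp_commenced_mask and spins until it shows up in cpu_online_map, while the new cpu spins at the end of smp_callin() until it is commenced and then marks itself online. Condensed from the hunks above, with comments added (a sketch, not a compilable unit):

	/* Boot processor side, from __cpu_up() above: */
	set_bit(cpu, &smp_commenced_mask);	/* release the new cpu */
	while (!test_bit(cpu, &cpu_online_map))
		mb();				/* spin until it checks in */

	/* New processor side, from the end of smp_callin() above: */
	while (!test_bit(cpuid, &smp_commenced_mask))
		membar("#LoadLoad");		/* wait to be released */
	set_bit(cpuid, &cpu_online_map);	/* now visible to __cpu_up() */
	atomic_inc(&sparc64_num_cpus_online);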
@@ -145,6 +145,8 @@ EXPORT_SYMBOL(cpu_data);
 /* CPU online map and active count. */
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(sparc64_num_cpus_online);
+EXPORT_SYMBOL(phys_cpu_present_map);
+EXPORT_SYMBOL(sparc64_num_cpus_possible);
 
 /* Spinlock debugging library, optional. */
 #ifdef CONFIG_DEBUG_SPINLOCK
...
@@ -65,11 +65,19 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
 #include <asm/atomic.h>
 
 extern unsigned char boot_cpu_id;
+
+extern unsigned long phys_cpu_present_map;
+#define cpu_possible(cpu)	(phys_cpu_present_map & (1UL << (cpu)))
+
 extern unsigned long cpu_online_map;
 #define cpu_online(cpu)	(cpu_online_map & (1UL << (cpu)))
 
 extern atomic_t sparc64_num_cpus_online;
 #define num_online_cpus()	(atomic_read(&sparc64_num_cpus_online))
+
+extern atomic_t sparc64_num_cpus_possible;
+#define num_possible_cpus()	(atomic_read(&sparc64_num_cpus_possible))
 
 static inline int any_online_cpu(unsigned long mask)
 {
@@ -81,10 +89,6 @@ static inline int any_online_cpu(unsigned long mask)
  * General functions that each host system must provide.
  */
 
-extern void smp_callin(void);
-extern void smp_boot_cpus(void);
-extern void smp_store_cpu_info(int id);
-
 extern __inline__ int hard_smp_processor_id(void)
 {
 	if (tlb_type == cheetah) {
...
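With phys_cpu_present_map and sparc64_num_cpus_possible exported, code elsewhere can size per-cpu resources against the cpus that may ever run rather than only those currently online. A hypothetical usage sketch, assuming the new cpu_possible() macro; my_percpu_buf and my_alloc_percpu are illustrative names, not part of this commit:

	#include <linux/slab.h>
	#include <asm/smp.h>

	static void *my_percpu_buf[NR_CPUS];	/* hypothetical per-cpu table */

	static int __init my_alloc_percpu(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_possible(i))
				continue;	/* this cpu can never appear */
			my_percpu_buf[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (!my_percpu_buf[i])
				return -ENOMEM;
		}
		return 0;
	}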