Commit a9d1ed0d authored by Anton Blanchard

ppc64: non linear cpu support

parent e7c3270d
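This commit removes the assumption that CPU numbers are dense. Everywhere the kernel iterated 0..smp_num_cpus-1 and translated through cpu_logical_map(), it now walks the full 0..NR_CPUS-1 range and skips CPUs that are not online (or, for boot-time resource allocation, not possible). A minimal sketch of the recurring transformation, with use_cpu() standing in for whatever per-CPU work each loop body does:

	/* before: logical CPU numbers are dense, 0..smp_num_cpus-1 */
	for (i = 0; i < smp_num_cpus; i++)
		use_cpu(cpu_logical_map(i));

	/* after: numbers may be sparse; walk the whole range and skip
	   the holes.  cpu_online(i) tests bit i of cpu_online_map. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		use_cpu(i);
	}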
......@@ -346,8 +346,10 @@ int show_interrupts(struct seq_file *p, void *v)
struct irqaction * action;
seq_printf(p, " ");
for (j=0; j<smp_num_cpus; j++)
for (j=0; j<NR_CPUS; j++) {
if (cpu_online(j))
seq_printf(p, "CPU%d ",j);
}
seq_putc(p, '\n');
for (i = 0 ; i < NR_IRQS ; i++) {
......@@ -356,9 +358,10 @@ int show_interrupts(struct seq_file *p, void *v)
continue;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for (j = 0; j < smp_num_cpus; j++)
seq_printf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
for (j = 0; j < NR_CPUS; j++) {
if (cpu_online(j))
seq_printf(p, "%10u ", kstat.irqs[j][i]);
}
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
......@@ -425,14 +428,14 @@ static unsigned long move(unsigned long curr_cpu, unsigned long allowed_mask,
inside:
if (direction == 1) {
cpu++;
if (cpu >= smp_num_cpus)
if (cpu >= NR_CPUS)
cpu = 0;
} else {
cpu--;
if (cpu == -1)
cpu = smp_num_cpus-1;
cpu = NR_CPUS-1;
}
} while (!IRQ_ALLOWED(cpu,allowed_mask) ||
} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
(search_idle && !IDLE_ENOUGH(cpu,now)));
return cpu;
......@@ -653,12 +656,16 @@ static void show(char * str)
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", local_bh_count(i));
}
printk("]\n");
}
......
......@@ -85,10 +85,10 @@ unsigned int openpic_vec_spurious;
*/
#ifdef CONFIG_SMP
#define THIS_CPU Processor[cpu]
#define DECL_THIS_CPU int cpu = hard_smp_processor_id()
#define DECL_THIS_CPU int cpu = smp_processor_id()
#define CHECK_THIS_CPU check_arg_cpu(cpu)
#else
#define THIS_CPU Processor[hard_smp_processor_id()]
#define THIS_CPU Processor[smp_processor_id()]
#define DECL_THIS_CPU
#define CHECK_THIS_CPU
#endif /* CONFIG_SMP */
......@@ -356,7 +356,7 @@ void __init openpic_init(int main_pic, int offset, unsigned char* chrp_ack,
/* SIOint (8259 cascade) is special */
if (offset) {
openpic_initirq(0, 8, offset, 1, 1);
openpic_mapirq(0, 1<<get_hard_smp_processor_id(0));
openpic_mapirq(0, 1 << boot_cpuid);
}
/* Init all external sources */
......@@ -374,7 +374,7 @@ void __init openpic_init(int main_pic, int offset, unsigned char* chrp_ack,
/* Enabled, Priority 8 or 9 */
openpic_initirq(i, pri, i+offset, !sense, sense);
/* Processor 0 */
openpic_mapirq(i, 1<<get_hard_smp_processor_id(0));
openpic_mapirq(i, 1 << boot_cpuid);
}
/* Init descriptors */
......@@ -503,23 +503,10 @@ static void openpic_set_spurious(u_int vec)
vec);
}
/*
* Convert a cpu mask from logical to physical cpu numbers.
*/
static inline u32 physmask(u32 cpumask)
{
int i;
u32 mask = 0;
for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
return mask;
}
void openpic_init_processor(u_int cpumask)
{
openpic_write(&OpenPIC->Global.Processor_Initialization,
physmask(cpumask));
cpumask & cpu_online_map);
}
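With logical CPU numbers now equal to hardware numbers, the logical-to-physical translation that physmask() performed collapses to clipping the mask against the online set, which is why the call sites below pass cpumask & cpu_online_map directly. A sketch of the replacement, using a hypothetical helper name (the patch open-codes the expression instead):

	/* physmask() remapped each logical bit i to position
	   get_hard_smp_processor_id(i); with logical == hardware the
	   translation reduces to dropping offline CPUs. */
	static inline u32 destination_mask(u32 cpumask)	/* hypothetical */
	{
		return cpumask & cpu_online_map;
	}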
#ifdef CONFIG_SMP
......@@ -553,7 +540,7 @@ void openpic_cause_IPI(u_int ipi, u_int cpumask)
CHECK_THIS_CPU;
check_arg_ipi(ipi);
openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
physmask(cpumask));
cpumask & cpu_online_map);
}
void openpic_request_IPIs(void)
......@@ -593,7 +580,7 @@ void __init do_openpic_setup_cpu(void)
{
#ifdef CONFIG_IRQ_ALL_CPUS
int i;
u32 msk = 1 << hard_smp_processor_id();
u32 msk = 1 << smp_processor_id();
#endif
spin_lock(&openpic_setup_lock);
......@@ -638,7 +625,7 @@ static void __init openpic_maptimer(u_int timer, u_int cpumask)
{
check_arg_timer(timer);
openpic_write(&OpenPIC->Global.Timer[timer].Destination,
physmask(cpumask));
cpumask & cpu_online_map);
}
......@@ -761,7 +748,7 @@ static void openpic_end_irq(unsigned int irq_nr)
static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask)
{
openpic_mapirq(irq_nr - open_pic_irq_offset, physmask(cpumask));
openpic_mapirq(irq_nr - open_pic_irq_offset, cpumask & cpu_online_map);
}
#ifdef CONFIG_SMP
......
......@@ -309,7 +309,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
{
unsigned long lpar_rc;
lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu),value);
lpar_rc = plpar_ipi(n_cpu, value);
if (lpar_rc != H_Success) {
panic(" bad return code qirr -ipi - rc = %lx \n", lpar_rc);
}
......
......@@ -59,7 +59,9 @@ ppc64_pmc_stab(int file)
stab_faults = stab_capacity_castouts = stab_invalidations = n = 0;
if (file == -1) {
for (i = 0; i < smp_num_cpus; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
stab_faults += pmc_sw_cpu[i].stab_faults;
stab_capacity_castouts += pmc_sw_cpu[i].stab_capacity_castouts;
stab_invalidations += pmc_sw_cpu[i].stab_invalidations;
......
......@@ -87,7 +87,6 @@ EXPORT_SYMBOL(disable_irq_nosync);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_num_cpus);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(register_ioctl32_conversion);
......
......@@ -109,14 +109,19 @@ void proc_ppc64_init(void)
proc_ppc64_pmc_root = proc_mkdir("pmc", proc_ppc64_root);
proc_ppc64_pmc_system_root = proc_mkdir("system", proc_ppc64_pmc_root);
for (i = 0; i < naca->processorCount; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i)) {
sprintf(buf, "cpu%ld", i);
proc_ppc64_pmc_cpu_root[i] = proc_mkdir(buf, proc_ppc64_pmc_root);
proc_ppc64_pmc_cpu_root[i] =
proc_mkdir(buf, proc_ppc64_pmc_root);
}
}
/* Create directories for the software counters. */
for (i = 0; i < naca->processorCount; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
ent = create_proc_entry("stab", S_IRUGO | S_IWUSR,
proc_ppc64_pmc_cpu_root[i]);
if (ent) {
......@@ -155,7 +160,9 @@ void proc_ppc64_init(void)
}
/* Create directories for the hardware counters. */
for (i = 0; i < naca->processorCount; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
ent = create_proc_entry("hardware", S_IRUGO | S_IWUSR,
proc_ppc64_pmc_cpu_root[i]);
if (ent) {
......@@ -191,7 +198,9 @@ int proc_ppc64_pmc_find_file(void *data)
(unsigned long) proc_ppc64_pmc_system_root) {
return(-1);
} else {
for (i = 0; i < naca->processorCount; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
if ((unsigned long)data ==
(unsigned long)proc_ppc64_pmc_cpu_root[i]) {
return(i);
......@@ -383,7 +392,8 @@ int proc_get_lpevents
(unsigned long)xItLpQueue.xLpIntCountByType[i] );
}
len += sprintf( page+len, "\n events processed by processor:\n" );
for (i=0; i<naca->processorCount; ++i) {
for (i = 0; i < NR_CPUS; ++i) {
if (cpu_online(i))
len += sprintf( page+len, " CPU%02d %10u\n",
i, paca[i].lpEvent_count );
}
......
......@@ -304,7 +304,9 @@ void initialize_paca_hardware_interrupt_stack(void)
unsigned long stack;
unsigned long end_of_stack =0;
for (i=1; i < naca->processorCount; i++) {
for (i=1; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
/* Carve out storage for the hardware interrupt stack */
stack = __get_free_pages(GFP_KERNEL, get_order(8*PAGE_SIZE));
......@@ -327,7 +329,9 @@ void initialize_paca_hardware_interrupt_stack(void)
if (__is_processor(PV_POWER4))
return;
for (i=0; i < naca->processorCount; i++) {
for (i=0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
/* set page at the top of stack to be protected - prevent overflow */
end_of_stack = paca[i].xHrdIntStack - (8*PAGE_SIZE - STACK_FRAME_OVERHEAD);
ppc_md.hpte_updateboltedpp(PP_RXRX,end_of_stack);
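Note the distinction introduced here: cpu_possible(i) (paca[i].active, set by the firmware hold loop in prom.c) means the CPU exists and may come online later, while cpu_online(i) means it has actually booted. Boot-time resources such as these hardware interrupt stacks are carved out for every possible CPU; runtime loops elsewhere in the patch use the online test. A condensed sketch, with alloc_percpu_resources() as a hypothetical stand-in:

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))		/* paca[i].active */
			continue;
		/* allocate even if this CPU has not come online yet */
		alloc_percpu_resources(i);	/* hypothetical helper */
	}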
......
......@@ -445,9 +445,12 @@ prom_initialize_naca(unsigned long mem)
}
/* We gotta have at least 1 cpu... */
if ( (_naca->processorCount = num_cpus) < 1 )
if (num_cpus < 1)
PROM_BUG();
if (num_cpus > 1)
RELOC(ppc64_is_smp) = 1;
_naca->physicalMemorySize = lmb_phys_mem_size();
if (_naca->platform == PLATFORM_PSERIES) {
......@@ -477,10 +480,6 @@ prom_initialize_naca(unsigned long mem)
_naca->slb_size = 64;
#ifdef DEBUG_PROM
prom_print(RELOC("naca->processorCount = 0x"));
prom_print_hex(_naca->processorCount);
prom_print_nl();
prom_print(RELOC("naca->physicalMemorySize = 0x"));
prom_print_hex(_naca->physicalMemorySize);
prom_print_nl();
......@@ -1044,20 +1043,15 @@ prom_hold_cpus(unsigned long mem)
phandle node;
unsigned long offset = reloc_offset();
char type[64], *path;
int cpuid = 0;
extern void __secondary_hold(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
unsigned long *spinloop = __v2a(&__secondary_hold_spinloop);
unsigned long *acknowledge = __v2a(&__secondary_hold_acknowledge);
unsigned long secondary_hold = (unsigned long)__v2a(*PTRRELOC((unsigned long *)__secondary_hold));
struct naca_struct *_naca = RELOC(naca);
struct paca_struct *_xPaca = PTRRELOC(&paca[0]);
struct prom_t *_prom = PTRRELOC(&prom);
/* Initially, we must have one active CPU. */
_naca->processorCount = 1;
#ifdef DEBUG_PROM
prom_print(RELOC("prom_hold_cpus: start...\n"));
prom_print(RELOC(" 1) spinloop = 0x"));
......@@ -1117,19 +1111,12 @@ prom_hold_cpus(unsigned long mem)
node, path, 255) < 0)
continue;
cpuid++;
#ifdef DEBUG_PROM
prom_print_nl();
prom_print(RELOC("cpuid = 0x"));
prom_print_hex(cpuid);
prom_print_nl();
prom_print(RELOC("cpu hw idx = 0x"));
prom_print_hex(reg);
prom_print_nl();
#endif
_xPaca[cpuid].xHwProcNum = reg;
prom_print(RELOC("starting cpu "));
prom_print(path);
......@@ -1155,11 +1142,9 @@ prom_hold_cpus(unsigned long mem)
prom_print(RELOC(" 3) secondary_hold = 0x"));
prom_print_hex(secondary_hold);
prom_print_nl();
prom_print(RELOC(" 3) cpuid = 0x"));
prom_print_hex(cpuid);
prom_print_nl();
#endif
call_prom(RELOC("start-cpu"), 3, 0, node, secondary_hold, cpuid);
call_prom(RELOC("start-cpu"), 3, 0, node, secondary_hold, reg);
prom_print(RELOC("..."));
for ( i = 0 ; (i < 100000000) &&
(*acknowledge == ((unsigned long)-1)); i++ ) ;
......@@ -1171,10 +1156,10 @@ prom_hold_cpus(unsigned long mem)
prom_print_nl();
}
#endif
if (*acknowledge == cpuid) {
if (*acknowledge == reg) {
prom_print(RELOC("ok\n"));
/* Set the number of active processors. */
_naca->processorCount++;
_xPaca[reg].active = 1;
} else {
prom_print(RELOC("failed: "));
prom_print_hex(*acknowledge);
......@@ -1188,10 +1173,11 @@ prom_hold_cpus(unsigned long mem)
__is_processor(PV_SSTAR)) {
prom_print(RELOC(" starting secondary threads\n"));
for (i=0; i < _naca->processorCount ;i++) {
unsigned long threadid = _naca->processorCount*2-1-i;
for (i = 0; i < NR_CPUS; i += 2) {
if (!_xPaca[i].active)
continue;
if (i == 0) {
if (i == boot_cpuid) {
unsigned long pir = _get_PIR();
if (__is_processor(PV_PULSAR)) {
RELOC(hmt_thread_data)[i].pir =
......@@ -1201,21 +1187,9 @@ prom_hold_cpus(unsigned long mem)
pir & 0x3ff;
}
}
RELOC(hmt_thread_data)[i].threadid = threadid;
#ifdef DEBUG_PROM
prom_print(RELOC(" cpuid 0x"));
prom_print_hex(i);
prom_print(RELOC(" maps to threadid 0x"));
prom_print_hex(threadid);
prom_print_nl();
prom_print(RELOC(" pir 0x"));
prom_print_hex(RELOC(hmt_thread_data)[i].pir);
prom_print_nl();
#endif
_xPaca[threadid].xHwProcNum = _xPaca[i].xHwProcNum+1;
_xPaca[i+1].active = 1;
RELOC(hmt_thread_data)[i].threadid = i+1;
}
_naca->processorCount *= 2;
} else {
prom_print(RELOC("Processor is not HMT capable\n"));
}
......@@ -1372,7 +1346,9 @@ prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
cpu_pkg, RELOC("reg"),
&getprop_rval, sizeof(getprop_rval));
_prom->cpu = (int)(unsigned long)getprop_rval;
_xPaca[0].xHwProcNum = _prom->cpu;
_xPaca[_prom->cpu].active = 1;
RELOC(cpu_online_map) = 1 << _prom->cpu;
RELOC(boot_cpuid) = _prom->cpu;
#ifdef DEBUG_PROM
prom_print(RELOC("Booting CPU hw index = 0x"));
......@@ -1408,7 +1384,7 @@ prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
* following, regardless of whether we have an SMP
* kernel or not.
*/
if ( _naca->processorCount > 1 )
if (RELOC(ppc64_is_smp))
prom_hold_cpus(mem);
mem = check_display(mem);
......
......@@ -220,10 +220,15 @@ static int rtasd(void *unused)
current->nice = sys_sched_get_priority_max(SCHED_FIFO) + 1;
#endif
cpu = 0;
set_cpus_allowed(current, 1UL << cpu_logical_map(cpu));
repeat:
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu))
continue;
DEBUG("scheduling on %d\n", cpu);
set_cpus_allowed(current, 1UL << cpu);
DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());
while(1) {
do {
memset(logdata, 0, rtas_error_log_max);
error = rtas_call(event_scan, 4, 1, NULL,
......@@ -239,31 +244,23 @@ static int rtasd(void *unused)
} while(error == 0);
DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());
cpu++;
if (cpu >= smp_num_cpus) {
/* Check all cpus for pending events before sleeping*/
if (!first_pass) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
}
}
if (first_pass && surveillance_requested) {
DEBUG("enabling surveillance\n");
if (enable_surveillance())
goto error_vfree;
DEBUG("surveillance enabled\n");
}
} else {
first_pass = 0;
cpu = 0;
}
set_cpus_allowed(current, 1UL << cpu_logical_map(cpu));
/* Check all cpus for pending events before sleeping*/
if (!first_pass) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
}
}
goto repeat;
error_vfree:
vfree(rtas_log_buf);
......
......@@ -168,10 +168,6 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
udbg_puthex((unsigned long)naca);
udbg_putc('\n');
udbg_puts("naca->processorCount = 0x");
udbg_puthex(naca->processorCount);
udbg_putc('\n');
udbg_puts("naca->physicalMemorySize = 0x");
udbg_puthex(naca->physicalMemorySize);
udbg_putc('\n');
......
......@@ -54,13 +54,15 @@
int smp_threads_ready = 0;
volatile int smp_commenced = 0;
int smp_num_cpus = 1;
int smp_tb_synchronized = 0;
spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
unsigned long cache_decay_ticks;
static int max_cpus __initdata = NR_CPUS;
unsigned long cpu_online_map;
/* initialised so it doesn't end up in bss */
unsigned long cpu_online_map = 0;
int boot_cpuid = 0;
int ppc64_is_smp = 0;
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
......@@ -98,7 +100,7 @@ void iSeries_smp_message_recv( struct pt_regs * regs )
int cpu = smp_processor_id();
int msg;
if ( smp_num_cpus < 2 )
if ( num_online_cpus() < 2 )
return;
for ( msg = 0; msg < 4; ++msg )
......@@ -110,11 +112,16 @@ void iSeries_smp_message_recv( struct pt_regs * regs )
static void smp_iSeries_message_pass(int target, int msg, unsigned long data, int wait)
{
int i;
for (i = 0; i < smp_num_cpus; ++i) {
if ( (target == MSG_ALL) ||
for (i = 0; i < NR_CPUS; ++i) {
if (!cpu_online(i))
continue;
if ((target == MSG_ALL) ||
(target == i) ||
((target == MSG_ALL_BUT_SELF) && (i != smp_processor_id())) ) {
set_bit( msg, &iSeries_smp_message[i] );
((target == MSG_ALL_BUT_SELF) &&
(i != smp_processor_id())) ) {
set_bit(msg, &iSeries_smp_message[i]);
HvCall_sendIPI(&(paca[i]));
}
}
......@@ -135,7 +142,7 @@ static int smp_iSeries_numProcs(void)
return np;
}
static int smp_iSeries_probe(void)
static void smp_iSeries_probe(void)
{
unsigned i;
unsigned np;
......@@ -151,7 +158,6 @@ static int smp_iSeries_probe(void)
}
smp_tb_synchronized = 1;
return np;
}
static void smp_iSeries_kick_cpu(int nr)
......@@ -191,8 +197,10 @@ void smp_init_iSeries(void)
ppc_md.smp_probe = smp_iSeries_probe;
ppc_md.smp_kick_cpu = smp_iSeries_kick_cpu;
ppc_md.smp_setup_cpu = smp_iSeries_setup_cpu;
#ifdef CONFIG_PPC_ISERIES
#warning fix for iseries
naca->processorCount = smp_iSeries_numProcs();
#endif
}
......@@ -220,13 +228,10 @@ smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
}
}
static int
smp_chrp_probe(void)
static void smp_chrp_probe(void)
{
if (naca->processorCount > 1)
if (ppc64_is_smp)
openpic_request_IPIs();
return naca->processorCount;
}
static void
......@@ -253,14 +258,14 @@ smp_kick_cpu(int nr)
extern struct gettimeofday_struct do_gtod;
static void smp_space_timers( unsigned nr )
static void smp_space_timers(void)
{
unsigned long offset, i;
int i;
unsigned long offset = tb_ticks_per_jiffy / NR_CPUS;
offset = tb_ticks_per_jiffy / nr;
for ( i=1; i<nr; ++i ) {
paca[i].next_jiffy_update_tb = paca[i-1].next_jiffy_update_tb + offset;
}
for (i = 1; i < NR_CPUS; ++i)
paca[i].next_jiffy_update_tb =
paca[i-1].next_jiffy_update_tb + offset;
}
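The rewritten smp_space_timers() staggers each CPU's next_jiffy_update_tb so the per-jiffy decrementer work does not land on every CPU at the same instant; dividing by NR_CPUS rather than the online count spreads the same jiffy window over the maximum configuration. As a worked example with hypothetical numbers, tb_ticks_per_jiffy = 1200000 and NR_CPUS = 32 give consecutive CPUs an offset of 37500 timebase ticks.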
static void
......@@ -272,7 +277,7 @@ smp_chrp_setup_cpu(int cpu_nr)
if (naca->platform == PLATFORM_PSERIES_LPAR) {
/* timebases already synced under the hypervisor. */
paca[cpu_nr].next_jiffy_update_tb = tb_last_stamp = get_tb();
if (cpu_nr == 0) {
if (cpu_nr == boot_cpuid) {
do_gtod.tb_orig_stamp = tb_last_stamp;
/* Should update do_gtod.stamp_xsec.
* For now we leave it which means the time can be some
......@@ -281,9 +286,9 @@ smp_chrp_setup_cpu(int cpu_nr)
}
smp_tb_synchronized = 1;
} else {
if (cpu_nr == 0) {
if (cpu_nr == boot_cpuid) {
/* wait for all the others */
while (atomic_read(&ready) < smp_num_cpus)
while (atomic_read(&ready) < num_online_cpus())
barrier();
atomic_set(&ready, 1);
/* freeze the timebase */
......@@ -291,9 +296,9 @@ smp_chrp_setup_cpu(int cpu_nr)
mb();
frozen = 1;
set_tb(0, 0);
paca[0].next_jiffy_update_tb = 0;
smp_space_timers(smp_num_cpus);
while (atomic_read(&ready) < smp_num_cpus)
paca[boot_cpuid].next_jiffy_update_tb = 0;
smp_space_timers();
while (atomic_read(&ready) < num_online_cpus())
barrier();
/* thaw the timebase again */
rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
......@@ -317,7 +322,7 @@ smp_chrp_setup_cpu(int cpu_nr)
if (OpenPIC_Addr) {
do_openpic_setup_cpu();
} else {
if (cpu_nr > 0)
if (cpu_nr != boot_cpuid)
xics_setup_cpu();
}
}
......@@ -327,7 +332,10 @@ smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
{
int i;
for (i = 0; i < smp_num_cpus; ++i) {
for (i = 0; i < NR_CPUS; ++i) {
if (!cpu_online(i))
continue;
if (target == MSG_ALL || target == i
|| (target == MSG_ALL_BUT_SELF
&& i != smp_processor_id())) {
......@@ -338,10 +346,8 @@ smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
}
}
static int
smp_xics_probe(void)
static void smp_xics_probe(void)
{
return naca->processorCount;
}
/* This is called very early */
......@@ -429,7 +435,6 @@ static void stop_this_cpu(void *dummy)
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 1, 0);
smp_num_cpus = 1;
}
/*
......@@ -467,7 +472,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
{
struct call_data_struct data;
int ret = -1, cpus = smp_num_cpus-1;
int ret = -1, cpus = num_online_cpus()-1;
int timeout;
if (!cpus)
......@@ -554,29 +559,23 @@ struct thread_struct *current_set[NR_CPUS] = {&init_thread_union, 0};
void __init smp_boot_cpus(void)
{
int i, cpu_nr;
int i, cpu_nr = 0;
struct task_struct *p;
printk("Entering SMP Mode...\n");
smp_num_cpus = 1;
smp_store_cpu_info(0);
cpu_online_map = 1UL;
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
/*
* assume for now that the first cpu booted is
* cpu 0, the master -- Cort
*/
cpu_callin_map[0] = 1;
/* XXX buggy - Anton */
current_thread_info()->cpu = 0;
for (i = 0; i < NR_CPUS; i++) {
paca[i].prof_counter = 1;
paca[i].prof_multiplier = 1;
if(i != 0) {
if (i != boot_cpuid) {
/*
* Processor 0's segment table is statically
* the boot cpu segment table is statically
* initialized to real address 0x5000. The
* Other processor's tables are created and
* initialized here.
......@@ -593,28 +592,33 @@ void __init smp_boot_cpus(void)
*/
cache_decay_ticks = HZ/100;
/* Probe arch for CPUs */
cpu_nr = ppc_md.smp_probe();
ppc_md.smp_probe();
for (i = 0; i < NR_CPUS; i++) {
if (paca[i].active)
cpu_nr++;
}
printk("Probe found %d CPUs\n", cpu_nr);
/*
* only check for cpus we know exist. We keep the callin map
* with cpus at the bottom -- Cort
*/
if (cpu_nr > max_cpus)
cpu_nr = max_cpus;
#ifdef CONFIG_ISERIES
smp_space_timers( cpu_nr );
smp_space_timers();
#endif
printk("Waiting for %d CPUs\n", cpu_nr-1);
for ( i = 1 ; i < cpu_nr; i++ ) {
for (i = 1 ; i < NR_CPUS; i++) {
int c;
struct pt_regs regs;
if (!paca[i].active)
continue;
if (i == boot_cpuid)
continue;
if (num_online_cpus() >= max_cpus)
break;
/* create a process for the processor */
/* we don't care about the values in regs since we'll
never reschedule the forked task. */
......@@ -658,16 +662,15 @@ void __init smp_boot_cpus(void)
{
printk("Processor %d found.\n", i);
/* this sync's the decr's -- Cort */
smp_num_cpus++;
} else {
printk("Processor %d is stuck.\n", i);
}
}
/* Setup CPU 0 last (important) */
ppc_md.smp_setup_cpu(0);
/* Setup boot cpu last (important) */
ppc_md.smp_setup_cpu(boot_cpuid);
if (smp_num_cpus < 2) {
if (num_online_cpus() < 2) {
tb_last_stamp = get_tb();
smp_tb_synchronized = 1;
}
......@@ -689,8 +692,9 @@ void __init smp_callin(void)
smp_store_cpu_info(cpu);
set_dec(paca[cpu].default_decr);
cpu_callin_map[cpu] = 1;
set_bit(smp_processor_id(), &cpu_online_map);
smp_mb();
cpu_callin_map[cpu] = 1;
ppc_md.smp_setup_cpu(cpu);
......
......@@ -340,7 +340,7 @@ xics_init_IRQ( void )
/* Find the server numbers for the boot cpu. */
for (np = find_type_devices("cpu"); np; np = np->next) {
ireg = (uint *)get_property(np, "reg", &ilen);
if (ireg && ireg[0] == hard_smp_processor_id()) {
if (ireg && ireg[0] == smp_processor_id()) {
ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
i = ilen / sizeof(int);
if (ireg && i > 0) {
......@@ -371,10 +371,12 @@ xics_init_IRQ( void )
if (naca->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
for (i = 0; i < naca->processorCount; ++i) {
for (i = 0; i < NR_CPUS; ++i) {
if (!paca[i].active)
continue;
xics_info.per_cpu[i] =
__ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
(ulong)inodes[get_hard_smp_processor_id(i)].size, _PAGE_NO_CACHE);
__ioremap((ulong)inodes[i].addr,
(ulong)inodes[i].size, _PAGE_NO_CACHE);
}
#else
xics_info.per_cpu[0] = __ioremap((ulong)intr_base, intr_size, _PAGE_NO_CACHE);
......@@ -395,7 +397,7 @@ xics_init_IRQ( void )
for (; i < NR_IRQS; ++i)
irq_desc[i].handler = &xics_pic;
ops->cppr_info(0, 0xff);
ops->cppr_info(boot_cpuid, 0xff);
iosync();
if (xics_irq_8259_cascade != -1) {
if (request_irq(xics_irq_8259_cascade + XICS_IRQ_OFFSET, no_action,
......@@ -420,23 +422,6 @@ void xics_isa_init(void)
i8259_init();
}
/*
* Find first logical cpu and return its physical cpu number
*/
static inline u32 physmask(u32 cpumask)
{
int i;
for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1) {
if (cpumask & 1)
return get_hard_smp_processor_id(i);
}
printk(KERN_ERR "xics_set_affinity: invalid irq mask\n");
return default_distrib_server;
}
void xics_set_affinity(unsigned int virq, unsigned long cpumask)
{
irq_desc_t *desc = irq_desc + virq;
......@@ -462,10 +447,13 @@ void xics_set_affinity(unsigned int virq, unsigned long cpumask)
}
/* For the moment only implement delivery to all cpus or one cpu */
if (cpumask == 0xffffffff)
if (cpumask == 0xffffffff) {
newmask = default_distrib_server;
else
newmask = physmask(cpumask);
} else {
if (!(cpumask & cpu_online_map))
goto out;
newmask = find_first_bit(&cpumask, 32);
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
irq, newmask, xics_status[1]);
......
......@@ -15,7 +15,7 @@
#include <linux/config.h>
#include <linux/brlock.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
typedef struct {
unsigned long __softirq_pending;
......@@ -67,8 +67,8 @@ static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < smp_num_cpus; i++)
if (local_irq_count(cpu_logical_map(i)))
for (i = 0; i < NR_CPUS; i++)
if (local_irq_count(i))
return 1;
return 0;
}
......
......@@ -62,7 +62,7 @@ struct machdep_calls {
int msg,
unsigned long data,
int wait);
int (*smp_probe)(void);
void (*smp_probe)(void);
void (*smp_kick_cpu)(int nr);
void (*smp_setup_cpu)(int nr);
......
......@@ -18,7 +18,6 @@ struct naca_struct {
u64 xRamDiskSize; /* In pages */
struct paca_struct *paca; /* Ptr to an array of pacas */
u64 debug_switch; /* Bits to control debug printing */
u16 processorCount; /* # of physical processors */
u16 dCacheL1LineSize; /* Line size of L1 DCache in bytes */
u16 dCacheL1LogLineSize; /* Log-2 of DCache line size */
u16 dCacheL1LinesPerPage; /* DCache lines per page */
......
......@@ -71,7 +71,7 @@ struct paca_struct {
struct ItLpRegSave *xLpRegSavePtr; /* Pointer to LpRegSave for PLIC 0x08 */
u64 xCurrent; /* Pointer to current 0x10 */
u16 xPacaIndex; /* Logical processor number 0x18 */
u16 xHwProcNum; /* Actual Hardware Processor Number 0x1a */
u16 active; /* Is this cpu active? 0x1a */
u32 default_decr; /* Default decrementer value 0x1c */
u64 xHrdIntStack; /* Stack for hardware interrupts 0x20 */
u64 xKsave; /* Saved Kernel stack addr or zero 0x28 */
......
......@@ -39,16 +39,27 @@ extern void smp_send_reschedule_all(void);
#define NO_PROC_ID 0xFF /* No processor magic marker */
/* 1 to 1 mapping on PPC -- Cort */
#define cpu_logical_map(cpu) (cpu)
#define cpu_number_map(x) (x)
#define cpu_online(cpu) test_bit((cpu), &cpu_online_map)
#define cpu_possible(cpu) paca[cpu].active
static inline int num_online_cpus(void)
{
int i, nr = 0;
for (i = 0; i < NR_CPUS; i++)
nr += test_bit(i, &cpu_online_map);
return nr;
}
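num_online_cpus() above tests all NR_CPUS bit positions; since cpu_online_map is a single word, counting only the set bits does the same job. A minimal alternative sketch (not part of the patch, and deliberately avoiding any assumption about which hweight helpers exist here):

	static inline int num_online_cpus(void)
	{
		unsigned long map = cpu_online_map;
		int nr = 0;

		while (map) {
			map &= map - 1;	/* clear the lowest set bit */
			nr++;
		}
		return nr;
	}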
extern volatile unsigned long cpu_callin_map[NR_CPUS];
#define smp_processor_id() (get_paca()->xPacaIndex)
#define hard_smp_processor_id() (get_paca()->xHwProcNum)
#define get_hard_smp_processor_id(CPU) (paca[(CPU)].xHwProcNum)
/* remove when the boot sequence gets rewritten to use the hotplug interface */
extern int boot_cpuid;
extern int ppc64_is_smp;
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
*
......