Commit 3c2dac15 authored by Keith M. Wesolowski's avatar Keith M. Wesolowski

Merge ssh://kernel.bkbits.net/sparc32-2.6

into foobazco.org:/sources/2.5-sparc-smp
parents 91211cd2 60ccf05c
......@@ -4,6 +4,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
......@@ -13,6 +14,9 @@
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/mbus.h>
#include <asm/cpudata.h>
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
struct cpu_iu_info {
int psr_impl;
......@@ -118,17 +122,15 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
char *sparc_cpu_type[NR_CPUS] = { 0 };
char *sparc_fpu_type[NR_CPUS] = { 0 };
char *sparc_cpu_type;
char *sparc_fpu_type;
unsigned int fsr_storage;
void __init cpu_probe(void)
{
int psr_impl, psr_vers, fpu_vers;
int i, cpuid, psr;
cpuid = hard_smp_processor_id();
int i, psr;
psr_impl = ((get_psr()>>28)&0xf);
psr_vers = ((get_psr()>>24)&0xf);
......@@ -141,7 +143,7 @@ void __init cpu_probe(void)
for(i = 0; i<NSPARCCHIPS; i++) {
if(linux_sparc_chips[i].psr_impl == psr_impl)
if(linux_sparc_chips[i].psr_vers == psr_vers) {
sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
sparc_cpu_type = linux_sparc_chips[i].cpu_name;
break;
}
}
......@@ -153,7 +155,7 @@ void __init cpu_probe(void)
for(i = 0; i<NSPARCFPU; i++) {
if(linux_sparc_fpu[i].psr_impl == psr_impl)
if(linux_sparc_fpu[i].fp_vers == fpu_vers) {
sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
sparc_fpu_type = linux_sparc_fpu[i].fp_name;
break;
}
}
......@@ -161,6 +163,6 @@ void __init cpu_probe(void)
if(i == NSPARCFPU) {
printk("DEBUG: psr.impl = 0x%x fsr.vers = 0x%x\n", psr_impl,
fpu_vers);
sparc_fpu_type[cpuid] = linux_sparc_fpu[31].fp_name;
sparc_fpu_type = linux_sparc_fpu[31].fp_name;
}
}
/* devices.c: Initial scan of the prom device tree for important
* Sparc device nodes which we need to find.
*
* This is based on the sparc64 version, but sun4m doesn't always use
* the hardware MIDs, so be careful.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
......@@ -9,73 +12,135 @@
#include <linux/threads.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/system.h>
struct prom_cpuinfo linux_cpus[32];
int linux_num_cpus = 0;
#include <asm/cpudata.h>
extern void cpu_probe(void);
extern void clock_stop_probe(void); /* tadpole.c */
extern void sun4c_probe_memerr_reg(void);
void __init
device_scan(void)
/* Return the name of the PROM property that carries a cpu node's MID.
 * sun4d firmware calls it "cpu-id"; every other model uses "mid".
 */
static char *cpu_mid_prop(void)
{
	return (sparc_cpu_model == sun4d) ? "cpu-id" : "mid";
}
/* Examine PROM node 'nd'.  If it is a "cpu" node and 'compare' accepts it
 * (given the current instance counter), report its node and MID.
 *
 * nd:          PROM node to examine.
 * cur_inst:    running instance counter; bumped for every cpu node that
 *              'compare' rejects, so instance-based searches can count.
 * compare:     predicate; returns 0 when this node is the one wanted.
 * compare_arg: opaque argument forwarded to 'compare'.
 * prom_node:   if non-NULL, receives the matching node.
 * mid:         if non-NULL, receives the node's MID.
 *
 * Returns 0 on a match, -ENODEV otherwise.
 *
 * Fix vs. the merged text: drop the stale prom_getstring() call that probed
 * prom_root_node instead of 'nd' (leftover removed line from the old
 * device_scan), and the unused 'thismid' local that came with it.
 */
static int check_cpu_node(int nd, int *cur_inst,
			  int (*compare)(int, int, void *), void *compare_arg,
			  int *prom_node, int *mid)
{
	char node_str[128];

	prom_getstring(nd, "device_type", node_str, sizeof(node_str));
	if (strcmp(node_str, "cpu"))
		return -ENODEV;

	if (!compare(nd, *cur_inst, compare_arg)) {
		if (prom_node)
			*prom_node = nd;
		if (mid) {
			*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
			/* sun4m software cpuids are the low two bits of
			 * the hardware mid.
			 */
			if (sparc_cpu_model == sun4m)
				*mid &= 3;
		}
		return 0;
	}

	(*cur_inst)++;

	return -ENODEV;
}
/* Walk the PROM device tree looking for a cpu node that satisfies
 * 'compare'.  The root node is tried first, then the root's children
 * (starting from the sibling of the first child, as before).  On success
 * the matching node/mid are reported through the optional out-pointers.
 *
 * Returns 0 when a match was found, -ENODEV otherwise.
 */
static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
			 int *prom_node, int *mid)
{
	int inst = 0;
	int node = prom_root_node;

	if (!check_cpu_node(node, &inst, compare, compare_arg,
			    prom_node, mid))
		return 0;

	for (node = prom_getchild(node);
	     (node = prom_getsibling(node)) != 0; ) {
		if (!check_cpu_node(node, &inst, compare, compare_arg,
				    prom_node, mid))
			return 0;
	}

	return -ENODEV;
}
/* Match a cpu node purely by probe order: accept it when the running
 * instance counter equals the desired instance packed into '_arg'.
 * The node argument is unused here.
 */
static int cpu_instance_compare(int nd, int instance, void *_arg)
{
	int wanted = (int) _arg;

	return (instance == wanted) ? 0 : -ENODEV;
}
/* Find the 'instance'-th cpu node in PROM probe order.  Reports the node
 * and mid through the optional out-pointers; returns 0 on success,
 * -ENODEV when fewer than instance+1 cpu nodes exist.
 */
int cpu_find_by_instance(int instance, int *prom_node, int *mid)
{
	void *arg = (void *) instance;

	return __cpu_find_by(cpu_instance_compare, arg, prom_node, mid);
}
/* Match a cpu node by MID.  On sun4m the software mid is the low two
 * bits of the hardware mid, so a truncated match is accepted there too.
 */
static int cpu_mid_compare(int nd, int instance, void *_arg)
{
	int want = (int) _arg;
	int have = prom_getintdefault(nd, cpu_mid_prop(), 0);

	if (have == want)
		return 0;
	if (sparc_cpu_model == sun4m && (have & 3) == want)
		return 0;

	return -ENODEV;
}
/* Find the cpu node with the given (possibly truncated) MID.  Reports the
 * node through *prom_node when non-NULL; returns 0 on success, -ENODEV
 * when no cpu node carries that mid.
 */
int cpu_find_by_mid(int mid, int *prom_node)
{
	void *arg = (void *) mid;

	return __cpu_find_by(cpu_mid_compare, arg, prom_node, NULL);
}
/* sun4m uses truncated mids since we base the cpuid on the ttable/irqset
 * address (0-3). This gives us the true hardware mid, which might have
 * some other bits set. On 4d hardware and software mids are the same.
 *
 * Returns the hardware mid from the node's mid property, or -ENODEV when
 * the property is absent (that is the default handed to prom_getintdefault).
 */
int cpu_get_hwmid(int prom_node)
{
	return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
}
void __init device_scan(void)
{
prom_printf("Booting Linux...\n");
if(strcmp(node_str, "cpu") == 0) {
linux_num_cpus++;
} else {
int scan;
scan = prom_getchild(prom_root_node);
/* One can look it up in PROM instead */
while ((scan = prom_getsibling(scan)) != 0) {
prom_getstring(scan, "device_type",
node_str, sizeof(node_str));
if (strcmp(node_str, "cpu") == 0) {
linux_cpus[linux_num_cpus].prom_node = scan;
prom_getproperty(scan, "mid",
(char *) &thismid, sizeof(thismid));
linux_cpus[linux_num_cpus].mid = thismid;
printk("Found CPU %d <node=%08lx,mid=%d>\n",
linux_num_cpus, (unsigned long) scan, thismid);
linux_num_cpus++;
}
}
if (linux_num_cpus == 0 && sparc_cpu_model == sun4d) {
scan = prom_getchild(prom_root_node);
for (scan = prom_searchsiblings(scan, "cpu-unit"); scan;
scan = prom_searchsiblings(prom_getsibling(scan), "cpu-unit")) {
int node = prom_getchild(scan);
prom_getstring(node, "device_type",
node_str, sizeof(node_str));
if (strcmp(node_str, "cpu") == 0) {
prom_getproperty(node, "cpu-id",
(char *) &thismid, sizeof(thismid));
linux_cpus[linux_num_cpus].prom_node = node;
linux_cpus[linux_num_cpus].mid = thismid;
printk("Found CPU %d <node=%08lx,mid=%d>\n",
linux_num_cpus, (unsigned long) node, thismid);
linux_num_cpus++;
}
}
}
if (linux_num_cpus == 0) {
printk("No CPU nodes found, cannot continue.\n");
#ifndef CONFIG_SMP
{
int err, cpu_node;
err = cpu_find_by_instance(0, &cpu_node, NULL);
if (err) {
/* Probably a sun4e, Sun is trying to trick us ;-) */
prom_printf("No cpu nodes, cannot continue\n");
prom_halt();
}
printk("Found %d CPU prom device tree node(s).\n", linux_num_cpus);
cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency",
0);
}
#endif /* !CONFIG_SMP */
cpu_probe();
#ifdef CONFIG_SUN_AUXIO
......
......@@ -349,7 +349,7 @@ smp4m_ticker:
* for cross calls. That has a separate entry point below.
*/
maybe_smp4m_msg:
GET_PROCESSOR_MID(o3, o2)
GET_PROCESSOR_MID(o3)
set sun4m_interrupts, %l5
ld [%l5], %o5
sethi %hi(0x60000000), %o4
......@@ -389,7 +389,7 @@ maybe_smp4m_msg:
linux_trap_ipi15_sun4m:
SAVE_ALL
sethi %hi(0x80000000), %o2
GET_PROCESSOR_MID(o0, o1)
GET_PROCESSOR_MID(o0)
set sun4m_interrupts, %l5
ld [%l5], %o5
sll %o0, 12, %o0
......
......@@ -46,6 +46,7 @@
#include <asm/idprom.h>
#include <asm/hardirq.h>
#include <asm/machines.h>
#include <asm/cpudata.h>
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
......@@ -389,13 +390,11 @@ static int __init set_preferred_console(void)
}
console_initcall(set_preferred_console);
extern char *sparc_cpu_type[];
extern char *sparc_fpu_type[];
extern char *sparc_cpu_type;
extern char *sparc_fpu_type;
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
int cpuid = hard_smp_processor_id();
seq_printf(m,
"cpu\t\t: %s\n"
"fpu\t\t: %s\n"
......@@ -405,26 +404,28 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"ncpus probed\t: %d\n"
"ncpus active\t: %d\n"
#ifndef CONFIG_SMP
"BogoMips\t: %lu.%02lu\n"
"CPU0Bogo\t: %lu.%02lu\n"
"CPU0ClkTck\t: %ld\n"
#endif
,
sparc_cpu_type[cpuid] ? : "undetermined",
sparc_fpu_type[cpuid] ? : "undetermined",
sparc_cpu_type ? sparc_cpu_type : "undetermined",
sparc_fpu_type ? sparc_fpu_type : "undetermined",
romvec->pv_romvers,
prom_rev,
romvec->pv_printrev >> 16,
(short) romvec->pv_printrev,
romvec->pv_printrev & 0xffff,
&cputypval,
linux_num_cpus,
num_possible_cpus(),
num_online_cpus()
#ifndef CONFIG_SMP
, loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100
, cpu_data(0).udelay_val/(500000/HZ),
(cpu_data(0).udelay_val/(5000/HZ)) % 100,
cpu_data(0).clock_tick
#endif
);
#ifdef CONFIG_SMP
smp_bogo_info(m);
smp_bogo(m);
#endif
mmu_info(m);
#ifdef CONFIG_SMP
......
......@@ -2,6 +2,7 @@
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*/
#include <asm/head.h>
......@@ -32,23 +33,12 @@
#include <asm/hardirq.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define IRQ_RESCHEDULE 13
#define IRQ_STOP_CPU 14
#define IRQ_CROSS_CALL 15
#include <asm/cpudata.h>
volatile int smp_processors_ready = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready=0;
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
#ifdef NOTUSED
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
#endif
unsigned long smp_proc_in_lock[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
unsigned long cpu_offset[NR_CPUS];
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
......@@ -56,6 +46,9 @@ volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
......@@ -72,40 +65,23 @@ volatile unsigned long ipi_count;
volatile int smp_process_available=0;
volatile int smp_commenced = 0;
/* Not supported on Sparc yet. */
void __init smp_setup(char *str, int *ints)
{
}
/*
* The bootstrap kernel entry code has set these up. Save them for
* a given CPU
*/
void __init smp_store_cpu_info(int id)
{
cpu_data[id].udelay_val = loops_per_jiffy; /* this is it on sparc. */
}
int cpu_node;
void __init smp_commence(void)
{
/*
* Lets the callin's below out of their loop.
*/
local_flush_cache_all();
local_flush_tlb_all();
smp_commenced = 1;
local_flush_cache_all();
local_flush_tlb_all();
}
cpu_data(id).udelay_val = loops_per_jiffy;
extern int cpu_idle(void);
cpu_find_by_mid(id, &cpu_node);
cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
cpu_data(id).mid = cpu_get_hwmid(cpu_node);
if (cpu_data(id).mid < 0)
panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
}
/* Activate a secondary processor. */
int start_secondary(void *unused)
void __init smp_cpus_done(unsigned int max_cpus)
{
prom_printf("Start secondary called. Should not happen\n");
return cpu_idle();
}
void cpu_panic(void)
......@@ -114,11 +90,6 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
/*
* Cycle through the processors asking the PROM to start each one.
*/
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
void __init smp_boot_cpus(void)
......@@ -134,12 +105,11 @@ void __init smp_boot_cpus(void)
void smp_send_reschedule(int cpu)
{
smp_message_pass (cpu, MSG_RESCHEDULE, 0, 0);
/* See sparc64 */
}
void smp_send_stop(void)
{
smp_message_pass (MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0, 0);
}
void smp_flush_cache_all(void)
......@@ -242,22 +212,6 @@ void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
local_flush_sig_insns(mm, insn_addr);
}
/* Reschedule call back. */
void smp_reschedule_irq(void)
{
current->work.need_resched = 1;
}
/* Stopping processors. */
void smp_stop_cpu_irq(void)
{
local_irq_enable();
while(1)
barrier();
}
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
extern unsigned int lvl14_resolution;
/* /proc/profile writes can call this, don't __init it please. */
......@@ -274,36 +228,52 @@ int setup_profiling_timer(unsigned int multiplier)
spin_lock_irqsave(&prof_setup_lock, flags);
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i)) {
load_profile_irq(mid_xlate[i], lvl14_resolution / multiplier);
prof_multiplier[i] = multiplier;
}
if (cpu_possible(i))
load_profile_irq(i, lvl14_resolution / multiplier);
prof_multiplier(i) = multiplier;
}
spin_unlock_irqrestore(&prof_setup_lock, flags);
return 0;
}
void smp_bogo_info(struct seq_file *m)
/* Generic SMP "prepare" hook.  Intentionally empty here; nothing is set
 * up at this stage on sparc32 (NOTE(review): actual CPU bring-up appears
 * to live in the machine-specific smp4m/smp4d paths — confirm).
 */
void __init smp_prepare_cpus(unsigned int maxcpus)
{
}
/* Record the boot CPU's id in its thread_info and mark it both online
 * and physically present.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	int me;

	current_thread_info()->cpu = hard_smp_processor_id();
	me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, phys_cpu_present_map);
}
/* Bring a secondary CPU online.  Not implemented in this generic path;
 * reaching here is fatal (panic() does not return).
 */
int __devinit __cpu_up(unsigned int cpu)
{
	panic("smp doesn't work\n");
}
/* Emit the per-CPU BogoMIPS lines for /proc/cpuinfo.
 *
 * Fix vs. the merged text: the span interleaved the removed
 * cpu_present_map/cpu_data[i] lines with their cpu_online()/cpu_data(i)
 * replacements, leaving a doubled condition and doubled arguments; only
 * the post-merge form is kept.
 */
void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n",
				   i,
				   cpu_data(i).udelay_val/(500000/HZ),
				   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}
/* Emit the per-CPU state lines for /proc/cpuinfo.
 *
 * Fix vs. the merged text: the removed cpu_present_map test was
 * interleaved with its cpu_online() replacement, doubling the condition;
 * only the post-merge form is kept.
 */
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m, "CPU%d\t\t: online\n", i);
	}
}
......@@ -472,9 +472,9 @@ static void sun4d_load_profile_irq(int cpu, unsigned int limit)
static void __init sun4d_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
{
int irq;
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
int cpu;
struct resource r;
int mid;
/* Map the User Timer registers. */
memset(&r, 0, sizeof(r));
......@@ -502,9 +502,12 @@ static void __init sun4d_init_timers(irqreturn_t (*counter_fn)(int, void *, stru
/* Enable user timer free run for CPU 0 in BW */
/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */
for(cpu = 0; cpu < linux_num_cpus; cpu++)
sun4d_load_profile_irq((linux_cpus[cpu].mid >> 3), 0);
cpu = 0;
while (!cpu_find_by_instance(cpu, NULL, &mid)) {
sun4d_load_profile_irq(mid >> 3, 0);
cpu++;
}
#ifdef CONFIG_SMP
{
......
......@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
......@@ -31,11 +32,13 @@
#include <asm/hardirq.h>
#include <asm/sbus.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpudata.h>
#define IRQ_CROSS_CALL 15
extern ctxd_t *srmmu_ctx_table_phys;
extern int linux_num_cpus;
extern void calibrate_delay(void);
......@@ -44,9 +47,7 @@ extern unsigned long cpu_present_map;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern int smp_threads_ready;
extern unsigned char mid_xlate[NR_CPUS];
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned long smp_proc_in_lock[NR_CPUS];
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned long cpu_offset[NR_CPUS];
extern unsigned char boot_cpu_id;
......@@ -158,7 +159,6 @@ extern int start_secondary(void *unused);
* Cycle through the processors asking the PROM to start each one.
*/
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
......@@ -167,34 +167,34 @@ extern unsigned long trapbase_cpu3[];
void __init smp4d_boot_cpus(void)
{
int cpucount = 0;
int i = 0;
int i, mid;
printk("Entering SMP Mode...\n");
for (i = 0; i < NR_CPUS; i++)
cpu_offset[i] = (char *)&cpu_data[i] - (char *)&cpu_data;
cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
if (boot_cpu_id)
current_set[0] = NULL;
local_irq_enable();
cpu_present_map = 0;
for(i=0; i < linux_num_cpus; i++)
cpu_present_map |= (1<<linux_cpus[i].mid);
/* XXX This whole thing has to go. See sparc64. */
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
cpu_present_map |= (1<<mid);
SMP_PRINTK(("cpu_present_map %08lx\n", cpu_present_map));
for(i=0; i < NR_CPUS; i++)
__cpu_number_map[i] = -1;
for(i=0; i < NR_CPUS; i++)
__cpu_logical_map[i] = -1;
for(i=0; i < NR_CPUS; i++)
mid_xlate[i] = i;
__cpu_number_map[boot_cpu_id] = 0;
__cpu_logical_map[0] = boot_cpu_id;
current->cpu = boot_cpu_id;
current_thread_info()->cpu = boot_cpu_id;
smp_store_cpu_info(boot_cpu_id);
smp_setup_percpu_timer();
local_flush_cache_all();
if(linux_num_cpus == 1)
if (cpu_find_by_instance(1, NULL, NULL))
return; /* Not an MP box. */
SMP_PRINTK(("Iterating over CPUs\n"));
for(i = 0; i < NR_CPUS; i++) {
......@@ -215,15 +215,14 @@ void __init smp4d_boot_cpus(void)
p = prev_task(&init_task);
p->cpu = i;
init_idle(p, i);
current_set[i] = p;
current_set[i] = p->thread_info;
unhash_process(p);
for (no = 0; no < linux_num_cpus; no++)
if (linux_cpus[no].mid == i)
break;
for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
&& mid != i; no++) ;
/*
* Initialize the contexts table
......@@ -235,9 +234,9 @@ void __init smp4d_boot_cpus(void)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, linux_cpus[no].prom_node));
SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node));
local_flush_cache_all();
prom_startcpu(linux_cpus[no].prom_node,
prom_startcpu(cpu_data(no).prom_node,
&smp_penguin_ctable, 0, (char *)entry);
SMP_PRINTK(("prom_startcpu returned :)\n"));
......@@ -272,7 +271,7 @@ void __init smp4d_boot_cpus(void)
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i)) {
bogosum += cpu_data[i].udelay_val;
bogosum += cpu_data(i).udelay_val;
smp_highest_cpu = i;
}
}
......@@ -334,12 +333,12 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
{
/* If you make changes here, make sure gcc generates proper code... */
smpfunc_t f asm("i0") = func;
unsigned long a1 asm("i1") = arg1;
unsigned long a2 asm("i2") = arg2;
unsigned long a3 asm("i3") = arg3;
unsigned long a4 asm("i4") = arg4;
unsigned long a5 asm("i5") = arg5;
register smpfunc_t f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
register unsigned long a4 asm("i4") = arg4;
register unsigned long a5 asm("i5") = arg5;
__asm__ __volatile__(
"std %0, [%6]\n\t"
......@@ -426,11 +425,11 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
panic("Bogon SMP message pass.");
}
extern unsigned int prof_multiplier[NR_CPUS];
extern unsigned int prof_counter[NR_CPUS];
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = hard_smp4d_processor_id();
......@@ -451,14 +450,14 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
if(!user_mode(regs))
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
if(!--prof_counter[cpu]) {
if(!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
update_process_times(user);
irq_exit();
prof_counter[cpu] = prof_multiplier[cpu];
prof_counter(cpu) = prof_multiplier(cpu);
}
}
......@@ -468,7 +467,7 @@ static void __init smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
prof_counter[cpu] = prof_multiplier[cpu] = 1;
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
}
......
......@@ -292,8 +292,8 @@ static void __init sun4m_init_timers(irqreturn_t (*counter_fn)(int, void *, stru
prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
prom_halt();
}
if(linux_num_cpus > 1) {
if (!cpu_find_by_instance(1, NULL, NULL)) {
for(cpu = 0; cpu < 4; cpu++)
sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
sun4m_interrupts->set = SUN4M_INT_E14;
......@@ -327,6 +327,7 @@ void __init sun4m_init_IRQ(void)
struct linux_prom_registers int_regs[PROMREG_MAX];
int num_regs;
struct resource r;
int mid;
local_irq_disable();
if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
......@@ -364,10 +365,10 @@ void __init sun4m_init_IRQ(void)
sbus_ioremap(&r, 0, int_regs[4].reg_size, "interrupts_system");
sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
for (i=0; i<linux_num_cpus; i++)
sun4m_interrupts->cpu_intregs[i].clear = ~0x17fff;
if (linux_num_cpus > 1) {
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
sun4m_interrupts->cpu_intregs[mid].clear = ~0x17fff;
if (!cpu_find_by_instance(1, NULL, NULL)) {
/* system wide interrupts go to cpu 0, this should always
* be safe because it is guaranteed to be fitted or OBP doesn't
* come up
......
......@@ -15,6 +15,9 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
......@@ -26,13 +29,13 @@
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/cpudata.h>
#define IRQ_RESCHEDULE 13
#define IRQ_STOP_CPU 14
#define IRQ_CROSS_CALL 15
extern ctxd_t *srmmu_ctx_table_phys;
extern int linux_num_cpus;
extern void calibrate_delay(void);
......@@ -40,10 +43,7 @@ extern volatile int smp_processors_ready;
extern unsigned long cpu_present_map;
extern int smp_num_cpus;
extern int smp_threads_ready;
extern unsigned char mid_xlate[NR_CPUS];
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned long smp_proc_in_lock[NR_CPUS];
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned long cpu_offset[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
......@@ -80,7 +80,7 @@ void __init smp4m_callin(void)
local_flush_cache_all();
local_flush_tlb_all();
set_irq_udt(mid_xlate[boot_cpu_id]);
set_irq_udt(boot_cpu_id);
/* Get our local ticker going. */
smp_setup_percpu_timer();
......@@ -134,7 +134,6 @@ extern int start_secondary(void *unused);
* Cycle through the processors asking the PROM to start each one.
*/
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
......@@ -143,33 +142,32 @@ extern unsigned long trapbase_cpu3[];
void __init smp4m_boot_cpus(void)
{
int cpucount = 0;
int i = 0;
int first, prev;
int i, mid;
printk("Entering SMP Mode...\n");
local_irq_enable();
cpu_present_map = 0;
for(i=0; i < linux_num_cpus; i++)
cpu_present_map |= (1<<i);
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
cpu_present_map |= (1<<mid);
/* XXX cpu_offset is broken -Keith */
for(i=0; i < NR_CPUS; i++) {
cpu_offset[i] = (char *)&cpu_data[i] - (char *)&cpu_data;
cpu_offset[i] = (char *)&(cpu_data(i)) - (char *)&(cpu_data(0));
__cpu_number_map[i] = -1;
__cpu_logical_map[i] = -1;
}
mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
__cpu_number_map[boot_cpu_id] = 0;
__cpu_logical_map[0] = boot_cpu_id;
current->cpu = boot_cpu_id;
current_thread_info()->cpu = boot_cpu_id;
smp_store_cpu_info(boot_cpu_id);
set_irq_udt(mid_xlate[boot_cpu_id]);
set_irq_udt(boot_cpu_id);
smp_setup_percpu_timer();
local_flush_cache_all();
if(linux_num_cpus == 1)
if(cpu_find_by_instance(1, NULL, NULL))
return; /* Not an MP box. */
for(i = 0; i < NR_CPUS; i++) {
if(i == boot_cpu_id)
......@@ -188,9 +186,9 @@ void __init smp4m_boot_cpus(void)
p = prev_task(&init_task);
p->cpu = i;
init_idle(p, i);
current_set[i] = p;
current_set[i] = p->thread_info;
unhash_process(p);
......@@ -208,9 +206,8 @@ void __init smp4m_boot_cpus(void)
/* whirrr, whirrr, whirrrrrrrrr... */
printk("Starting CPU %d at %p\n", i, entry);
mid_xlate[i] = (linux_cpus[i].mid & ~8);
local_flush_cache_all();
prom_startcpu(linux_cpus[i].prom_node,
prom_startcpu(cpu_data(i).prom_node,
&smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
......@@ -241,7 +238,7 @@ void __init smp4m_boot_cpus(void)
unsigned long bogosum = 0;
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i))
bogosum += cpu_data[i].udelay_val;
bogosum += cpu_data(i).udelay_val;
}
printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
cpucount + 1,
......@@ -251,20 +248,6 @@ void __init smp4m_boot_cpus(void)
smp_num_cpus = cpucount + 1;
}
/* Setup CPU list for IRQ distribution scheme. */
first = prev = -1;
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i)) {
if(first == -1)
first = i;
if(prev != -1)
cpu_data[prev].next = i;
cpu_data[i].mid = mid_xlate[i];
prev = i;
}
}
cpu_data[prev].next = first;
/* Free unneeded trap tables */
if (!(cpu_present_map & (1 << 1))) {
ClearPageReserved(virt_to_page(trapbase_cpu1));
......@@ -295,11 +278,11 @@ void __init smp4m_boot_cpus(void)
/* At each hardware IRQ, we get this called to forward IRQ reception
* to the next processor. The caller must disable the IRQ level being
* serviced globally so that there are no double interrupts received.
*
* XXX See sparc64 irq.c.
*/
void smp4m_irq_rotate(int cpu)
{
if(smp_processors_ready)
set_irq_udt(cpu_data[cpu_data[cpu].next].mid);
}
/* Cross calls, in order to work efficiently and atomically do all
......@@ -331,10 +314,10 @@ void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
mask &= ~(1 << me);
for(i = 0; i < 4; i++) {
if(mask & (1 << i))
set_cpu_int(mid_xlate[i], irq);
set_cpu_int(i, irq);
}
} else {
set_cpu_int(mid_xlate[target], irq);
set_cpu_int(target, irq);
}
smp_cpu_in_msg[me]--;
......@@ -385,7 +368,7 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
if(mask & (1 << i)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(mid_xlate[i], IRQ_CROSS_CALL);
set_cpu_int(i, IRQ_CROSS_CALL);
} else {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
......@@ -424,28 +407,28 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
extern unsigned int prof_multiplier[NR_CPUS];
extern unsigned int prof_counter[NR_CPUS];
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
clear_profile_irq(mid_xlate[cpu]);
clear_profile_irq(cpu);
if(!user_mode(regs))
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
if(!--prof_counter[cpu]) {
if(!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
update_process_times(user);
irq_exit();
prof_counter[cpu] = prof_multiplier[cpu];
prof_counter(cpu) = prof_multiplier(cpu);
}
}
......@@ -455,8 +438,8 @@ static void __init smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
prof_counter[cpu] = prof_multiplier[cpu] = 1;
load_profile_irq(mid_xlate[cpu], lvl14_resolution);
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
if(cpu == boot_cpu_id)
enable_pil_irq(14);
......
......@@ -1230,8 +1230,6 @@ static inline void map_kernel(void)
/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);
extern int linux_num_cpus;
void (*poke_srmmu)(void) __initdata = NULL;
extern unsigned long bootmem_init(unsigned long *pages_avail);
......
......@@ -24,13 +24,10 @@
sethi %hi(boot_cpu_id), %reg; \
ldub [%reg + %lo(boot_cpu_id)], %reg;
#define GET_PROCESSOR_MID(reg, tmp) \
#define GET_PROCESSOR_MID(reg) \
rd %tbr, %reg; \
sethi %hi(mid_xlate), %tmp; \
srl %reg, 12, %reg; \
or %tmp, %lo(mid_xlate), %tmp; \
and %reg, 3, %reg; \
ldub [%tmp + %reg], %reg;
and %reg, 3, %reg;
#define GET_PROCESSOR_OFFSET(reg, tmp) \
GET_PROCESSOR_ID(reg) \
......
/* $Id: bugs.h,v 1.5 1995/11/25 02:31:18 davem Exp $
/* $Id: bugs.h,v 1.1 1996/12/26 13:25:20 davem Exp $
* include/asm-sparc/bugs.h: Sparc probes for various bugs.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
static void check_bugs(void) { }
#include <asm/cpudata.h>
#include <linux/config.h>
extern unsigned long loops_per_jiffy;
static void check_bugs(void)
{
#ifndef CONFIG_SMP
cpu_data(0).udelay_val = loops_per_jiffy;
#endif
}
......@@ -306,6 +306,11 @@ extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nr
extern void prom_apply_generic_ranges(int node, int parent,
struct linux_prom_registers *sbusregs, int nregs);
/* CPU probing helpers. */
int cpu_find_by_instance(int instance, int *prom_node, int *mid);
int cpu_find_by_mid(int mid, int *prom_node);
int cpu_get_hwmid(int prom_node);
extern spinlock_t prom_lock;
#endif /* !(__SPARC_OPLIB_H) */
......@@ -15,17 +15,7 @@
#include <linux/cpumask.h>
/* PROM provided per-processor information we need
* to start them all up.
*/
struct prom_cpuinfo {
int prom_node;
int mid;
};
extern int linux_num_cpus; /* number of CPUs probed */
#endif /* !(__ASSEMBLY__) */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SMP
......@@ -33,27 +23,15 @@ extern int linux_num_cpus; /* number of CPUs probed */
#include <asm/ptrace.h>
#include <asm/asi.h>
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
/* Per processor Sparc parameters we need. */
struct cpuinfo_sparc {
unsigned long udelay_val; /* that's it */
unsigned short next;
unsigned short mid;
};
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned long cpu_offset[NR_CPUS];
#include <asm/atomic.h>
/*
* Private routines/data
*/
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
#define cpu_online_map cpu_present_map
extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
......@@ -70,7 +48,7 @@ void smp_boot_cpus(void);
void smp_store_cpu_info(int);
struct seq_file;
void smp_bogo_info(struct seq_file *);
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
......@@ -105,7 +83,6 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in
extern __volatile__ int __cpu_number_map[NR_CPUS];
extern __volatile__ int __cpu_logical_map[NR_CPUS];
extern unsigned long smp_proc_in_lock[NR_CPUS];
extern __inline__ int cpu_logical_map(int cpu)
{
......@@ -189,7 +166,7 @@ extern __inline__ int hard_smp_processor_id(void)
#define MBOX_IDLECPU2 0xFD
#define MBOX_STOPCPU2 0xFE
#endif /* !(CONFIG_SMP) */
#endif /* SMP */
#define NO_PROC_ID 0xFF
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment