Commit 0939a7a4 authored by Linus Torvalds

Import 2.1.30

parent b7b4d2d2
...@@ -142,8 +142,8 @@ S: Trinity College ...@@ -142,8 +142,8 @@ S: Trinity College
S: Cambridge, UK. CB2 1TQ S: Cambridge, UK. CB2 1TQ
N: Thomas Bogendoerfer N: Thomas Bogendoerfer
E: tsbogend@bigbug.franken.de E: tsbogend@alpha.franken.de
D: Lance32 driver D: PCnet32 driver
D: strace for Linux/Alpha D: strace for Linux/Alpha
S: Baumgartenweg 5 S: Baumgartenweg 5
S: 91452 Wilhermsdorf S: 91452 Wilhermsdorf
......
...@@ -266,6 +266,14 @@ L: linux-net@vger.rutgers.edu ...@@ -266,6 +266,14 @@ L: linux-net@vger.rutgers.edu
W: http://www.uk.linux.org/NetNews.html W: http://www.uk.linux.org/NetNews.html
S: Maintained S: Maintained
NETWORKING [IPv4/IPv6]:
P: David S. Miller
M: davem@caip.rutgers.edu
P: Eric Schenk
M: Eric.Schenk@dna.lth.se
L: netdev@roxanne.nuclecu.unam.mx
S: Maintained
PPP PROTOCOL DRIVERS AND COMPRESSORS PPP PROTOCOL DRIVERS AND COMPRESSORS
P: Al Longyear P: Al Longyear
M: longyear@pobox.com M: longyear@pobox.com
...@@ -304,8 +312,8 @@ L: linux-smp@vger.rutgers.edu ...@@ -304,8 +312,8 @@ L: linux-smp@vger.rutgers.edu
S: Maintained S: Maintained
SPARC: SPARC:
P: Eddie C. Dost P: David S. Miller
M: ecd@skynet.be M: davem@caip.rutgers.edu
L: sparclinux@vger.rutgers.edu L: sparclinux@vger.rutgers.edu
S: Maintained S: Maintained
...@@ -369,9 +377,9 @@ P: Jens Maurer ...@@ -369,9 +377,9 @@ P: Jens Maurer
M: jmaurer@cck.uni-kl.de M: jmaurer@cck.uni-kl.de
S: Maintained S: Maintained
LANCE AND LANCE32 NETWORK DRIVER PCNET32 NETWORK DRIVER
P: Thomas Bogendoerfer P: Thomas Bogendoerfer
M: tsbogend@bigbug.franken.de M: tsbogend@alpha.franken.de
L: linux-net@vger.rutgers.edu L: linux-net@vger.rutgers.edu
S: Maintained S: Maintained
......
VERSION = 2 VERSION = 2
PATCHLEVEL = 1 PATCHLEVEL = 1
SUBLEVEL = 29 SUBLEVEL = 30
ARCH = i386 ARCH = i386
......
...@@ -82,8 +82,7 @@ endif ...@@ -82,8 +82,7 @@ endif
# This make dependencies quickly # This make dependencies quickly
# #
fastdep: dummy fastdep: dummy
if [ -n "$(wildcard *.[chS])" ]; then \ $(TOPDIR)/scripts/mkdep *.[chS] > .depend
$(TOPDIR)/scripts/mkdep *.[chS] > .depend; fi
ifdef ALL_SUB_DIRS ifdef ALL_SUB_DIRS
set -e; for i in $(ALL_SUB_DIRS); do $(MAKE) -C $$i fastdep; done set -e; for i in $(ALL_SUB_DIRS); do $(MAKE) -C $$i fastdep; done
endif endif
......
...@@ -81,9 +81,11 @@ EXPORT_SYMBOL(wrusp); ...@@ -81,9 +81,11 @@ EXPORT_SYMBOL(wrusp);
EXPORT_SYMBOL(__kernel_thread); EXPORT_SYMBOL(__kernel_thread);
EXPORT_SYMBOL(start_thread); EXPORT_SYMBOL(start_thread);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic); EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(ip_fast_csum); EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(ip_compute_csum); EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(csum_partial_copy);
/* /*
* The following are specially called from the uaccess assembly stubs. * The following are specially called from the uaccess assembly stubs.
......
...@@ -526,14 +526,9 @@ entSys: ...@@ -526,14 +526,9 @@ entSys:
ret_from_sys_call: ret_from_sys_call:
cmovne $26,0,$19 /* $19 = 0 => non-restartable */ cmovne $26,0,$19 /* $19 = 0 => non-restartable */
/* check bottom half interrupts */ /* check bottom half interrupts */
lda $0,intr_count
ldl $1,0($0)
bne $1,ret_from_handle_bh bne $1,ret_from_handle_bh
lda $2,bh_active ldq $3,bh_active
ldq $3,0($2) ldq $4,bh_mask
lda $2,bh_mask
ldq $4,0($2)
addq $1,1,$1
and $3,$4,$2 and $3,$4,$2
bne $2,handle_bottom_half bne $2,handle_bottom_half
ret_from_handle_bh: ret_from_handle_bh:
...@@ -618,20 +613,13 @@ strace_error: ...@@ -618,20 +613,13 @@ strace_error:
.align 3 .align 3
handle_bottom_half: handle_bottom_half:
/*
* We're called with $0 containing the address of
* 'intr_count' and $1 containing 'intr_count+1'
*/
stl $1,0($0) /* intr_count = 1 */
subq $30,16,$30 subq $30,16,$30
stq $19,0($30) /* save syscall nr */ stq $19,0($30) /* save syscall nr */
stq $20,8($30) /* and error indication (a3) */ stq $20,8($30) /* and error indication (a3) */
jsr $26,do_bottom_half jsr $26,do_bottom_half
lda $0,intr_count
ldq $19,0($30) ldq $19,0($30)
ldq $20,8($30) ldq $20,8($30)
addq $30,16,$30 addq $30,16,$30
stl $31,0($0) /* intr_count = 0 */
br $31,ret_from_handle_bh br $31,ret_from_handle_bh
.align 3 .align 3
......
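The Alpha entry.S hunk above drops the hand-rolled intr_count loads and stores and keys the bottom-half dispatch purely off the pending/enabled masks; the i386 entry.S hunk further down makes the same change. A minimal C model of the new check (editorial sketch, not code from this commit; the extern declarations stand in for the real kernel symbols):

extern unsigned long bh_active;    /* bottom halves marked pending    */
extern unsigned long bh_mask;      /* bottom halves currently enabled */
extern void do_bottom_half(void);  /* runs whatever is pending        */

static void ret_from_sys_call_bh_check(void)
{
        /* Call do_bottom_half() only when an enabled bottom half is
         * pending; intr_count bookkeeping no longer happens in the
         * assembly stub itself. */
        if (bh_active & bh_mask)
                do_bottom_half();
}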
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/stat.h> #include <linux/stat.h>
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/reboot.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -63,7 +64,7 @@ asmlinkage int sys_idle(void) ...@@ -63,7 +64,7 @@ asmlinkage int sys_idle(void)
return ret; return ret;
} }
void hard_reset_now(void) void machine_restart(char * __unused)
{ {
#if defined(CONFIG_ALPHA_SRM) && defined(CONFIG_ALPHA_ALCOR) #if defined(CONFIG_ALPHA_SRM) && defined(CONFIG_ALPHA_ALCOR)
/* who said DEC engineer's have no sense of humor? ;-)) */ /* who said DEC engineer's have no sense of humor? ;-)) */
...@@ -73,6 +74,14 @@ void hard_reset_now(void) ...@@ -73,6 +74,14 @@ void hard_reset_now(void)
halt(); halt();
} }
void machine_halt(void)
{
}
void machine_power_off(void)
{
}
void show_regs(struct pt_regs * regs) void show_regs(struct pt_regs * regs)
{ {
printk("\nps: %04lx pc: [<%016lx>]\n", regs->ps, regs->pc); printk("\nps: %04lx pc: [<%016lx>]\n", regs->ps, regs->pc);
......
...@@ -152,6 +152,7 @@ CONFIG_NET_ETHERNET=y ...@@ -152,6 +152,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_SMC is not set # CONFIG_NET_VENDOR_SMC is not set
# CONFIG_NET_ISA is not set # CONFIG_NET_ISA is not set
CONFIG_NET_EISA=y CONFIG_NET_EISA=y
# CONFIG_PCNET32 is not set
# CONFIG_APRICOT is not set # CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set # CONFIG_CS89x0 is not set
CONFIG_DE4X5=y CONFIG_DE4X5=y
......
...@@ -153,12 +153,8 @@ ENTRY(lcall7) ...@@ -153,12 +153,8 @@ ENTRY(lcall7)
ALIGN ALIGN
.globl ret_from_smpfork .globl ret_from_smpfork
ret_from_smpfork: ret_from_smpfork:
GET_CURRENT(%ebx) btrl $0, SYMBOL_NAME(scheduler_lock)
movl $NO_PROC_ID, SYMBOL_NAME(active_kernel_processor) jmp ret_from_sys_call
lock
btrl $0, SYMBOL_NAME(kernel_flag)
sti
jmp 9f
#endif /* __SMP__ */ #endif /* __SMP__ */
ALIGN ALIGN
...@@ -187,13 +183,7 @@ ENTRY(system_call) ...@@ -187,13 +183,7 @@ ENTRY(system_call)
ret_from_intr: ret_from_intr:
ret_from_sys_call: ret_from_sys_call:
GET_CURRENT(%ebx) GET_CURRENT(%ebx)
cmpl $0,SYMBOL_NAME(intr_count) movl SYMBOL_NAME(bh_mask),%eax
#ifdef __SMP__
jne 2f
#else
jne 1f
#endif
9: movl SYMBOL_NAME(bh_mask),%eax
andl SYMBOL_NAME(bh_active),%eax andl SYMBOL_NAME(bh_active),%eax
jne handle_bottom_half jne handle_bottom_half
2: movl EFLAGS(%esp),%eax # mix EFLAGS and CS 2: movl EFLAGS(%esp),%eax # mix EFLAGS and CS
......
...@@ -5,10 +5,12 @@ ...@@ -5,10 +5,12 @@
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/mca.h> #include <linux/mca.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/in6.h>
#include <asm/semaphore.h> #include <asm/semaphore.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h> #include <asm/io.h>
extern void dump_thread(struct pt_regs *, struct user *); extern void dump_thread(struct pt_regs *, struct user *);
...@@ -25,8 +27,12 @@ EXPORT_SYMBOL(dump_fpu); ...@@ -25,8 +27,12 @@ EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL_NOVERS(__down_failed); EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__up_wakeup); EXPORT_SYMBOL_NOVERS(__up_wakeup);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);
#ifdef __SMP__ #ifdef __SMP__
EXPORT_SYMBOL(apic_reg); /* Needed internally for the I386 inlines */ EXPORT_SYMBOL(apic_reg); /* Needed internally for the I386 inlines */
EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(cpu_data);
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/pgtable.h>
#define CR0_NE 32 #define CR0_NE 32
...@@ -126,15 +127,14 @@ BUILD_IRQ(SECOND,9,0x02) ...@@ -126,15 +127,14 @@ BUILD_IRQ(SECOND,9,0x02)
BUILD_IRQ(SECOND,10,0x04) BUILD_IRQ(SECOND,10,0x04)
BUILD_IRQ(SECOND,11,0x08) BUILD_IRQ(SECOND,11,0x08)
BUILD_IRQ(SECOND,12,0x10) BUILD_IRQ(SECOND,12,0x10)
#ifdef __SMP__
BUILD_MSGIRQ(SECOND,13,0x20)
#else
BUILD_IRQ(SECOND,13,0x20) BUILD_IRQ(SECOND,13,0x20)
#endif
BUILD_IRQ(SECOND,14,0x40) BUILD_IRQ(SECOND,14,0x40)
BUILD_IRQ(SECOND,15,0x80) BUILD_IRQ(SECOND,15,0x80)
#ifdef __SMP__ #ifdef __SMP__
BUILD_RESCHEDIRQ(16) BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
#endif #endif
/* /*
...@@ -146,9 +146,6 @@ static void (*interrupt[17])(void) = { ...@@ -146,9 +146,6 @@ static void (*interrupt[17])(void) = {
IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt, IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt, IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
#ifdef __SMP__
,IRQ16_interrupt
#endif
}; };
static void (*fast_interrupt[16])(void) = { static void (*fast_interrupt[16])(void) = {
...@@ -179,15 +176,6 @@ static void (*bad_interrupt[16])(void) = { ...@@ -179,15 +176,6 @@ static void (*bad_interrupt[16])(void) = {
static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { } static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
#ifdef __SMP__
/*
* On SMP boards, irq13 is used for interprocessor interrupts (IPI's).
*/
static struct irqaction irq13 = { smp_message_irq, SA_INTERRUPT, 0, "IPI", NULL, NULL };
#else
/* /*
* Note that on a 486, we don't want to do a SIGFPE on an irq13 * Note that on a 486, we don't want to do a SIGFPE on an irq13
* as the irq is unreliable, and exception 16 works correctly * as the irq is unreliable, and exception 16 works correctly
...@@ -211,8 +199,6 @@ static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs) ...@@ -211,8 +199,6 @@ static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
static struct irqaction irq13 = { math_error_irq, 0, 0, "math error", NULL, NULL }; static struct irqaction irq13 = { math_error_irq, 0, 0, "math error", NULL, NULL };
#endif
/* /*
* IRQ2 is cascade interrupt to second interrupt controller * IRQ2 is cascade interrupt to second interrupt controller
*/ */
...@@ -335,6 +321,174 @@ int get_smp_prof_list(char *buf) { ...@@ -335,6 +321,174 @@ int get_smp_prof_list(char *buf) {
#endif #endif
/*
* Global interrupt locks for SMP. Allow interrupts to come in on any
* CPU, yet make cli/sti act globally to protect critical regions..
*/
#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
unsigned volatile int global_irq_count;
unsigned int local_irq_count[NR_CPUS];
#define irq_active(cpu) \
(global_irq_count != local_irq_count[cpu])
#define INIT_STUCK 10000000
#define STUCK(x) \
if (!--stuck) {printk(#x " stuck at %08lx, waiting for %08lx\n", where, previous); stuck = INIT_STUCK;}
/*
* "global_cli()" is a special case, in that it can hold the
* interrupts disabled for a longish time, and also because
* we may be doing TLB invalidates when holding the global
* IRQ lock for historical reasons. Thus we may need to check
* SMP invalidate events specially by hand here (but not in
* any normal spinlocks)
*/
static inline void check_smp_invalidate(int cpu)
{
if (test_bit(cpu, &smp_invalidate_needed)) {
clear_bit(cpu, &smp_invalidate_needed);
local_flush_tlb();
}
}
static inline void get_irqlock(int cpu, unsigned long where)
{
static unsigned long previous;
int local_count;
int stuck = INIT_STUCK;
if (set_bit(0,&global_irq_lock)) {
/* do we already hold the lock? */
if ((unsigned char) cpu == global_irq_holder)
return;
/* Uhhuh.. Somebody else got it. Wait.. */
do {
do {
STUCK(irqlock1);
check_smp_invalidate(cpu);
} while (test_bit(0,&global_irq_lock));
} while (set_bit(0,&global_irq_lock));
}
/*
* Ok, we got the lock bit.
* But that's actually just the easy part.. Now
* we need to make sure that nobody else is running
* in an interrupt context.
*/
local_count = local_irq_count[cpu];
/* Are we the only one in an interrupt context? */
while (local_count != global_irq_count) {
/*
* No such luck. Now we need to release the lock,
* _and_ release our interrupt context, because
* otherwise we'd have dead-locks and live-locks
* and other fun things.
*/
atomic_sub(local_count, &global_irq_count);
global_irq_lock = 0;
/*
* Wait for everybody else to go away and release
* their things before trying to get the lock again.
*/
for (;;) {
STUCK(irqlock2);
check_smp_invalidate(cpu);
if (global_irq_count)
continue;
if (global_irq_lock)
continue;
if (!set_bit(0,&global_irq_lock))
break;
}
atomic_add(local_count, &global_irq_count);
}
/*
* Finally.
*/
global_irq_holder = cpu;
previous = where;
}
void __global_cli(void)
{
int cpu = smp_processor_id();
unsigned long where;
__asm__("movl 12(%%esp),%0":"=r" (where));
__cli();
get_irqlock(cpu, where);
}
void __global_sti(void)
{
release_irqlock(smp_processor_id());
__sti();
}
unsigned long __global_save_flags(void)
{
return global_irq_holder == (unsigned char) smp_processor_id();
}
void __global_restore_flags(unsigned long flags)
{
switch (flags) {
case 0:
__global_sti();
break;
case 1:
__global_cli();
break;
default:
printk("global_restore_flags: %08lx (%08lx)\n",
flags, (&flags)[-1]);
}
}
#undef INIT_STUCK
#define INIT_STUCK 200000000
#undef STUCK
#define STUCK \
if (!--stuck) {printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n",irq,cpu,global_irq_holder); stuck = INIT_STUCK;}
static inline void irq_enter(int cpu, int irq)
{
int stuck = INIT_STUCK;
hardirq_enter(cpu);
while (test_bit(0,&global_irq_lock)) {
if ((unsigned char) cpu == global_irq_holder) {
printk("BAD! Local interrupts enabled, global disabled\n");
break;
}
STUCK;
/* nothing */;
}
atomic_inc(&intr_count);
}
static inline void irq_exit(int cpu, int irq)
{
__cli();
atomic_dec(&intr_count);
hardirq_exit(cpu);
release_irqlock(cpu);
}
#else
#define irq_enter(cpu, irq) do { } while (0)
#define irq_exit(cpu, irq) do { } while (0)
#endif
/* /*
* do_IRQ handles IRQ's that have been installed without the * do_IRQ handles IRQ's that have been installed without the
...@@ -345,20 +499,16 @@ int get_smp_prof_list(char *buf) { ...@@ -345,20 +499,16 @@ int get_smp_prof_list(char *buf) {
*/ */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs) asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{ {
struct irqaction * action = *(irq + irq_action); struct irqaction * action;
int do_random = 0; int do_random, cpu = smp_processor_id();
lock_kernel();
atomic_inc(&intr_count);
#ifdef __SMP__
if(smp_threads_ready && active_kernel_processor!=smp_processor_id())
panic("IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif
irq_enter(cpu, irq);
kstat.interrupts[irq]++; kstat.interrupts[irq]++;
#ifdef __SMP_PROF__
int_count[smp_processor_id()][irq]++; /* slow interrupts run with interrupts enabled */
#endif __sti();
action = *(irq + irq_action);
do_random = 0;
while (action) { while (action) {
do_random |= action->flags; do_random |= action->flags;
action->handler(irq, action->dev_id, regs); action->handler(irq, action->dev_id, regs);
...@@ -366,8 +516,7 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs) ...@@ -366,8 +516,7 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
} }
if (do_random & SA_SAMPLE_RANDOM) if (do_random & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq); add_interrupt_randomness(irq);
atomic_dec(&intr_count); irq_exit(cpu, irq);
unlock_kernel();
} }
/* /*
...@@ -377,21 +526,13 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs) ...@@ -377,21 +526,13 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
*/ */
asmlinkage void do_fast_IRQ(int irq) asmlinkage void do_fast_IRQ(int irq)
{ {
struct irqaction * action = *(irq + irq_action); struct irqaction * action;
int do_random = 0; int do_random, cpu = smp_processor_id();
lock_kernel();
intr_count++;
#ifdef __SMP__
/* IRQ 13 is allowed - that's a flush tlb */
if(smp_threads_ready && active_kernel_processor!=smp_processor_id() && irq!=13)
panic("fast_IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif
irq_enter(cpu, irq);
kstat.interrupts[irq]++; kstat.interrupts[irq]++;
#ifdef __SMP_PROF__ action = *(irq + irq_action);
int_count[smp_processor_id()][irq]++; do_random = 0;
#endif
while (action) { while (action) {
do_random |= action->flags; do_random |= action->flags;
action->handler(irq, action->dev_id, NULL); action->handler(irq, action->dev_id, NULL);
...@@ -399,8 +540,7 @@ asmlinkage void do_fast_IRQ(int irq) ...@@ -399,8 +540,7 @@ asmlinkage void do_fast_IRQ(int irq)
} }
if (do_random & SA_SAMPLE_RANDOM) if (do_random & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq); add_interrupt_randomness(irq);
intr_count--; irq_exit(cpu, irq);
unlock_kernel();
} }
int setup_x86_irq(int irq, struct irqaction * new) int setup_x86_irq(int irq, struct irqaction * new)
...@@ -561,7 +701,9 @@ void init_IRQ(void) ...@@ -561,7 +701,9 @@ void init_IRQ(void)
/* This bit is a hack because we don't send timer messages to all processors yet */ /* This bit is a hack because we don't send timer messages to all processors yet */
/* It has to be here .. it doesn't work if you put it down the bottom - assembler explodes 8) */ /* It has to be here .. it doesn't work if you put it down the bottom - assembler explodes 8) */
#ifdef __SMP__ #ifdef __SMP__
set_intr_gate(0x20+i, interrupt[i]); /* IRQ '16' - IPI for rescheduling */ set_intr_gate(0x20+i, reschedule_interrupt); /* IRQ '16' - IPI for rescheduling */
set_intr_gate(0x21+i, invalidate_interrupt); /* IRQ '17' - IPI for invalidation */
set_intr_gate(0x22+i, stop_cpu_interrupt); /* IRQ '18' - IPI for CPU halt */
#endif #endif
request_region(0x20,0x20,"pic1"); request_region(0x20,0x20,"pic1");
request_region(0xa0,0x20,"pic2"); request_region(0xa0,0x20,"pic2");
......
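The large irq.c addition above introduces the SMP-global interrupt lock: per the new comment, cli()/sti() are meant to act machine-wide, with __global_cli() spinning for global_irq_lock and then waiting until no other CPU is inside an interrupt handler. A usage sketch of the pattern this preserves for drivers (editorial, not from the commit; shared_counter is a made-up stand-in for data shared with an interrupt handler, and the macro-to-__global_* mapping lives in headers that are not part of this hunk):

static unsigned long shared_counter;           /* hypothetical shared state */

static void update_shared_counter(void)
{
        unsigned long flags;

        save_flags(flags);    /* __global_save_flags() on SMP: 1 if this
                                 CPU already holds the global lock       */
        cli();                /* __global_cli(): grab global_irq_lock,
                                 then wait for irq handlers to drain     */
        shared_counter++;     /* safe against handlers on every CPU      */
        restore_flags(flags); /* __global_restore_flags(): sti again or
                                 stay cli'd, matching the saved state    */
}

The STUCK()/INIT_STUCK macros in the same hunk are a spin watchdog: if the lock or the interrupt count never clears, they periodically printk() a diagnostic instead of hanging silently.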
...@@ -29,6 +29,10 @@ ...@@ -29,6 +29,10 @@
#include <linux/unistd.h> #include <linux/unistd.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/reboot.h>
#if defined(CONFIG_APM) && defined(CONFIG_APM_POWER_OFF)
#include <linux/apm_bios.h>
#endif
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -122,6 +126,7 @@ asmlinkage int sys_idle(void) ...@@ -122,6 +126,7 @@ asmlinkage int sys_idle(void)
if (hlt_works_ok && !hlt_counter && !need_resched) if (hlt_works_ok && !hlt_counter && !need_resched)
__asm__("hlt"); __asm__("hlt");
} }
run_task_queue(&tq_scheduler);
if (need_resched) if (need_resched)
start_idle = 0; start_idle = 0;
schedule(); schedule();
...@@ -134,30 +139,6 @@ asmlinkage int sys_idle(void) ...@@ -134,30 +139,6 @@ asmlinkage int sys_idle(void)
#else #else
/*
* In the SMP world we hlt outside of kernel syscall rather than within
* so as to get the right locking semantics.
*/
asmlinkage int sys_idle(void)
{
int ret = -EPERM;
lock_kernel();
if(current->pid != 0)
goto out;
#ifdef __SMP_PROF__
smp_spins_sys_idle[smp_processor_id()]+=
smp_spins_syscall_cur[smp_processor_id()];
#endif
current->counter= -100;
schedule();
ret = 0;
out:
unlock_kernel();
return ret;
}
/* /*
* This is being executed in task 0 'user space'. * This is being executed in task 0 'user space'.
*/ */
...@@ -168,32 +149,17 @@ int cpu_idle(void *unused) ...@@ -168,32 +149,17 @@ int cpu_idle(void *unused)
{ {
if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched) if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
__asm("hlt"); __asm("hlt");
if(0==(read_smp_counter(&smp_process_available))) run_task_queue(&tq_scheduler);
continue; schedule();
while(0x80000000 & smp_process_available)
;
cli();
while(set_bit(31,&smp_process_available))
while(test_bit(31,&smp_process_available))
{
/*
* Oops.. This is kind of important in some cases...
*/
if(clear_bit(smp_processor_id(), &smp_invalidate_needed))
local_flush_tlb();
}
if (0==(read_smp_counter(&smp_process_available))) {
clear_bit(31,&smp_process_available);
sti();
continue;
}
smp_process_available--;
clear_bit(31,&smp_process_available);
sti();
idle();
} }
} }
asmlinkage int sys_idle(void)
{
cpu_idle(NULL);
return 0;
}
#endif #endif
/* /*
...@@ -297,11 +263,13 @@ static inline void kb_wait(void) ...@@ -297,11 +263,13 @@ static inline void kb_wait(void)
break; break;
} }
void hard_reset_now (void) void machine_restart(char * __unused)
{ {
if(!reboot_thru_bios) { if(!reboot_thru_bios) {
#if 0
sti(); sti();
#endif
/* rebooting needs to touch the page at absolute addr 0 */ /* rebooting needs to touch the page at absolute addr 0 */
*((unsigned short *)__va(0x472)) = reboot_mode; *((unsigned short *)__va(0x472)) = reboot_mode;
for (;;) { for (;;) {
...@@ -399,6 +367,18 @@ void hard_reset_now (void) ...@@ -399,6 +367,18 @@ void hard_reset_now (void)
: "i" ((void *) (0x1000 - sizeof (real_mode_switch)))); : "i" ((void *) (0x1000 - sizeof (real_mode_switch))));
} }
void machine_halt(void)
{
}
void machine_power_off(void)
{
#if defined(CONFIG_APM) && defined(CONFIG_APM_POWER_OFF)
apm_set_power_state(APM_STATE_OFF);
#endif
}
void show_regs(struct pt_regs * regs) void show_regs(struct pt_regs * regs)
{ {
printk("\n"); printk("\n");
......
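With the process.c changes above, the separate SMP implementation of sys_idle() is gone: sys_idle() now just calls cpu_idle(), and both the SMP loop and the uniprocessor idle path pick up a run_task_queue(&tq_scheduler) call. The resulting SMP idle loop is essentially the following (editorial paraphrase of the new code, since the enclosing loop is only partly visible in the hunk):

int cpu_idle(void *unused)
{
        for (;;) {
                /* halt until the next interrupt when it is safe to do so */
                if (cpu_data[smp_processor_id()].hlt_works_ok &&
                    !hlt_counter && !need_resched)
                        __asm__("hlt");
                run_task_queue(&tq_scheduler);   /* new in this commit */
                schedule();
        }
}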
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/io.h> #include <asm/io.h>
extern unsigned long start_kernel, _etext;
/* /*
* Some notes on processor bugs: * Some notes on processor bugs:
* *
...@@ -122,9 +124,6 @@ unsigned long apic_retval; /* Just debugging the assembler.. */ ...@@ -122,9 +124,6 @@ unsigned long apic_retval; /* Just debugging the assembler.. */
unsigned char *kernel_stacks[NR_CPUS]; /* Kernel stack pointers for CPU's (debugging) */ unsigned char *kernel_stacks[NR_CPUS]; /* Kernel stack pointers for CPU's (debugging) */
static volatile unsigned char smp_cpu_in_msg[NR_CPUS]; /* True if this processor is sending an IPI */ static volatile unsigned char smp_cpu_in_msg[NR_CPUS]; /* True if this processor is sending an IPI */
static volatile unsigned long smp_msg_data; /* IPI data pointer */
static volatile int smp_src_cpu; /* IPI sender processor */
static volatile int smp_msg_id; /* Message being sent */
volatile unsigned long kernel_flag=0; /* Kernel spinlock */ volatile unsigned long kernel_flag=0; /* Kernel spinlock */
volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */ volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
...@@ -491,6 +490,7 @@ int smp_scan_config(unsigned long base, unsigned long length) ...@@ -491,6 +490,7 @@ int smp_scan_config(unsigned long base, unsigned long length)
*/ */
nlong = boot_cpu_id<<24; /* Dummy 'self' for bootup */ nlong = boot_cpu_id<<24; /* Dummy 'self' for bootup */
cpu_logical_map[0] = boot_cpu_id; cpu_logical_map[0] = boot_cpu_id;
global_irq_holder = boot_cpu_id;
printk("Processors: %d\n", num_processors); printk("Processors: %d\n", num_processors);
/* /*
...@@ -1051,7 +1051,6 @@ void smp_boot_cpus(void) ...@@ -1051,7 +1051,6 @@ void smp_boot_cpus(void)
SMP_PRINTK(("Boot done.\n")); SMP_PRINTK(("Boot done.\n"));
} }
/* /*
* A non wait message cannot pass data or cpu source info. This current setup * A non wait message cannot pass data or cpu source info. This current setup
* is only safe because the kernel lock owner is the only person who can send a message. * is only safe because the kernel lock owner is the only person who can send a message.
...@@ -1070,9 +1069,8 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1070,9 +1069,8 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
unsigned long cfg; unsigned long cfg;
unsigned long target_map; unsigned long target_map;
int p=smp_processor_id(); int p=smp_processor_id();
int irq=0x2d; /* IRQ 13 */ int irq;
int ct=0; int ct=0;
static volatile int message_cpu = NO_PROC_ID;
/* /*
* During boot up send no messages * During boot up send no messages
...@@ -1088,10 +1086,23 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1088,10 +1086,23 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
* but is not critical. * but is not critical.
*/ */
if(msg==MSG_RESCHEDULE) /* Reschedules we do via trap 0x30 */ switch (msg) {
{ case MSG_RESCHEDULE:
irq=0x30; irq = 0x30;
if(smp_cpu_in_msg[p]) if (smp_cpu_in_msg[p])
return;
break;
case MSG_INVALIDATE_TLB:
irq = 0x31;
break;
case MSG_STOP_CPU:
irq = 0x32;
break;
default:
printk("Unknown SMP message %d\n", msg);
return; return;
} }
...@@ -1103,31 +1114,12 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1103,31 +1114,12 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
* I got to notice this bug... * I got to notice this bug...
*/ */
if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU && msg!=MSG_RESCHEDULE)
{
panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
smp_processor_id(),msg,message_cpu, smp_msg_id);
}
message_cpu=smp_processor_id();
/* /*
* We are busy * We are busy
*/ */
smp_cpu_in_msg[p]++; smp_cpu_in_msg[p]++;
/*
* Reschedule is currently special
*/
if(msg!=MSG_RESCHEDULE)
{
smp_src_cpu=p;
smp_msg_id=msg;
smp_msg_data=data;
}
/* printk("SMP message pass #%d to %d of %d\n", /* printk("SMP message pass #%d to %d of %d\n",
p, msg, target);*/ p, msg, target);*/
...@@ -1150,7 +1142,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1150,7 +1142,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
*/ */
if(ct==1000) if(ct==1000)
printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id()); printk("CPU #%d: previous IPI still not cleared after 10mS", p);
/* /*
* Program the APIC to deliver the IPI * Program the APIC to deliver the IPI
...@@ -1171,7 +1163,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1171,7 +1163,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
{ {
cfg|=APIC_DEST_ALLBUT; cfg|=APIC_DEST_ALLBUT;
target_map=cpu_present_map; target_map=cpu_present_map;
cpu_callin_map[0]=(1<<smp_src_cpu); cpu_callin_map[0]=(1<<p);
} }
else if(target==MSG_ALL) else if(target==MSG_ALL)
{ {
...@@ -1197,11 +1189,30 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1197,11 +1189,30 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
switch(wait) switch(wait)
{ {
int stuck;
case 1: case 1:
while(cpu_callin_map[0]!=target_map); /* Spin on the pass */ stuck = 50000000;
while(cpu_callin_map[0]!=target_map) {
--stuck;
if (!stuck) {
printk("stuck on target_map IPI wait\n");
break;
}
}
break; break;
case 2: case 2:
while(smp_invalidate_needed); /* Wait for invalidate map to clear */ stuck = 50000000;
/* Wait for invalidate map to clear */
while (smp_invalidate_needed) {
/* Take care of "crossing" invalidates */
if (test_bit(p, &smp_invalidate_needed))
clear_bit(p, &smp_invalidate_needed);
--stuck;
if (!stuck) {
printk("stuck on smp_invalidate_needed IPI wait\n");
break;
}
}
break; break;
} }
...@@ -1210,7 +1221,6 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -1210,7 +1221,6 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
*/ */
smp_cpu_in_msg[p]--; smp_cpu_in_msg[p]--;
message_cpu=NO_PROC_ID;
} }
/* /*
...@@ -1233,15 +1243,15 @@ void smp_flush_tlb(void) ...@@ -1233,15 +1243,15 @@ void smp_flush_tlb(void)
* bus locked or. * bus locked or.
*/ */
smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id()); smp_invalidate_needed=cpu_present_map;
/* /*
* Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will * Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will
* ensure they don't do a spurious flush tlb or miss one. * ensure they don't do a spurious flush tlb or miss one.
*/ */
save_flags(flags); __save_flags(flags);
cli(); __cli();
smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2); smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
/* /*
...@@ -1250,7 +1260,7 @@ void smp_flush_tlb(void) ...@@ -1250,7 +1260,7 @@ void smp_flush_tlb(void)
local_flush_tlb(); local_flush_tlb();
restore_flags(flags); __restore_flags(flags);
/* /*
* Completed. * Completed.
...@@ -1262,69 +1272,34 @@ void smp_flush_tlb(void) ...@@ -1262,69 +1272,34 @@ void smp_flush_tlb(void)
/* /*
* Reschedule call back * Reschedule call back
*/ */
asmlinkage void smp_reschedule_interrupt(void)
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{ {
lock_kernel();
intr_count++;
if(smp_processor_id()!=active_kernel_processor)
panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
smp_processor_id(), active_kernel_processor);
need_resched=1; need_resched=1;
/* Clear the IPI */ /* Clear the IPI */
apic_read(APIC_SPIV); /* Dummy read */ apic_read(APIC_SPIV); /* Dummy read */
apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */ apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
intr_count--;
unlock_kernel();
} }
/* /*
* Message call back. * Invalidate call-back
*/ */
asmlinkage void smp_invalidate_interrupt(void)
void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
{ {
int i=smp_processor_id(); if (clear_bit(smp_processor_id(), &smp_invalidate_needed))
/* static int n=0;
if(n++<NR_CPUS)
printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
switch(smp_msg_id)
{
case 0: /* IRQ 13 testing - boring */
return;
/*
* A TLB flush is needed.
*/
case MSG_INVALIDATE_TLB:
if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
local_flush_tlb(); local_flush_tlb();
set_bit(i, (unsigned long *)&cpu_callin_map[0]);
/* cpu_callin_map[0]|=1<<smp_processor_id();*/
break;
/*
* Halt other CPU's for a panic or reboot
*/
case MSG_STOP_CPU:
while(1)
{
if(cpu_data[smp_processor_id()].hlt_works_ok)
__asm__("hlt");
}
default:
printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
break;
}
/*
* Clear the IPI, so we can receive future IPI's
*/
/* Clear the IPI */
apic_read(APIC_SPIV); /* Dummy read */ apic_read(APIC_SPIV); /* Dummy read */
apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */ apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
} }
/*
* CPU halt call-back
*/
asmlinkage void smp_stop_cpu_interrupt(void)
{
if (cpu_data[smp_processor_id()].hlt_works_ok)
for(;;) __asm__("hlt");
for (;;) ;
}
...@@ -388,7 +388,7 @@ static inline void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -388,7 +388,7 @@ static inline void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
basically because we don't yet share IRQ's around. This message is basically because we don't yet share IRQ's around. This message is
rigged to be safe on the 386 - basically it's a hack, so don't look rigged to be safe on the 386 - basically it's a hack, so don't look
closely for now.. */ closely for now.. */
/*smp_message_pass(MSG_ALL_BUT_SELF, MSG_RESCHEDULE, 0L, 0); */ smp_message_pass(MSG_ALL_BUT_SELF, MSG_RESCHEDULE, 0L, 0);
#ifdef CONFIG_MCA #ifdef CONFIG_MCA
if( MCA_bus ) { if( MCA_bus ) {
......
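Taken together, the irq.c and smp.c hunks replace the single IRQ-13 cross-CPU message with three dedicated IPI vectors, and the timer tick once again broadcasts MSG_RESCHEDULE. The mapping that smp_message_pass() now uses, pulled out as a standalone helper for reference (editorial sketch; the MSG_* constants live in the real headers, the enum values below are placeholders):

enum {                          /* placeholder values, see the real <asm/smp.h> */
        MSG_RESCHEDULE = 1,
        MSG_INVALIDATE_TLB,
        MSG_STOP_CPU,
};

static int ipi_vector(int msg)
{
        switch (msg) {
        case MSG_RESCHEDULE:     return 0x30;  /* reschedule_interrupt */
        case MSG_INVALIDATE_TLB: return 0x31;  /* invalidate_interrupt */
        case MSG_STOP_CPU:       return 0x32;  /* stop_cpu_interrupt   */
        default:                 return 0;     /* unknown message      */
        }
}

The matching handlers are installed by the set_intr_gate() calls in the init_IRQ() hunk above.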
...@@ -231,7 +231,6 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code) ...@@ -231,7 +231,6 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
asmlinkage void do_nmi(struct pt_regs * regs, long error_code) asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{ {
lock_kernel();
#ifdef CONFIG_SMP_NMI_INVAL #ifdef CONFIG_SMP_NMI_INVAL
smp_flush_tlb_rcv(); smp_flush_tlb_rcv();
#else #else
...@@ -241,7 +240,6 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code) ...@@ -241,7 +240,6 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
printk("power saving mode enabled.\n"); printk("power saving mode enabled.\n");
#endif #endif
#endif #endif
unlock_kernel();
} }
asmlinkage void do_debug(struct pt_regs * regs, long error_code) asmlinkage void do_debug(struct pt_regs * regs, long error_code)
...@@ -305,18 +303,14 @@ void math_error(void) ...@@ -305,18 +303,14 @@ void math_error(void)
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code) asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{ {
lock_kernel();
ignore_irq13 = 1; ignore_irq13 = 1;
math_error(); math_error();
unlock_kernel();
} }
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs, asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
long error_code) long error_code)
{ {
lock_kernel();
printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
unlock_kernel();
} }
/* /*
...@@ -328,7 +322,6 @@ asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs, ...@@ -328,7 +322,6 @@ asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
*/ */
asmlinkage void math_state_restore(void) asmlinkage void math_state_restore(void)
{ {
lock_kernel();
__asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */ __asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */
/* /*
...@@ -341,7 +334,7 @@ asmlinkage void math_state_restore(void) ...@@ -341,7 +334,7 @@ asmlinkage void math_state_restore(void)
*/ */
#ifndef __SMP__ #ifndef __SMP__
if (last_task_used_math == current) if (last_task_used_math == current)
goto out; return;
if (last_task_used_math) if (last_task_used_math)
__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387)); __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
else else
...@@ -360,10 +353,6 @@ asmlinkage void math_state_restore(void) ...@@ -360,10 +353,6 @@ asmlinkage void math_state_restore(void)
current->used_math = 1; current->used_math = 1;
} }
current->flags|=PF_USEDFPU; /* So we fnsave on switch_to() */ current->flags|=PF_USEDFPU; /* So we fnsave on switch_to() */
#ifndef __SMP__
out:
#endif
unlock_kernel();
} }
#ifndef CONFIG_MATH_EMULATION #ifndef CONFIG_MATH_EMULATION
......
...@@ -48,4 +48,5 @@ EXPORT_SYMBOL_NOVERS(memcpy); ...@@ -48,4 +48,5 @@ EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset); EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(__down_failed); EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__up_wakeup); EXPORT_SYMBOL_NOVERS(__up_wakeup);
...@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=m ...@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_NETLINK=y CONFIG_NETLINK=y
CONFIG_RTNETLINK=y CONFIG_RTNETLINK=y
CONFIG_FIREWALL=y CONFIG_FIREWALL=y
# CONFIG_NET_SECURITY is not set
CONFIG_NET_ALIAS=y CONFIG_NET_ALIAS=y
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
......
/* $Id: setup.c,v 1.81 1997/01/29 10:32:55 davem Exp $ /* $Id: setup.c,v 1.82 1997/03/08 08:27:04 ecd Exp $
* linux/arch/sparc/kernel/setup.c * linux/arch/sparc/kernel/setup.c
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
...@@ -115,6 +115,7 @@ unsigned int boot_flags; ...@@ -115,6 +115,7 @@ unsigned int boot_flags;
extern char *console_fb_path; extern char *console_fb_path;
static int console_fb = 0; static int console_fb = 0;
#endif #endif
static unsigned long memory_size = 0;
void kernel_enter_debugger(void) void kernel_enter_debugger(void)
{ {
...@@ -215,8 +216,23 @@ __initfunc(static void boot_flags_init(char *commands)) ...@@ -215,8 +216,23 @@ __initfunc(static void boot_flags_init(char *commands))
console_fb = 1; console_fb = 1;
console_fb_path = commands; console_fb_path = commands;
} }
} } else
#endif #endif
if (!strncmp(commands, "mem=", 4)) {
/*
* "mem=XXX[kKmM] overrides the PROM-reported
* memory size.
*/
memory_size = simple_strtoul(commands + 4,
&commands, 0);
if (*commands == 'K' || *commands == 'k') {
memory_size <<= 10;
commands++;
} else if (*commands=='M' || *commands=='m') {
memory_size <<= 20;
commands++;
}
}
while (*commands && *commands != ' ') while (*commands && *commands != ' ')
commands++; commands++;
} }
...@@ -337,15 +353,35 @@ __initfunc(void setup_arch(char **cmdline_p, ...@@ -337,15 +353,35 @@ __initfunc(void setup_arch(char **cmdline_p,
*memory_start_p = (((unsigned long) &end)); *memory_start_p = (((unsigned long) &end));
if(!packed) { if(!packed) {
for(i=0; sp_banks[i].num_bytes != 0; i++) for(i=0; sp_banks[i].num_bytes != 0; i++) {
end_of_phys_memory = sp_banks[i].base_addr + end_of_phys_memory = sp_banks[i].base_addr +
sp_banks[i].num_bytes; sp_banks[i].num_bytes;
if (memory_size) {
if (end_of_phys_memory > memory_size) {
sp_banks[i].num_bytes -=
(end_of_phys_memory - memory_size);
end_of_phys_memory = memory_size;
sp_banks[++i].base_addr = 0xdeadbeef;
sp_banks[i].num_bytes = 0;
}
}
}
} else { } else {
unsigned int sum = 0; unsigned int sum = 0;
for(i = 0; sp_banks[i].num_bytes != 0; i++) for(i = 0; sp_banks[i].num_bytes != 0; i++) {
sum += sp_banks[i].num_bytes; sum += sp_banks[i].num_bytes;
if (memory_size) {
if (sum > memory_size) {
sp_banks[i].num_bytes -=
(sum - memory_size);
sum = memory_size;
sp_banks[++i].base_addr = 0xdeadbeef;
sp_banks[i].num_bytes = 0;
break;
}
}
}
end_of_phys_memory = sum; end_of_phys_memory = sum;
} }
......
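The setup.c hunk above adds a mem=XXX[kKmM] boot option that clamps the memory size reported by the PROM. The suffix handling, restated as a small user-space program for clarity (editorial sketch: strtoul() replaces the kernel's simple_strtoul(), everything else mirrors the new code):

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_mem_option(const char *arg)
{
        char *end;
        unsigned long size = strtoul(arg, &end, 0);

        if (*end == 'K' || *end == 'k')
                size <<= 10;            /* kilobytes */
        else if (*end == 'M' || *end == 'm')
                size <<= 20;            /* megabytes */
        return size;
}

int main(void)
{
        printf("%lu\n", parse_mem_option("mem=64M" + 4));   /* prints 67108864 */
        return 0;
}

In setup_arch(), the banks are then trimmed: once the running total passes memory_size, the current bank is shortened and the next entry is terminated with the 0xdeadbeef base / zero length sentinel pair.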
/* $Id: sparc_ksyms.c,v 1.47 1997/03/03 16:51:41 jj Exp $ /* $Id: sparc_ksyms.c,v 1.49 1997/03/15 07:47:45 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/in6.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/delay.h> #include <asm/delay.h>
...@@ -27,6 +28,7 @@ ...@@ -27,6 +28,7 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/user.h> #include <asm/user.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/checksum.h>
#ifdef CONFIG_SBUS #ifdef CONFIG_SBUS
#include <asm/sbus.h> #include <asm/sbus.h>
#include <asm/dma.h> #include <asm/dma.h>
...@@ -52,7 +54,6 @@ extern void *__memscan_zero(void *, size_t); ...@@ -52,7 +54,6 @@ extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t); extern void *__memscan_generic(void *, int, size_t);
extern int __memcmp(const void *, const void *, __kernel_size_t); extern int __memcmp(const void *, const void *, __kernel_size_t);
extern int __strncmp(const char *, const char *, __kernel_size_t); extern int __strncmp(const char *, const char *, __kernel_size_t);
extern unsigned int __csum_partial_copy_sparc_generic (const char *, char *);
extern void bcopy (const char *, char *, int); extern void bcopy (const char *, char *, int);
extern int __ashrdi3(int, int); extern int __ashrdi3(int, int);
...@@ -183,12 +184,14 @@ EXPORT_SYMBOL(__memcmp); ...@@ -183,12 +184,14 @@ EXPORT_SYMBOL(__memcmp);
EXPORT_SYMBOL(__strncmp); EXPORT_SYMBOL(__strncmp);
EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
/* Moving data to/from userspace. */ /* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
/* Networking helper routines. */
/* XXX This is NOVERS because C_LABEL_STR doesn't get the version number. -DaveM */
EXPORT_SYMBOL_NOVERS(__csum_partial_copy_sparc_generic);
/* No version information on this, heavily used in inline asm, /* No version information on this, heavily used in inline asm,
* and will always be 'void __ret_efault(void)'. * and will always be 'void __ret_efault(void)'.
*/ */
......
/* $Id: unaligned.c,v 1.15 1997/01/16 14:14:42 davem Exp $ /* $Id: unaligned.c,v 1.16 1997/03/18 17:53:44 jj Exp $
* unaligned.c: Unaligned load/store trap handling with special * unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly. * cases for the kernel to do them more quickly.
* *
...@@ -19,8 +19,6 @@ ...@@ -19,8 +19,6 @@
/* #define DEBUG_MNA */ /* #define DEBUG_MNA */
extern void die_if_kernel(char *, struct pt_regs *);
enum direction { enum direction {
load, /* ld, ldd, ldh, ldsh */ load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */ store, /* st, std, sth, stsh */
......
# $Id: Makefile,v 1.21 1997/01/25 06:48:43 davem Exp $ # $Id: Makefile,v 1.22 1997/03/14 21:04:17 jj Exp $
# Makefile for Sparc library files.. # Makefile for Sparc library files..
# #
...@@ -7,7 +7,7 @@ CFLAGS := $(CFLAGS) -ansi ...@@ -7,7 +7,7 @@ CFLAGS := $(CFLAGS) -ansi
OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \ OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \ strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
copy_user.o clear_user.o locks.o atomic.o bitops.o copy_user.o locks.o atomic.o bitops.o
lib.a: $(OBJS) lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS) $(AR) rcs lib.a $(OBJS)
...@@ -37,9 +37,6 @@ strlen_user.o: strlen_user.S ...@@ -37,9 +37,6 @@ strlen_user.o: strlen_user.S
copy_user.o: copy_user.S copy_user.o: copy_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o copy_user.o copy_user.S $(CC) -D__ASSEMBLY__ -ansi -c -o copy_user.o copy_user.S
clear_user.o: clear_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o clear_user.o clear_user.S
blockops.o: blockops.S blockops.o: blockops.S
$(CC) -ansi -c -o blockops.o blockops.S $(CC) -ansi -c -o blockops.o blockops.S
......
/* linux/arch/sparc/lib/clear_user.S: Sparc optimized clear_user code
*
* Zero %o1 bytes at user %o0, handling exceptions as we go.
* Returns 0 if successfull, # of bytes still not cleared otherwise
*
* Copyright (C) 1991,1996 Free Software Foundation
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#define EX(x,y,a,b,z) \
98: x,y; \
.section .fixup,z##alloc,z##execinstr; \
.align 4; \
99: retl; \
a, b, %o0; \
.section __ex_table,z##alloc; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4
#define EXT(start,end,handler,z) \
.section __ex_table,z##alloc; \
.align 4; \
.word start, 0, end, handler; \
.text; \
.align 4
/* Please don't change these macros, unless you change the login
* in the .fixup section below as well */
/* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
#define ZERO_BIG_BLOCK(base, offset, source) \
std source, [base + offset + 0x00]; \
std source, [base + offset + 0x08]; \
std source, [base + offset + 0x10]; \
std source, [base + offset + 0x18]; \
std source, [base + offset + 0x20]; \
std source, [base + offset + 0x28]; \
std source, [base + offset + 0x30]; \
std source, [base + offset + 0x38];
#define ZERO_LAST_BLOCKS(base, offset, source) \
std source, [base - offset - 0x38]; \
std source, [base - offset - 0x30]; \
std source, [base - offset - 0x28]; \
std source, [base - offset - 0x20]; \
std source, [base - offset - 0x18]; \
std source, [base - offset - 0x10]; \
std source, [base - offset - 0x08]; \
std source, [base - offset - 0x00];
.text
.align 4
.globl C_LABEL(__clear_user)
3:
cmp %o2, 3
be 2f
EX(stb %g0, [%o0], sub %o1, 0,#)
cmp %o2, 2
be 2f
EX(stb %g0, [%o0 + 0x01], sub %o1, 1,#)
EX(stb %g0, [%o0 + 0x02], sub %o1, 2,#)
2:
sub %o2, 4, %o2
add %o1, %o2, %o1
b 4f
sub %o0, %o2, %o0
C_LABEL(__clear_user):
cmp %o1, 7
bleu 7f
andcc %o0, 3, %o2
bne 3b
4:
andcc %o0, 4, %g0
be 2f
mov %g0, %g1
EX(st %g0, [%o0], sub %o1, 0,#)
sub %o1, 4, %o1
add %o0, 4, %o0
2:
andcc %o1, 0xffffff80, %o3 ! Now everything is 8 aligned and o1 is len to run
be 9f
andcc %o1, 0x78, %o2
10:
ZERO_BIG_BLOCK(%o0, 0x00, %g0)
subcc %o3, 128, %o3
ZERO_BIG_BLOCK(%o0, 0x40, %g0)
11:
EXT(10b, 11b, 20f,#)
bne 10b
add %o0, 128, %o0
orcc %o2, %g0, %g0
9:
be 13f
andcc %o1, 7, %o1
srl %o2, 1, %o3
set 13f, %o4
sub %o4, %o3, %o4
jmp %o4
add %o0, %o2, %o0
12:
ZERO_LAST_BLOCKS(%o0, 0x48, %g0)
ZERO_LAST_BLOCKS(%o0, 0x08, %g0)
13:
EXT(12b, 13b, 21f,#)
be 8f
andcc %o1, 4, %g0
be 1f
andcc %o1, 2, %g0
EX(st %g0, [%o0], and %o1, 7,#)
add %o0, 4, %o0
1:
be 1f
andcc %o1, 1, %g0
EX(sth %g0, [%o0], and %o1, 3,#)
add %o0, 2, %o0
1:
bne,a 8f
EX(stb %g0, [%o0], add %g0, 1,#)
8:
retl
clr %o0
7:
be 13b
orcc %o1, 0, %g0
andcc %o1, 4, %g0
be 1f
andcc %o1, 2, %g0
EX(stb %g0, [%o0 + 0], sub %o1, 0,#)
EX(stb %g0, [%o0 + 1], sub %o1, 1,#)
EX(stb %g0, [%o0 + 2], sub %o1, 2,#)
EX(stb %g0, [%o0 + 3], sub %o1, 3,#)
add %o0, 4, %o0
1:
be 1f
andcc %o1, 1, %o2
EX(stb %g0, [%o0 + 0], add %o2, 2,#)
EX(stb %g0, [%o0 + 1], add %o2, 1,#)
add %o0, 2, %o0
1:
bne,a 8b
EX(stb %g0, [%o0], add %g0, 1,#)
retl
clr %o0
.section .fixup,#alloc,#execinstr
.align 4
20:
cmp %g2, 8
bleu 1f
and %o1, 0x7f, %o1
sub %g2, 9, %g2
add %o3, 64, %o3
1:
sll %g2, 3, %g2
add %o3, %o1, %o0
retl
sub %o0, %g2, %o0
21:
mov 8, %o0
and %o1, 7, %o1
sub %o0, %g2, %o0
sll %o0, 3, %o0
retl
add %o0, %o1, %o0
# $Id: Makefile,v 1.22 1996/11/22 11:57:03 ecd Exp $ # $Id: Makefile,v 1.23 1997/03/10 09:16:52 davem Exp $
# Makefile for the linux Sparc-specific parts of the memory manager. # Makefile for the linux Sparc-specific parts of the memory manager.
# #
# Note! Dependencies are done automagically by 'make dep', which also # Note! Dependencies are done automagically by 'make dep', which also
...@@ -8,7 +8,10 @@ ...@@ -8,7 +8,10 @@
# Note 2! The CFLAGS definition is now in the main makefile... # Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o O_TARGET := mm.o
O_OBJS := fault.o init.o sun4c.o srmmu.o loadmmu.o generic.o asyncd.o \ O_OBJS := fault.o init.o sun4c.o srmmu.o hypersparc.o loadmmu.o \
extable.o generic.o asyncd.o extable.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
hypersparc.o: hypersparc.S
$(CC) -D__ASSEMBLY__ -ansi -c -o hypersparc.o hypersparc.S
/* $Id: fault.c,v 1.89 1997/03/04 16:26:46 jj Exp $ /* $Id: fault.c,v 1.91 1997/03/18 17:56:00 jj Exp $
* fault.c: Page fault handlers for the Sparc. * fault.c: Page fault handlers for the Sparc.
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
...@@ -36,8 +36,6 @@ ...@@ -36,8 +36,6 @@
extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
extern int prom_node_root; extern int prom_node_root;
extern void die_if_kernel(char *,struct pt_regs *);
struct linux_romvec *romvec; struct linux_romvec *romvec;
/* At boot time we determine these two values necessary for setting /* At boot time we determine these two values necessary for setting
...@@ -134,7 +132,10 @@ asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr, ...@@ -134,7 +132,10 @@ asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
prom_halt(); prom_halt();
} }
void unhandled_fault(unsigned long address, struct task_struct *tsk, static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn));
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
struct pt_regs *regs) struct pt_regs *regs)
{ {
if((unsigned long) address < PAGE_SIZE) { if((unsigned long) address < PAGE_SIZE) {
...@@ -165,10 +166,10 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, ...@@ -165,10 +166,10 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
case 3: return 3; case 3: return 3;
/* store will be handled by fixup, load will bump out */ /* store will be handled by fixup, load will bump out */
/* for _to_ macros */ /* for _to_ macros */
case 1: insn = (unsigned *)pc; if ((insn >> 21) & 1) return 1; break; case 1: insn = (unsigned)pc; if ((insn >> 21) & 1) return 1; break;
/* load will be handled by fixup, store will bump out */ /* load will be handled by fixup, store will bump out */
/* for _from_ macros */ /* for _from_ macros */
case 2: insn = (unsigned *)pc; case 2: insn = (unsigned)pc;
if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) return 2; if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) return 2;
break; break;
default: break; default: break;
......
/* $Id: hypersparc.S,v 1.1 1997/03/10 09:16:52 davem Exp $
* hypersparc.S: High speed Hypersparc mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#define WINDOW_FLUSH(tmp1, tmp2) \
mov 0, tmp1; \
98: ld [%g6 + AOFF_task_tss + AOFF_thread_uwinmask], tmp2; \
orcc %g0, tmp2, %g0; \
add tmp1, 1, tmp1; \
bne 98b; \
save %sp, -64, %sp; \
99: subcc tmp1, 1, tmp1; \
bne 99b; \
restore %g0, %g0, %g0;
.text
.align 4
.globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm
.globl hypersparc_flush_cache_range, hypersparc_flush_cache_page
.globl hypersparc_flush_page_to_ram, hypersparc_flush_chunk
.globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
.globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
.globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
/* Verified... */
hypersparc_flush_cache_all:
WINDOW_FLUSH(%g4, %g5)
sethi %hi(vac_cache_size), %g4
ld [%g4 + %lo(vac_cache_size)], %g5
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %g2
1:
subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined
bne 1b
sta %g0, [%g5] ASI_M_FLUSH_CTX
retl
sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
/* We expand the window flush to get maximum performance. */
/* Verified... */
hypersparc_flush_cache_mm:
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
be hypersparc_flush_cache_mm_out
#endif
WINDOW_FLUSH(%g4, %g5)
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %o1
sethi %hi(vac_cache_size), %g2
ld [%g2 + %lo(vac_cache_size)], %o0
add %o1, %o1, %g1
add %o1, %g1, %g2
add %o1, %g2, %g3
add %o1, %g3, %g4
add %o1, %g4, %g5
add %o1, %g5, %o4
add %o1, %o4, %o5
/* BLAMMO! */
1:
subcc %o0, %o5, %o0 ! hyper_flush_cache_user
sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER
sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER
sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER
sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER
sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER
sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER
sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER
bne 1b
sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER
hypersparc_flush_cache_mm_out:
retl
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
/* The things we do for performance... */
/* Verified... */
hypersparc_flush_cache_range:
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
be hypersparc_flush_cache_range_out
#endif
WINDOW_FLUSH(%g4, %g5)
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %o4
sethi %hi(vac_cache_size), %g2
ld [%g2 + %lo(vac_cache_size)], %o3
/* Here comes the fun part... */
add %o2, (PAGE_SIZE - 1), %o2
andn %o1, (PAGE_SIZE - 1), %o1
add %o4, %o4, %o5
andn %o2, (PAGE_SIZE - 1), %o2
add %o4, %o5, %g1
sub %o2, %o1, %g4
add %o4, %g1, %g2
sll %o3, 2, %g5
add %o4, %g2, %g3
cmp %g4, %g5
add %o4, %g3, %g4
blu 0f
add %o4, %g4, %g5
add %o4, %g5, %g7
/* Flush entire user space, believe it or not this is quicker
* than page at a time flushings for range > (cache_size<<2).
*/
1:
subcc %o3, %g7, %o3
sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER
sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER
sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER
sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER
sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER
sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER
sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER
bne 1b
sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER
retl
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE
/* Below our threshold, flush one page at a time. */
0:
ld [%o0 + AOFF_mm_context], %o0
mov SRMMU_CTX_REG, %g7
lda [%g7] ASI_M_MMUREGS, %o3
sta %o0, [%g7] ASI_M_MMUREGS
sethi %hi(PAGE_SIZE), %g7 /* XXX ick, stupid stalls... */
sub %o2, %g7, %o0
1:
or %o0, 0x400, %g7
lda [%g7] ASI_M_FLUSH_PROBE, %g7
orcc %g7, 0, %g0
be,a 3f
mov %o0, %o2
add %o4, %g5, %g7
2:
sub %o2, %g7, %o2
sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
andcc %o2, 0xffc, %g0
sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
bne 2b
sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
3:
sethi %hi(PAGE_SIZE), %g7
cmp %o2, %o1
bne 1b
sub %o2, %g7, %o0
mov SRMMU_FAULT_STATUS, %g5
lda [%g5] ASI_M_MMUREGS, %g0
mov SRMMU_CTX_REG, %g7
sta %o3, [%g7] ASI_M_MMUREGS
hypersparc_flush_cache_range_out:
retl
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE
/* HyperSparc requires a valid mapping where we are about to flush
* in order to check for a physical tag match during the flush.
*/
/* Verified... */
hypersparc_flush_cache_page:
ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
#ifndef __SMP__
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
be hypersparc_flush_cache_page_out
#endif
WINDOW_FLUSH(%g4, %g5)
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %o4
mov SRMMU_CTX_REG, %o3
andn %o1, (PAGE_SIZE - 1), %o1
lda [%o3] ASI_M_MMUREGS, %o2
sta %g1, [%o3] ASI_M_MMUREGS
or %o1, 0x400, %o5
lda [%o5] ASI_M_FLUSH_PROBE, %g1
orcc %g0, %g1, %g0
sethi %hi(PAGE_SIZE), %g7
be 2f
add %o4, %o4, %o5
add %o1, %g7, %o1
add %o4, %o5, %g1
add %o4, %g1, %g2
add %o4, %g2, %g3
add %o4, %g3, %g4
add %o4, %g4, %g5
add %o4, %g5, %g7
/* BLAMMO! */
1:
sub %o1, %g7, %o1
sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
andcc %o1, 0xffc, %g0
sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
bne 1b
sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
2:
mov SRMMU_FAULT_STATUS, %g7
mov SRMMU_CTX_REG, %g4
lda [%g7] ASI_M_MMUREGS, %g0
sta %o2, [%g4] ASI_M_MMUREGS
hypersparc_flush_cache_page_out:
hypersparc_flush_sig_insns: /* This is "neat"... */
retl
sta %g0, [%g0 + %g0] ASI_M_FLUSH_IWHOLE
/* HyperSparc is copy-back. */
/* Verified... */
hypersparc_flush_page_to_ram:
hypersparc_flush_chunk:
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %o4
andn %o0, (PAGE_SIZE - 1), %o0
add %o4, %o4, %o5
or %o0, 0x400, %g7
lda [%g7] ASI_M_FLUSH_PROBE, %g5
add %o4, %o5, %g1
orcc %g5, 0, %g0
be 2f
add %o4, %g1, %g2
sethi %hi(PAGE_SIZE), %g5
add %o4, %g2, %g3
add %o0, %g5, %o0
add %o4, %g3, %g4
add %o4, %g4, %g5
add %o4, %g5, %g7
/* BLAMMO! */
1:
sub %o0, %g7, %o0
sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
andcc %o0, 0xffc, %g0
sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
bne 1b
sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
2:
mov SRMMU_FAULT_STATUS, %g1
retl
lda [%g1] ASI_M_MMUREGS, %g0
/* HyperSparc is IO cache coherent. */
hypersparc_flush_page_for_dma:
retl
nop
/* Verified... */
hypersparc_flush_tlb_all:
mov 0x400, %g1
retl
sta %g0, [%g1] ASI_M_FLUSH_PROBE
/* Verified... */
hypersparc_flush_tlb_mm:
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o1
lda [%g1] ASI_M_MMUREGS, %g5
#ifndef __SMP__
cmp %o1, -1
be hypersparc_flush_tlb_mm_out
#endif
mov 0x300, %g2
sta %o1, [%g1] ASI_M_MMUREGS
sta %g0, [%g2] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_mm_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
/* Verified... */
hypersparc_flush_tlb_range:
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
#ifndef __SMP__
cmp %o3, -1
be hypersparc_flush_tlb_range_out
#endif
srl %o1, SRMMU_PGDIR_SHIFT, %o1
sta %o3, [%g1] ASI_M_MMUREGS
sll %o1, SRMMU_PGDIR_SHIFT, %o1
sethi %hi(1 << SRMMU_PGDIR_SHIFT), %o4
add %o1, 0x200, %o1
sta %g0, [%o1] ASI_M_FLUSH_PROBE
1:
add %o1, %o4, %o1
cmp %o1, %o2
blu,a 1b
sta %g0, [%o1] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_range_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
/* Verified... */
hypersparc_flush_tlb_page:
ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
lda [%g1] ASI_M_MMUREGS, %g5
#ifndef __SMP__
cmp %o3, -1
be hypersparc_flush_tlb_page_out
#endif
sta %o3, [%g1] ASI_M_MMUREGS
sta %g0, [%o1] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_page_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
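For readers less fluent in SRMMU assembly, here is a rough C model of what hypersparc_flush_cache_page above does, motivated by the comment that HyperSparc needs a valid mapping to match physical tags during the flush. This is illustration only, not kernel source; the srmmu_* helpers and flush_page_line() are hypothetical stand-ins for the lda/sta accesses through ASI_M_MMUREGS, ASI_M_FLUSH_PROBE and ASI_M_FLUSH_PAGE.

/* Illustration only -- hypothetical helpers, not part of this commit. */
#define SRMMU_PAGE_SIZE 4096UL			/* PAGE_SIZE on these machines */

extern int srmmu_get_context(void);		/* lda [SRMMU_CTX_REG] ASI_M_MMUREGS */
extern void srmmu_set_context(int ctx);
extern int srmmu_probe(unsigned long vaddr);	/* lda [...] ASI_M_FLUSH_PROBE */
extern void srmmu_clear_fault_status(void);	/* lda [SRMMU_FAULT_STATUS] ... */
extern void flush_page_line(unsigned long vaddr); /* sta %g0, [...] ASI_M_FLUSH_PAGE */

static void hypersparc_flush_cache_page_model(unsigned long page, int ctx,
					      unsigned long vac_line_size)
{
	unsigned long off;
	int octx = srmmu_get_context();		/* remember the current context */

	srmmu_set_context(ctx);			/* switch to the target context */
	if (srmmu_probe(page)) {		/* flush only if a mapping exists */
		/* Sweep the page one cache line at a time, top down,
		 * just as the 8-way unrolled loop above does. */
		for (off = SRMMU_PAGE_SIZE; off != 0; off -= vac_line_size)
			flush_page_line(page + off - vac_line_size);
	}
	srmmu_clear_fault_status();		/* discard any probe fault */
	srmmu_set_context(octx);		/* restore the previous context */
}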
This diff is collapsed.
/* $Id: console.c,v 1.10 1996/12/18 06:46:54 tridge Exp $ /* $Id: console.c,v 1.11 1997/03/18 17:58:10 jj Exp $
* console.c: Routines that deal with sending and receiving IO * console.c: Routines that deal with sending and receiving IO
* to/from the current console device using the PROM. * to/from the current console device using the PROM.
* *
...@@ -31,7 +31,6 @@ prom_nbgetchar(void) ...@@ -31,7 +31,6 @@ prom_nbgetchar(void)
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) { if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
i = inc; i = inc;
} else { } else {
...@@ -66,7 +65,6 @@ prom_nbputchar(char c) ...@@ -66,7 +65,6 @@ prom_nbputchar(char c)
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
outc = c; outc = c;
if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1) if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
i = 0; i = 0;
...@@ -129,7 +127,6 @@ prom_query_input_device() ...@@ -129,7 +127,6 @@ prom_query_input_device()
return PROMDEV_I_UNK; return PROMDEV_I_UNK;
}; };
case PROM_V3: case PROM_V3:
case PROM_P1275:
save_flags(flags); cli(); save_flags(flags); cli();
st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin); st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
__asm__ __volatile__("ld [%0], %%g6\n\t" : : __asm__ __volatile__("ld [%0], %%g6\n\t" : :
...@@ -177,7 +174,6 @@ prom_query_output_device() ...@@ -177,7 +174,6 @@ prom_query_output_device()
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
save_flags(flags); cli(); save_flags(flags); cli();
st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout); st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
__asm__ __volatile__("ld [%0], %%g6\n\t" : : __asm__ __volatile__("ld [%0], %%g6\n\t" : :
......
/* $Id: devops.c,v 1.6 1996/10/12 12:37:38 davem Exp $ /* $Id: devops.c,v 1.7 1997/03/18 17:58:19 jj Exp $
* devops.c: Device operations using the PROM. * devops.c: Device operations using the PROM.
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
...@@ -27,7 +27,6 @@ prom_devopen(char *dstr) ...@@ -27,7 +27,6 @@ prom_devopen(char *dstr)
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr); handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr);
break; break;
case PROM_AP1000: case PROM_AP1000:
...@@ -55,7 +54,6 @@ prom_devclose(int dhandle) ...@@ -55,7 +54,6 @@ prom_devclose(int dhandle)
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
(*(romvec->pv_v2devops.v2_dev_close))(dhandle); (*(romvec->pv_v2devops.v2_dev_close))(dhandle);
break; break;
case PROM_AP1000: case PROM_AP1000:
...@@ -82,7 +80,6 @@ prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) ...@@ -82,7 +80,6 @@ prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
(*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo); (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo);
break; break;
case PROM_AP1000: case PROM_AP1000:
......
/* $Id: init.c,v 1.9 1996/12/18 06:46:55 tridge Exp $ /* $Id: init.c,v 1.11 1997/03/18 17:58:24 jj Exp $
* init.c: Initialize internal variables used by the PROM * init.c: Initialize internal variables used by the PROM
* library functions. * library functions.
* *
...@@ -19,6 +19,8 @@ unsigned int prom_rev, prom_prev; ...@@ -19,6 +19,8 @@ unsigned int prom_rev, prom_prev;
/* The root node of the prom device tree. */ /* The root node of the prom device tree. */
int prom_root_node; int prom_root_node;
int prom_stdin, prom_stdout;
/* Pointer to the device tree operations structure. */ /* Pointer to the device tree operations structure. */
struct linux_nodeops *prom_nodeops; struct linux_nodeops *prom_nodeops;
...@@ -49,11 +51,6 @@ __initfunc(void prom_init(struct linux_romvec *rp)) ...@@ -49,11 +51,6 @@ __initfunc(void prom_init(struct linux_romvec *rp))
case 3: case 3:
prom_vers = PROM_V3; prom_vers = PROM_V3;
break; break;
case 4:
prom_vers = PROM_P1275;
prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
prom_halt();
break;
case 42: /* why not :-) */ case 42: /* why not :-) */
prom_vers = PROM_AP1000; prom_vers = PROM_AP1000;
break; break;
...@@ -77,6 +74,11 @@ __initfunc(void prom_init(struct linux_romvec *rp)) ...@@ -77,6 +74,11 @@ __initfunc(void prom_init(struct linux_romvec *rp))
(((unsigned long) prom_nodeops) == -1)) (((unsigned long) prom_nodeops) == -1))
prom_halt(); prom_halt();
if(prom_vers == PROM_V2 || prom_vers == PROM_V3) {
prom_stdout = *romvec->pv_v2bootargs.fd_stdout;
prom_stdin = *romvec->pv_v2bootargs.fd_stdin;
}
prom_meminit(); prom_meminit();
prom_ranges_init(); prom_ranges_init();
......
/* $Id: memory.c,v 1.9 1996/11/13 05:10:09 davem Exp $ /* $Id: memory.c,v 1.10 1997/03/18 17:58:27 jj Exp $
* memory.c: Prom routine for acquiring various bits of information * memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical. * about RAM on the machine, both virtual and physical.
* *
...@@ -107,7 +107,6 @@ __initfunc(void prom_meminit(void)) ...@@ -107,7 +107,6 @@ __initfunc(void prom_meminit(void))
break; break;
case PROM_V2: case PROM_V2:
case PROM_V3: case PROM_V3:
case PROM_P1275:
/* Grrr, have to traverse the prom device tree ;( */ /* Grrr, have to traverse the prom device tree ;( */
node = prom_getchild(prom_root_node); node = prom_getchild(prom_root_node);
node = prom_searchsiblings(node, "memory"); node = prom_searchsiblings(node, "memory");
......
/* $Id: mp.c,v 1.6 1996/09/19 20:27:25 davem Exp $ /* $Id: mp.c,v 1.7 1997/03/18 17:58:23 jj Exp $
* mp.c: OpenBoot Prom Multiprocessor support routines. Don't call * mp.c: OpenBoot Prom Multiprocessor support routines. Don't call
* these on a UP or else you will halt and catch fire. ;) * these on a UP or else you will halt and catch fire. ;)
* *
...@@ -32,7 +32,6 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha ...@@ -32,7 +32,6 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
ret = -1; ret = -1;
break; break;
case PROM_V3: case PROM_V3:
case PROM_P1275:
ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc); ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
break; break;
}; };
...@@ -62,7 +61,6 @@ prom_stopcpu(int cpunode) ...@@ -62,7 +61,6 @@ prom_stopcpu(int cpunode)
ret = -1; ret = -1;
break; break;
case PROM_V3: case PROM_V3:
case PROM_P1275:
ret = (*(romvec->v3_cpustop))(cpunode); ret = (*(romvec->v3_cpustop))(cpunode);
break; break;
}; };
...@@ -92,7 +90,6 @@ prom_idlecpu(int cpunode) ...@@ -92,7 +90,6 @@ prom_idlecpu(int cpunode)
ret = -1; ret = -1;
break; break;
case PROM_V3: case PROM_V3:
case PROM_P1275:
ret = (*(romvec->v3_cpuidle))(cpunode); ret = (*(romvec->v3_cpuidle))(cpunode);
break; break;
}; };
...@@ -122,7 +119,6 @@ prom_restartcpu(int cpunode) ...@@ -122,7 +119,6 @@ prom_restartcpu(int cpunode)
ret = -1; ret = -1;
break; break;
case PROM_V3: case PROM_V3:
case PROM_P1275:
ret = (*(romvec->v3_cpuresume))(cpunode); ret = (*(romvec->v3_cpuresume))(cpunode);
break; break;
}; };
......
/* $Id: tree.c,v 1.15 1997/01/31 00:17:04 tdyas Exp $ /* $Id: tree.c,v 1.16 1997/03/19 14:53:16 davem Exp $
* tree.c: Basic device tree traversal/scanning for the Linux * tree.c: Basic device tree traversal/scanning for the Linux
* prom library. * prom library.
* *
...@@ -259,6 +259,33 @@ char * prom_nextprop(int node, char *oprop) ...@@ -259,6 +259,33 @@ char * prom_nextprop(int node, char *oprop)
return ret; return ret;
} }
int prom_finddevice(char *name)
{
int topnd = prom_getchild(prom_root_node);
int srch;
if(name[0] == '/')
name++;
if(sparc_cpu_model == sun4d) {
if(!strcmp(name, "sbus"))
name = "sbi";
if((srch = prom_searchsiblings(topnd, "io-unit")) == 0 ||
(srch = prom_getchild(srch)) == 0 ||
(srch = prom_searchsiblings(srch, name)) == 0) {
prom_printf("%s prom node not found.\n", name);
prom_halt();
}
} else if((srch = prom_searchsiblings(topnd, name)) == 0) {
if((srch = prom_searchsiblings(topnd, "iommu")) == 0 ||
(srch = prom_getchild(srch)) == 0 ||
(srch = prom_searchsiblings(srch, name)) == 0) {
prom_printf("Cannot find node %s\n", name);
prom_halt();
}
}
return srch;
}
int prom_node_has_property(int node, char *prop) int prom_node_has_property(int node, char *prop)
{ {
char *current_property = ""; char *current_property = "";
......
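A minimal usage sketch of the new prom_finddevice() helper added above, assuming a machine with a top-level "sbus" node; the function, property name and output format here are only an example and are not part of this commit. prom_getintdefault() and prom_printf() are existing prom library calls.

/* Illustration only: resolve a top-level node and read one property. */
#include <asm/oplib.h>

static void example_find_sbus(void)
{
	int node = prom_finddevice("/sbus");	/* prom_halt()s if the node is absent */
	int clk = prom_getintdefault(node, "clock-frequency", 0);

	prom_printf("sbus node %08x clock-frequency %d\n", node, clk);
}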
# $Id: Makefile,v 1.6 1997/03/04 16:27:18 jj Exp $ # $Id: Makefile,v 1.9 1997/03/14 21:04:39 jj Exp $
# sparc64/Makefile # sparc64/Makefile
# #
# Makefile for the architecture dependent flags and dependencies on the # Makefile for the architecture dependent flags and dependencies on the
...@@ -41,15 +41,5 @@ archclean: ...@@ -41,15 +41,5 @@ archclean:
archdep: archdep:
# <hack> Temporary hack, until we get a clean compile of everything...
vmlinux64: $(CONFIGURATION) init/main.o init/version.o
set -e; for i in arch/sparc64/kernel arch/sparc64/lib arch/sparc64/prom lib; do $(MAKE) -C $$i; done
$(LD) $(LINKFLAGS) $(HEAD) init/main.o init/version.o \
arch/sparc64/kernel/kernel.o \
lib/lib.a arch/sparc64/prom/promlib.a arch/sparc64/lib/lib.a \
-o vmlinux
$(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( a \)\|\(\.\.ng$$\)' | sort > System.map
# </hack>
check_asm: check_asm:
$(MAKE) -C arch/sparc64/kernel check_asm $(MAKE) -C arch/sparc64/kernel check_asm
...@@ -10,7 +10,9 @@ CONFIG_EXPERIMENTAL=y ...@@ -10,7 +10,9 @@ CONFIG_EXPERIMENTAL=y
# #
# Loadable module support # Loadable module support
# #
# CONFIG_MODULES is not set CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
CONFIG_KERNELD=y
# #
# General setup # General setup
...@@ -38,22 +40,26 @@ SUN_FB_CGFOURTEEN=y ...@@ -38,22 +40,26 @@ SUN_FB_CGFOURTEEN=y
SUN_FB_BWTWO=y SUN_FB_BWTWO=y
SUN_FB_LEO=y SUN_FB_LEO=y
TADPOLE_FB_WEITEK=y TADPOLE_FB_WEITEK=y
SUN_FB_FAST_ONE=y
SUN_FB_FAST_TWO=y
SUN_FB_FAST_MONO=y
SUN_FB_GENERIC=y
# #
# Misc Linux/SPARC drivers # Misc Linux/SPARC drivers
# #
CONFIG_SUN_OPENPROMIO=y CONFIG_SUN_OPENPROMIO=m
CONFIG_SUN_MOSTEK_RTC=y CONFIG_SUN_MOSTEK_RTC=y
CONFIG_SUN_OPENPROMFS=y # CONFIG_SUN_BPP is not set
#
# Linux/SPARC audio subsystem (EXPERIMENTAL)
#
# CONFIG_SPARCAUDIO is not set
# CONFIG_SPARCAUDIO_AMD7930 is not set
# CONFIG_SPARCAUDIO_CS4231 is not set
CONFIG_SUN_OPENPROMFS=m
CONFIG_NET=y CONFIG_NET=y
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_BINFMT_AOUT=y CONFIG_BINFMT_AOUT=y
CONFIG_BINFMT_ELF=y CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_JAVA=y CONFIG_BINFMT_JAVA=m
# #
# Floppy, IDE, and other block devices # Floppy, IDE, and other block devices
...@@ -64,7 +70,7 @@ CONFIG_MD_LINEAR=y ...@@ -64,7 +70,7 @@ CONFIG_MD_LINEAR=y
CONFIG_MD_STRIPED=y CONFIG_MD_STRIPED=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP=m
# #
# Networking options # Networking options
...@@ -74,7 +80,6 @@ CONFIG_RTNETLINK=y ...@@ -74,7 +80,6 @@ CONFIG_RTNETLINK=y
CONFIG_FIREWALL=y CONFIG_FIREWALL=y
CONFIG_NET_ALIAS=y CONFIG_NET_ALIAS=y
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_FORWARD=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
CONFIG_IP_FIREWALL=y CONFIG_IP_FIREWALL=y
# CONFIG_IP_FIREWALL_NETLINK is not set # CONFIG_IP_FIREWALL_NETLINK is not set
...@@ -88,32 +93,35 @@ CONFIG_IP_MASQUERADE=y ...@@ -88,32 +93,35 @@ CONFIG_IP_MASQUERADE=y
# CONFIG_IP_ALWAYS_DEFRAG is not set # CONFIG_IP_ALWAYS_DEFRAG is not set
# CONFIG_IP_ACCT is not set # CONFIG_IP_ACCT is not set
# CONFIG_IP_ROUTER is not set # CONFIG_IP_ROUTER is not set
CONFIG_NET_IPIP=y # CONFIG_NET_IPIP is not set
# CONFIG_IP_MROUTE is not set # CONFIG_IP_MROUTE is not set
CONFIG_IP_ALIAS=y CONFIG_IP_ALIAS=m
# CONFIG_ARPD is not set # CONFIG_ARPD is not set
# #
# (it is safe to leave these untouched) # (it is safe to leave these untouched)
# #
# CONFIG_INET_PCTCP is not set # CONFIG_INET_PCTCP is not set
CONFIG_INET_RARP=y CONFIG_INET_RARP=m
# CONFIG_PATH_MTU_DISCOVERY is not set # CONFIG_PATH_MTU_DISCOVERY is not set
CONFIG_IP_NOSR=y CONFIG_IP_NOSR=y
CONFIG_SKB_LARGE=y CONFIG_SKB_LARGE=y
CONFIG_IPV6=y CONFIG_IPV6=m
# #
# #
# #
CONFIG_IPX=y CONFIG_IPX=m
# CONFIG_IPX_INTERN is not set # CONFIG_IPX_INTERN is not set
# CONFIG_IPX_PPROP_ROUTING is not set # CONFIG_IPX_PPROP_ROUTING is not set
CONFIG_ATALK=y CONFIG_ATALK=m
# CONFIG_IPDDP is not set
# CONFIG_AX25 is not set # CONFIG_AX25 is not set
CONFIG_X25=y CONFIG_X25=m
# CONFIG_LAPB is not set
# CONFIG_BRIDGE is not set # CONFIG_BRIDGE is not set
# CONFIG_LLC is not set # CONFIG_LLC is not set
# CONFIG_WAN_ROUTER is not set
# #
# SCSI support # SCSI support
...@@ -139,51 +147,50 @@ CONFIG_SCSI_CONSTANTS=y ...@@ -139,51 +147,50 @@ CONFIG_SCSI_CONSTANTS=y
# SCSI low-level drivers # SCSI low-level drivers
# #
CONFIG_SCSI_SUNESP=y CONFIG_SCSI_SUNESP=y
CONFIG_SCSI_QLOGICPTI=y CONFIG_SCSI_QLOGICPTI=m
# #
# Network device support # Network device support
# #
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_DUMMY=y CONFIG_DUMMY=m
CONFIG_PPP=y CONFIG_PPP=m
# #
# CCP compressors for PPP are only built as modules. # CCP compressors for PPP are only built as modules.
# #
CONFIG_SLIP=y CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y CONFIG_SLIP_SMART=y
# CONFIG_SLIP_MODE_SLIP6 is not set # CONFIG_SLIP_MODE_SLIP6 is not set
CONFIG_SUNLANCE=y CONFIG_SUNLANCE=y
CONFIG_HAPPYMEAL=y CONFIG_HAPPYMEAL=m
CONFIG_SUNQE=y CONFIG_SUNQE=m
CONFIG_MYRI_SBUS=y CONFIG_MYRI_SBUS=m
# #
# Filesystems # Filesystems
# #
CONFIG_QUOTA=y CONFIG_QUOTA=y
CONFIG_MINIX_FS=y CONFIG_MINIX_FS=m
CONFIG_EXT_FS=y
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
CONFIG_XIA_FS=y CONFIG_FAT_FS=m
CONFIG_FAT_FS=y CONFIG_MSDOS_FS=m
CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m
CONFIG_VFAT_FS=y CONFIG_UMSDOS_FS=m
CONFIG_UMSDOS_FS=y
CONFIG_PROC_FS=y CONFIG_PROC_FS=y
CONFIG_NFS_FS=y CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y CONFIG_ROOT_NFS=y
CONFIG_RNFS_BOOTP=y CONFIG_RNFS_BOOTP=y
CONFIG_RNFS_RARP=y CONFIG_RNFS_RARP=y
CONFIG_SMB_FS=y CONFIG_SMB_FS=m
CONFIG_SMB_WIN95=y CONFIG_SMB_WIN95=y
CONFIG_NCP_FS=y CONFIG_NCP_FS=m
CONFIG_ISO9660_FS=y CONFIG_ISO9660_FS=y
CONFIG_HPFS_FS=y CONFIG_HPFS_FS=m
CONFIG_SYSV_FS=y CONFIG_SYSV_FS=m
CONFIG_AFFS_FS=y CONFIG_AFFS_FS=m
CONFIG_ROMFS_FS=m
CONFIG_AMIGA_PARTITION=y CONFIG_AMIGA_PARTITION=y
CONFIG_UFS_FS=y CONFIG_UFS_FS=y
CONFIG_BSD_DISKLABEL=y CONFIG_BSD_DISKLABEL=y
......
# $Id: Makefile,v 1.5 1997/03/04 16:26:54 jj Exp $ # $Id: Makefile,v 1.7 1997/03/18 17:59:15 jj Exp $
# Makefile for the linux kernel. # Makefile for the linux kernel.
# #
# Note! Dependencies are done automagically by 'make dep', which also # Note! Dependencies are done automagically by 'make dep', which also
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
all: kernel.o head.o all: kernel.o head.o
O_TARGET := kernel.o O_TARGET := kernel.o
O_OBJS := etrap.o rtrap.o hack.o process.o # signal32.o O_OBJS := etrap.o rtrap.o hack.o process.o setup.o cpu.o idprom.o systbls.o traps.o entry.o devices.o auxio.o ioport.o # signal32.o
OX_OBJS := sparc64_ksyms.o OX_OBJS := sparc64_ksyms.o
head.o: head.S head.o: head.S
......
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/auxio.h>
#include <asm/sbus.h>
/* Probe and map in the Auxiliary I/O register */
unsigned char *auxio_register;
__initfunc(void auxio_probe(void))
{
struct linux_sbus *bus;
struct linux_sbus_device *sdev = 0;
struct linux_prom_registers auxregs[1];
for_each_sbus(bus) {
for_each_sbusdev(sdev, bus) {
if(!strcmp(sdev->prom_name, "auxio")) {
break;
}
}
}
if (!sdev) {
prom_printf("Cannot find auxio node, cannot continue...\n");
prom_halt();
}
prom_getproperty(sdev->prom_node, "reg", (char *) auxregs, sizeof(auxregs));
prom_apply_sbus_ranges(sdev->my_bus, auxregs, 0x1, sdev);
/* Map the register both read and write */
auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0,
auxregs[0].reg_size,
"auxiliaryIO",
auxregs[0].which_io, 0x0);
TURN_ON_LED;
}
/* cpu.c: Dinky routines to look for the kind of Sparc cpu
* we are on.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/system.h>
struct cpu_iu_info {
short manuf;
short impl;
char* cpu_name; /* should be enough I hope... */
};
struct cpu_fp_info {
short manuf;
short impl;
char fpu_vers;
char* fp_name;
};
/* In order to get the fpu type correct, you need to take the IDPROM's
* machine type value into consideration too. I will fix this.
*/
struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x17, 0x10, 0, "FIXME: UltraSparc I FPU"},
};
#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
struct cpu_iu_info linux_sparc_chips[] = {
{ 0x17, 0x10, "FIXME: UltraSparc I"},
};
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
char *sparc_cpu_type[NCPUS] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
char *sparc_fpu_type[NCPUS] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
unsigned int fsr_storage;
__initfunc(void cpu_probe(void))
{
int manuf, impl;
unsigned i, cpuid;
long ver, fpu_vers;
cpuid = get_cpuid();
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=r" (ver) : "r" (&fpu_vers));
manuf = ((ver >> 48)&0xffff);
impl = ((ver >> 32)&0xffff);
fpu_vers = ((fpu_vers>>17)&0x7);
for(i = 0; i<NSPARCCHIPS; i++) {
if(linux_sparc_chips[i].manuf == manuf)
if(linux_sparc_chips[i].impl == impl) {
sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
break;
}
}
if(i==NSPARCCHIPS) {
printk("DEBUG: manuf = 0x%x impl = 0x%x\n", manuf,
impl);
sparc_cpu_type[cpuid] = "Unknown CPU";
}
for(i = 0; i<NSPARCFPU; i++) {
if(linux_sparc_fpu[i].manuf == manuf && linux_sparc_fpu[i].impl == impl)
if(linux_sparc_fpu[i].fpu_vers == fpu_vers) {
sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
break;
}
}
if(i == NSPARCFPU) {
printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n", manuf, impl,
(unsigned)fpu_vers);
sparc_fpu_type[cpuid] = "Unknown FPU";
}
}
/* devices.c: Initial scan of the prom device tree for important
* Sparc device nodes which we need to find.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/tasks.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/system.h>
struct prom_cpuinfo linux_cpus[NCPUS];
int linux_num_cpus;
extern void cpu_probe(void);
__initfunc(unsigned long
device_scan(unsigned long mem_start))
{
char node_str[128];
int nd, prom_node_cpu, thismid;
int cpu_nds[NCPUS]; /* One node for each cpu */
int cpu_ctr = 0;
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
if(strcmp(node_str, "cpu") == 0) {
cpu_nds[0] = prom_root_node;
cpu_ctr++;
} else {
int scan;
scan = prom_getchild(prom_root_node);
prom_printf("root child is %08x\n", (unsigned) scan);
nd = 0;
while((scan = prom_getsibling(scan)) != 0) {
prom_getstring(scan, "device_type", node_str, sizeof(node_str));
if(strcmp(node_str, "cpu") == 0) {
cpu_nds[cpu_ctr] = scan;
linux_cpus[cpu_ctr].prom_node = scan;
prom_getproperty(scan, "mid", (char *) &thismid, sizeof(thismid));
linux_cpus[cpu_ctr].mid = thismid;
prom_printf("Found CPU %d <node=%08x,mid=%d>\n",
cpu_ctr, (unsigned) scan,
thismid);
cpu_ctr++;
}
};
if(cpu_ctr == 0) {
printk("No CPU nodes found, cannot continue.\n");
halt();
}
printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
};
prom_node_cpu = cpu_nds[0];
linux_num_cpus = cpu_ctr;
cpu_probe();
return mem_start;
}
/* $Id: entry.S,v 1.1 1997/03/18 17:58:59 jj Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/signal.h>
#define curptr g6
#define NR_SYSCALLS 256 /* Each OS is different... */
.text
.align 4
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
or %l7, %lo(sys_ni_syscall), %l7
ba,pt %xcc,syscall_is_too_hard
add %l7, %g4, %l7
linux_fast_syscall:
andn %l7, 3, %l7
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
jmpl %l7 + %g0, %g0
mov %i3, %o3
linux_syscall_trace:
call syscall_trace
nop
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
ba,pt %xcc, 2f
mov %i4, %o4
.globl ret_from_syscall
ret_from_syscall:
ba,pt %xcc, ret_sys_call
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I0], %o0
/* Linux native and SunOS system calls enter here... */
.align 4
.globl linux_sparc_syscall
linux_sparc_syscall:
/* Direct access to user regs, much faster. */
cmp %g1, NR_SYSCALLS
add %l7, %g4, %l7
bgeu,pn %xcc, linux_sparc_ni_syscall
sll %g1, 3, %l4
ldx [%l7 + %l4], %l7
andcc %l7, 1, %g0
bne,pn %icc, linux_fast_syscall
/* Just do the next insn in the delay slot */
.globl syscall_is_too_hard
syscall_is_too_hard:
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
ldx [%curptr + AOFF_task_flags], %l5
mov %i3, %o3
mov %i4, %o4
andcc %l5, 0x20, %g0
bne,pn %icc, linux_syscall_trace
mov %i0, %l5
2:
call %l7
mov %i5, %o5
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_I0]
.globl ret_sys_call
ret_sys_call:
ldx [%curptr + AOFF_task_flags], %l6
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE], %g3
cmp %o0, -ENOIOCTLCMD
sllx %g2, 32, %g2
bgeu,pn %xcc, 1f
andcc %l6, 0x20, %l6
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
clr %l6
stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE]
bne,pn %icc, linux_syscall_trace2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC], %l1 /* pc = npc */
add %l1, 0x4, %l2 /* npc = npc+4 */
stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
ba,pt %xcc, rtrap
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
1:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
sub %g0, %o0, %o0
or %g3, %g2, %g3
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_I0]
mov 1, %l6
stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE]
bne,pn %icc, linux_syscall_trace2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC], %l1 /* pc = npc */
add %l1, 0x4, %l2 /* npc = npc+4 */
stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
ba,pt %xcc, rtrap
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
linux_syscall_trace2:
call syscall_trace
add %l1, 0x4, %l2 /* npc = npc+4 */
stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
ba,pt %xcc, rtrap
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
/* End of entry.S */
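To make the Carry-bit convention in ret_sys_call concrete: carry clear means %o0 holds the plain return value, carry set means %o0 holds the positive errno (the code negates the kernel's negative errno before returning). The sketch below is a hedged user-side illustration, not kernel or libc source; in a real stub the two inputs would come from inline assembly around the trap instruction.

/* Illustration only -- hypothetical stub helper. */
#include <errno.h>

static long interpret_syscall_return(unsigned long o0, int carry_set)
{
	if (carry_set) {		/* kernel stored abs(errno) in %o0 */
		errno = (int) o0;
		return -1;
	}
	return (long) o0;		/* ordinary return value */
}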
/* $Id: etrap.S,v 1.4 1997/03/04 16:26:58 jj Exp $ /* $Id: etrap.S,v 1.5 1997/03/13 08:24:01 jj Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9. * etrap.S: Preparing for entry into the kernel on Sparc V9.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
...@@ -18,16 +18,17 @@ etrap: ...@@ -18,16 +18,17 @@ etrap:
rdpr %tstate, %g1 rdpr %tstate, %g1
be,pn %xcc, 1f /* What happens more often? etrap when already in priv or from userland? */ be,pn %xcc, 1f /* What happens more often? etrap when already in priv or from userland? */
sllx %g6, 32, %g6 sllx %g6, 32, %g6
/* Just when going from userland to privileged mode, we have to change this stuff. */
sll %g2, 3, %g2 sll %g2, 3, %g2
wrpr %g2, %wstate wrpr %g2, %wstate
rdpr %canrestore, %g5
wrpr %g0, 0, %canrestore
wrpr %g5, 0, %otherwin
1: 1:
sethi %hi(current_set), %g4 sethi %hi(current_set), %g4
or %g4, %lo(current_set), %g4 or %g4, %lo(current_set), %g4
rdpr %tpc, %g2 rdpr %tpc, %g2
rdpr %canrestore, %g5
rdpr %tnpc, %g3 rdpr %tnpc, %g3
wrpr %g5, 0, %otherwin
wrpr %g0, 0, %canrestore
ldx [%g6 + %g4], %g6 ldx [%g6 + %g4], %g6
#ifdef __SMP__ #ifdef __SMP__
/* FIXME: Fix the above insn for SMP */ /* FIXME: Fix the above insn for SMP */
......
This diff is collapsed.
/* $Id: head.S,v 1.9 1997/02/26 11:09:25 jj Exp $ /* $Id: head.S,v 1.17 1997/03/18 17:59:37 jj Exp $
* head.S: Initial boot code for the Sparc64 port of Linux. * head.S: Initial boot code for the Sparc64 port of Linux.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/ */
#include <linux/version.h> #include <linux/version.h>
...@@ -12,12 +14,14 @@ ...@@ -12,12 +14,14 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/lsu.h>
/* This section from from _start to sparc64_boot_end should fit into /* This section from from _start to sparc64_boot_end should fit into
0xfffff80000004000 to 0xfffff80000008000 and will be sharing space * 0xffff.f800.0000.4000 to 0xffff.f800.0000.8000 and will be sharing space
with bootup_user_stack, which is from 0xfffff80000004000 to * with bootup_user_stack, which is from 0xffff.f800.0000.4000 to
0xfffff80000006000 and bootup_kernel_stack, which is from * 0xffff.f800.0000.6000 and bootup_kernel_stack, which is from
0xfffff80000006000 to 0xfffff80000008000. */ * 0xffff.f800.0000.6000 to 0xffff.f800.0000.8000.
*/
.text .text
.globl start, _start .globl start, _start
...@@ -26,12 +30,15 @@ start: ...@@ -26,12 +30,15 @@ start:
bootup_user_stack: bootup_user_stack:
! 0xfffff80000004000 ! 0xfffff80000004000
b sparc64_boot b sparc64_boot
rdpr %ver, %g1 /* Get VERSION register. */ flushw /* Flush register file. */
/* This stuff has to be in sync with SILO and other potential boot loaders /* This stuff has to be in sync with SILO and other potential boot loaders
* Fields should be kept upward compatible and whenever any change is made, * Fields should be kept upward compatible and whenever any change is made,
* HdrS version should be incremented. * HdrS version should be incremented.
*/ */
.global root_flags, ram_flags, root_dev
.global ramdisk_image, ramdisk_size
.ascii "HdrS" .ascii "HdrS"
.word LINUX_VERSION_CODE .word LINUX_VERSION_CODE
.half 0x0201 /* HdrS version */ .half 0x0201 /* HdrS version */
...@@ -47,59 +54,80 @@ ramdisk_size: ...@@ -47,59 +54,80 @@ ramdisk_size:
.word 0 .word 0
.word reboot_command .word reboot_command
sparc64_boot:
/* We must be careful, 32-bit OpenBOOT will get confused if it /* We must be careful, 32-bit OpenBOOT will get confused if it
* tries to save away a register window to a 64-bit kernel * tries to save away a register window to a 64-bit kernel
* stack address. Flush all windows, disable interrupts, * stack address. Flush all windows, disable interrupts,
* remap if necessary, jump onto kernel trap table, then kernel * remap if necessary, jump onto kernel trap table, then kernel
* stack, or else we die. * stack, or else we die.
*
* PROM entry point is on %o4
*/ */
flushw /* Flush register file. */ sparc64_boot:
mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
stxa %g1, [%g0] ASI_LSU_CONTROL
/*
* Make sure we are in privileged mode, have address masking,
* using the ordinary globals and have enabled floating
* point.
*/
wrpr %g0, 0xf, %pil /* Interrupts off. */ wrpr %g0, 0xf, %pil /* Interrupts off. */
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF), %pstate
/* Remap ourselves to upper 64-bit addresses if necessary. /* Check if we are mapped where we expect to be in virtual
* memory. The Solaris /boot elf format bootloader
* will peek into our elf header and load us where
* we want to be, otherwise we have to re-map.
*/ */
sethi %uhi(PAGE_OFFSET), %g4
current_pc: current_pc:
rd %pc, %g2 rd %pc, %g3
sethi %uhi(KERNBASE), %g4
sllx %g4, 32, %g4 sllx %g4, 32, %g4
sethi %hi(current_pc), %g3
or %g3, %lo(current_pc), %g3 /* Check the run time program counter. */
add %g4, %g3, %g3
cmp %g3, %g2 set current_pc, %g5
be go_to_highmem add %g5, %g4, %g5
cmp %g3, %g5
be %xcc,sun4u_init
nop nop
/* Remap ourselves into high addresses. */ create_mappings:
/* %g5 holds the tlb data */
sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
sllx %g5, 32, %g5 sllx %g5, 32, %g5
or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_G | _PAGE_L), %g5 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
/* Be real fucking anal... */ /* We aren't mapped in at KERNBASE, so we need to create
stxa %g0, [%g4] ASI_IMMU_DEMAP * an I and D tlb entry to map KERNBASE to 0. Both entries
stxa %g0, [%g4] ASI_DMMU_DEMAP * are 4 Megs mappings and are locked in.
membar #Sync */
flush %g4 set TLB_TAG_ACCESS, %g3 /* 0x30 */
mov TLB_TAG_ACCESS, %g6 stxa %g4, [%g3] ASI_IMMU /* 0x50 */
stxa %g4, [%g6] ASI_IMMU stxa %g5, [%g0] ASI_ITLB_DATA_IN /* 0x54 */
stxa %g5, [%g0] ASI_ITLB_DATA_IN
membar #Sync membar #Sync
flush %g4
stxa %g4, [%g6] ASI_DMMU /* Put KERNBASE into the I/D Tag Access Register (TAR) */
stxa %g4, [%g3] ASI_DMMU /* 0x58 */
stxa %g5, [%g0] ASI_DTLB_DATA_IN stxa %g5, [%g0] ASI_DTLB_DATA_IN
membar #Sync membar #Sync
flush %g4
/* FIXME: Should clean here the page @ phys. 0 and map one page @ */ nop
nop
membar #Sync
go_to_highmem: ba,pt %xcc, go_to_highmem
jmpl %g3 + (execute_in_high_mem - current_pc), %g0
nop nop
execute_in_high_mem: /* Now do a non-relative jump so that PC is in high-memory */
go_to_highmem:
set sun4u_init, %g1
jmpl %g1 + %g4, %g0
nop
sun4u_init:
/* Remap our prom interface code */ /* Remap our prom interface code */
sethi %hi(__p1275_loc), %g7 sethi %hi(__p1275_loc), %g7
or %g7, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_G | _PAGE_L), %g7 or %g7, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_G | _PAGE_L), %g7
...@@ -114,29 +142,40 @@ execute_in_high_mem: ...@@ -114,29 +142,40 @@ execute_in_high_mem:
stxa %g3, [%g6] ASI_IMMU stxa %g3, [%g6] ASI_IMMU
stxa %g5, [%g0] ASI_ITLB_DATA_IN stxa %g5, [%g0] ASI_ITLB_DATA_IN
membar #Sync membar #Sync
flush %g3
stxa %g3, [%g6] ASI_DMMU stxa %g3, [%g6] ASI_DMMU
stxa %g5, [%g0] ASI_DTLB_DATA_IN stxa %g5, [%g0] ASI_DTLB_DATA_IN
membar #Sync membar #Sync
flush %g3 flush %g3
sethi %hi(nwindows), %g7 nop
nop
membar #Sync
/* Compute the number of windows in this machine
* store this in nwindows and nwindowsm1
*/
rdpr %ver, %g1 /* Get VERSION register. */
sethi %hi(nwindows), %g2
and %g1, VERS_MAXWIN, %g5 and %g1, VERS_MAXWIN, %g5
add %g7, %lo(nwindows), %g7 or %g2,%lo(nwindows),%g2
add %g5, 1, %g6 add %g5, 1, %g6
add %g7, (nwindows - nwindowsm1), %g3 add %g2, (nwindows - nwindowsm1), %g3
stx %g6, [%g7 + %g4] stx %g6, [%g2 + %g4]
stx %g5, [%g3 + %g4] stx %g5, [%g3 + %g4]
mov %sp, %o1 ! second argument to prom_init
sethi %hi(init_task), %g6 sethi %hi(init_task), %g6
or %g6, %lo(init_task), %g6 or %g6, %lo(init_task), %g6
add %g6, %g4, %g6 ! g6 usage is fixed as well add %g6, %g4, %g6 ! g6 usage is fixed as well
mov %sp, %l6
mov %o4, %l7
/* FIXME: Initialize MMU globals??? */ #if 0
/* This is too dangerous for now... Too many traps unfilled yet... */
sethi %hi(sparc64_ttable_tl0), %g5 sethi %hi(sparc64_ttable_tl0), %g5
add %g5, %g4, %g5 add %g5, %g4, %g5
wrpr %g5, %tba wrpr %g5, %tba
#endif
sethi %hi(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5 sethi %hi(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5
or %g5, %lo(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5 or %g5, %lo(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5
...@@ -145,9 +184,36 @@ execute_in_high_mem: ...@@ -145,9 +184,36 @@ execute_in_high_mem:
wrpr %g0, PSTATE_PEF | PSTATE_PRIV, %pstate wrpr %g0, PSTATE_PEF | PSTATE_PRIV, %pstate
wrpr %g0, 0, %wstate wrpr %g0, 0, %wstate
wrpr %g0, 0x0, %tl wrpr %g0, 0x0, %tl
fzero %f48
fzero %f50
fzero %f52
fzero %f54
fzero %f56
fzero %f58
fzero %f60
fzero %f62
/* Clear the bss */
sethi %hi(8191), %l2
or %l2, %lo(8191), %l2
sethi %hi(__bss_start), %l0
or %l0, %lo(__bss_start), %l0
sethi %hi(_end), %l1
or %l1, %lo(_end), %l1
add %l1, %l2, %l1
andn %l1, %l2, %l1
add %l2, 1, %l2
add %l0, %g4, %o0
1:
call bzero_1page
add %l0, %l2, %l0
cmp %l0, %l1
blu,pt %xcc, 1b
add %l0, %g4, %o0
mov %l6, %o1 ! OpenPROM stack
call prom_init call prom_init
mov %o4, %o0 ! OpenPROM cif handler mov %l7, %o0 ! OpenPROM cif handler
/* Off we go.... */ /* Off we go.... */
call start_kernel call start_kernel
...@@ -165,18 +231,11 @@ bootup_kernel_stack: ...@@ -165,18 +231,11 @@ bootup_kernel_stack:
#include "ttable.S" #include "ttable.S"
.global root_flags
.global ram_flags
.global root_dev
.global ramdisk_image
.global ramdisk_size
.data .data
.align 8 .align 8
.globl nwindows, nwindowsm1 .globl nwindows, nwindowsm1
nwindows: .xword 0 nwindows: .xword 0
nwindowsm1: .xword 0 nwindowsm1: .xword 0
.section ".fixup",#alloc,#execinstr .section ".fixup",#alloc,#execinstr
.globl __ret_efault .globl __ret_efault
__ret_efault: __ret_efault:
......
/* $Id: ioport.c,v 1.1 1996/12/28 18:39:39 davem Exp $ /* $Id: ioport.c,v 1.2 1997/03/18 17:59:31 jj Exp $
* ioport.c: Simple io mapping allocator. * ioport.c: Simple io mapping allocator.
* *
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
...@@ -29,21 +29,21 @@ unsigned long sparc_iobase_vaddr = IOBASE_VADDR; ...@@ -29,21 +29,21 @@ unsigned long sparc_iobase_vaddr = IOBASE_VADDR;
* to use your own mapping, but in practice this should not be used. * to use your own mapping, but in practice this should not be used.
* *
* Input: * Input:
* address: the obio address to map * address: Physical address to map
* virtual: if non zero, specifies a fixed virtual address where * virtual: if non zero, specifies a fixed virtual address where
* the mapping should take place. * the mapping should take place.
* len: the length of the mapping * len: the length of the mapping
* bus_type: The bus on which this io area sits. * bus_type: Optional high word of physical address.
* *
* Returns: * Returns:
* The virtual address where the mapping actually took place. * The virtual address where the mapping actually took place.
*/ */
void *sparc_alloc_io (void *address, void *virtual, int len, char *name, void *sparc_alloc_io (void *address, void *virtual, int len, char *name,
int bus_type, int rdonly) unsigned bus_type, int rdonly)
{ {
unsigned long vaddr, base_address; unsigned long vaddr, base_address;
unsigned long addr = (unsigned long) address; unsigned long addr = ((unsigned long) address) + (((unsigned long) bus_type) << 32);
unsigned long offset = (addr & (~PAGE_MASK)); unsigned long offset = (addr & (~PAGE_MASK));
if (virtual) { if (virtual) {
...@@ -74,7 +74,7 @@ void *sparc_alloc_io (void *address, void *virtual, int len, char *name, ...@@ -74,7 +74,7 @@ void *sparc_alloc_io (void *address, void *virtual, int len, char *name,
base_address = vaddr; base_address = vaddr;
/* Do the actual mapping */ /* Do the actual mapping */
for (; len > 0; len -= PAGE_SIZE) { for (; len > 0; len -= PAGE_SIZE) {
mapioaddr(addr, vaddr, bus_type, rdonly); mapioaddr(addr, vaddr, rdonly);
vaddr += PAGE_SIZE; vaddr += PAGE_SIZE;
addr += PAGE_SIZE; addr += PAGE_SIZE;
} }
......
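A hedged usage sketch of the updated sparc_alloc_io() signature, mirroring the auxio.c probe earlier in this commit: the "reg" structure comes from a prior prom_getproperty()/prom_apply_sbus_ranges() call, and the device name and variable are hypothetical.

/* Illustration only: map one register area described by a PROM "reg"
 * property; which_io supplies the high 32 bits of the physical address. */
#include <asm/oplib.h>
#include <asm/io.h>

static unsigned char *mydev_regs;	/* hypothetical driver state */

static void mydev_map_regs(struct linux_prom_registers *reg)
{
	mydev_regs = (unsigned char *) sparc_alloc_io(reg->phys_addr, 0,
						      reg->reg_size, "mydev",
						      reg->which_io, 0x0);
}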
/* $Id: rtrap.S,v 1.2 1997/02/26 11:09:25 jj Exp $ /* $Id: rtrap.S,v 1.4 1997/03/13 16:24:55 jj Exp $
* rtrap.S: Preparing for entry into the kernel on Sparc V9. * rtrap.S: Preparing for return from trap on Sparc V9.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/ */
#include <asm/pstate.h> #include <asm/pstate.h>
...@@ -11,4 +11,84 @@ ...@@ -11,4 +11,84 @@
.align 4 .align 4
.globl rtrap .globl rtrap
rtrap: rtrap:
/*not*/ done /*yet*/ sethi %hi(intr_count), %l0
or %l0, %lo(intr_count), %l0
ld [%l0 + %g4], %l1
sethi %hi(bh_active), %l2
brz,pt %l1, 2f
or %l2, %lo(bh_active), %l2
sethi %hi(bh_mask), %l1
or %l1, %lo(bh_mask), %l1
1:
ldx [%l2 + %g4], %l3
ldx [%l1 + %g4], %l4
andcc %l3, %l4, %g0
be,pt %xcc, 2f
mov 1, %l7
call do_bottom_half
st %l7, [%l0 + %g4]
ba,pt %xcc, 1b
st %g0, [%l0 + %g4]
2:
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE], %l1
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC], %l2
andcc %l1, TSTATE_PRIV, %l3
rdpr %pstate, %l7
be,pt %icc, to_user
andn %l7, PSTATE_IE, %l7
3:
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G1], %g1
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G2], %g2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G3], %g3
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G4], %g4
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G5], %g5
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G6], %g6
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_G7], %g7
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I0], %i0
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I1], %i1
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I2], %i2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I3], %i3
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I4], %i4
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I5], %i5
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I6], %i6
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I7], %i7
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_Y], %o3
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC], %o2
wr %o3, %g0, %y
wrpr %l7, %g0, %pstate
wrpr %g0, 1, %tl
wrpr %l1, %g0, %tstate
wrpr %l2, %g0, %tpc
brnz,pn %l3, 1f
wrpr %o2, %g0, %tnpc
/* We came here via to_user, i.e. the alternate globals (AG) are active now. */
restore
rdpr %wstate, %g1
rdpr %otherwin, %g2
srl %g1, 3, %g1
wrpr %g2, %g0, %canrestore
wrpr %g1, %g0, %wstate
wrpr %g0, %g0, %otherwin
retry
1:
restore
retry
to_user:
sethi %hi(need_resched), %l0
or %l0, %lo(need_resched), %l0
ld [%l0 + %g4], %l0
wrpr %o4, PSTATE_IE, %pstate
brz,pt %l0, 1f
ldx [%g6 + AOFF_task_signal], %l0
call schedule
nop
1:
ldx [%g6 + AOFF_task_blocked], %o0
or %l7, PSTATE_AG, %l7 ! Will need this for setting back wstate
andcc %l0, %o0, %g0
be,pt %xcc, 3b
mov %l5, %o2
mov %l6, %o3
add %sp, STACK_BIAS + REGWIN_SZ, %o1
call do_signal
add %o7, 3b-.-4, %o7
This diff is collapsed.
/* $Id: sparc64_ksyms.c,v 1.1 1997/03/03 16:51:45 jj Exp $ /* $Id: sparc64_ksyms.c,v 1.3 1997/03/18 17:59:10 jj Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
...@@ -74,24 +74,25 @@ EXPORT_SYMBOL(klock_info); ...@@ -74,24 +74,25 @@ EXPORT_SYMBOL(klock_info);
#endif #endif
EXPORT_SYMBOL_PRIVATE(_lock_kernel); EXPORT_SYMBOL_PRIVATE(_lock_kernel);
EXPORT_SYMBOL_PRIVATE(_unlock_kernel); EXPORT_SYMBOL_PRIVATE(_unlock_kernel);
EXPORT_SYMBOL(page_offset);
EXPORT_SYMBOL(stack_top);
EXPORT_SYMBOL(mstk48t02_regs); EXPORT_SYMBOL(mstk48t02_regs);
EXPORT_SYMBOL(request_fast_irq); EXPORT_SYMBOL(request_fast_irq);
EXPORT_SYMBOL(sparc_alloc_io); EXPORT_SYMBOL(sparc_alloc_io);
EXPORT_SYMBOL(sparc_free_io); EXPORT_SYMBOL(sparc_free_io);
#if 0
EXPORT_SYMBOL(io_remap_page_range); EXPORT_SYMBOL(io_remap_page_range);
EXPORT_SYMBOL(mmu_v2p);
EXPORT_SYMBOL(mmu_unlockarea); EXPORT_SYMBOL(mmu_unlockarea);
EXPORT_SYMBOL(mmu_lockarea); EXPORT_SYMBOL(mmu_lockarea);
EXPORT_SYMBOL(mmu_get_scsi_sgl); EXPORT_SYMBOL(mmu_get_scsi_sgl);
EXPORT_SYMBOL(mmu_get_scsi_one); EXPORT_SYMBOL(mmu_get_scsi_one);
EXPORT_SYMBOL(mmu_release_scsi_sgl); EXPORT_SYMBOL(mmu_release_scsi_sgl);
EXPORT_SYMBOL(mmu_release_scsi_one); EXPORT_SYMBOL(mmu_release_scsi_one);
#endif
EXPORT_SYMBOL(sparc_dvma_malloc); EXPORT_SYMBOL(sparc_dvma_malloc);
#if 0
EXPORT_SYMBOL(sun4c_unmapioaddr); EXPORT_SYMBOL(sun4c_unmapioaddr);
EXPORT_SYMBOL(srmmu_unmapioaddr); EXPORT_SYMBOL(srmmu_unmapioaddr);
#endif
#if CONFIG_SBUS #if CONFIG_SBUS
EXPORT_SYMBOL(SBus_chain); EXPORT_SYMBOL(SBus_chain);
EXPORT_SYMBOL(dma_chain); EXPORT_SYMBOL(dma_chain);
...@@ -119,14 +120,12 @@ EXPORT_SYMBOL(prom_getproperty); ...@@ -119,14 +120,12 @@ EXPORT_SYMBOL(prom_getproperty);
EXPORT_SYMBOL(prom_node_has_property); EXPORT_SYMBOL(prom_node_has_property);
EXPORT_SYMBOL(prom_setprop); EXPORT_SYMBOL(prom_setprop);
EXPORT_SYMBOL(prom_getbootargs); EXPORT_SYMBOL(prom_getbootargs);
EXPORT_SYMBOL(prom_apply_obio_ranges);
EXPORT_SYMBOL(prom_getname); EXPORT_SYMBOL(prom_getname);
EXPORT_SYMBOL(prom_feval); EXPORT_SYMBOL(prom_feval);
EXPORT_SYMBOL(prom_getstring); EXPORT_SYMBOL(prom_getstring);
EXPORT_SYMBOL(prom_apply_sbus_ranges); EXPORT_SYMBOL(prom_apply_sbus_ranges);
EXPORT_SYMBOL(prom_getint); EXPORT_SYMBOL(prom_getint);
EXPORT_SYMBOL(prom_getintdefault); EXPORT_SYMBOL(prom_getintdefault);
EXPORT_SYMBOL(romvec);
EXPORT_SYMBOL(__prom_getchild); EXPORT_SYMBOL(__prom_getchild);
EXPORT_SYMBOL(__prom_getsibling); EXPORT_SYMBOL(__prom_getsibling);
...@@ -176,4 +175,3 @@ EXPORT_SYMBOL_NOVERS(memcmp); ...@@ -176,4 +175,3 @@ EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy); EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset); EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memmove); EXPORT_SYMBOL_NOVERS(memmove);
EXPORT_SYMBOL_NOVERS(__ashrdi3);
This diff is collapsed.
This diff is collapsed.
# $Id: Makefile,v 1.1 1996/12/27 17:28:35 davem Exp $ # $Id: Makefile,v 1.5 1997/03/14 21:04:27 jj Exp $
# Makefile for Sparc library files.. # Makefile for Sparc library files..
# #
CFLAGS := $(CFLAGS) -ansi CFLAGS := $(CFLAGS) -ansi
OBJS = memset.o blockops.o OBJS = memset.o blockops.o locks.o memcpy.o strlen.o strncmp.o \
memscan.o strncpy_from_user.o strlen_user.o memcmp.o
lib.a: $(OBJS) lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS) $(AR) rcs lib.a $(OBJS)
...@@ -16,6 +17,30 @@ blockops.o: blockops.S ...@@ -16,6 +17,30 @@ blockops.o: blockops.S
memset.o: memset.S memset.o: memset.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S $(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
memcpy.o: memcpy.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S
strlen.o: strlen.S
$(CC) -D__ASSEMBLY__ -ansi -c -o strlen.o strlen.S
strncmp.o: strncmp.S
$(CC) -D__ASSEMBLY__ -ansi -c -o strncmp.o strncmp.S
memcmp.o: memcmp.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memcmp.o memcmp.S
locks.o: locks.S
$(CC) -D__ASSEMBLY__ -ansi -c -o locks.o locks.S
memscan.o: memscan.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memscan.o memscan.S
strncpy_from_user.o: strncpy_from_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S
strlen_user.o: strlen_user.S
$(CC) -D__ASSEMBLY__ -ansi -c -o strlen_user.o strlen_user.S
dep: dep:
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
This diff is collapsed.
This diff is collapsed.
/* $Id: memcmp.S,v 1.1 1997/03/14 21:04:23 jj Exp $
* Sparc64 optimized memcmp code.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
.text
.align 4
.globl __memcmp, memcmp
__memcmp:
memcmp:
brlez,pn %o2, 2f
sub %g0, %o2, %o3
add %o0, %o2, %o0
add %o1, %o2, %o1
ldub [%o0 + %o3], %o4
1:
ldub [%o1 + %o3], %o5
sub %o4, %o5, %o4
brnz,pn %o4, 3f
addcc %o3, 1, %o3
bne,a,pt %xcc, 1b
ldub [%o0 + %o3], %o4
2:
retl
clr %o0
3:
retl
mov %o4, %o0
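As a plain-C cross-check of the routine above (illustration only, not part of the commit), the loop implements the usual byte-wise comparison:

/* Reference model: return the difference of the first mismatching bytes,
 * or 0 when the first n bytes are equal or n <= 0. */
static int memcmp_model(const void *a, const void *b, long n)
{
	const unsigned char *s1 = a, *s2 = b;
	long i;

	for (i = 0; i < n; i++)
		if (s1[i] != s2[i])
			return (int) s1[i] - (int) s2[i];
	return 0;
}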
This diff is collapsed.
The IDE driver Documentation is now in the linux/Documentation directory.
-mlord@pobox.com
This diff is collapsed.