Commit f552bdf8 authored by Linus Torvalds

Import 2.1.19

parent f1bbcc41
......@@ -4063,6 +4063,11 @@ CONFIG_SUN_AUDIO
This is support for the soundcards on Sun workstations. The code
does not exist yet, so you might as well say N here.
SB32/AWE support
CONFIG_AWE32_SYNTH
Enable this option if you have an SB32 or SB AWE soundcard. See
linux/drivers/sound/lowlevel/README.awe for more info.
Kernel profiling support
CONFIG_PROFILE
This is for kernel hackers who want to know how much time the kernel
......
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 18
SUBLEVEL = 19
ARCH = i386
......
......@@ -11,7 +11,7 @@ CONFIG_EXPERIMENTAL=y
# Loadable module support
#
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
# CONFIG_MODVERSIONS is not set
CONFIG_KERNELD=y
#
......
/* $Id: entry.S,v 1.128 1996/12/18 06:33:39 tridge Exp $
/* $Id: entry.S,v 1.129 1996/12/30 00:31:07 davem Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -1722,4 +1722,42 @@ flush_patch_exception:
jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
mov 1, %g1 ! signal EFAULT condition
.align 4
.globl C_LABEL(kill_user_windows), kuw_patch1_7win
.globl kuw_patch1
kuw_patch1_7win: sll %o3, 6, %o3
/* No matter how much overhead this routine has in the worst
* case scenario, it is several times better than taking the
* traps with the old method of just doing flush_user_windows().
*/
C_LABEL(kill_user_windows):
ld [%g6 + THREAD_UMASK], %o0 ! get current umask
orcc %g0, %o0, %g0 ! if no bits set, we are done
be 3f ! nothing to do
rd %psr, %o5 ! must clear interrupts
or %o5, PSR_PIL, %o4 ! or else that could change
wr %o4, 0x0, %psr ! the uwinmask state
WRITE_PAUSE ! burn them cycles
1:
ld [%g6 + THREAD_UMASK], %o0 ! get consistent state
orcc %g0, %o0, %g0 ! did an interrupt come in?
be 4f ! yep, we are done
rd %wim, %o3 ! get current wim
srl %o3, 1, %o4 ! simulate a save
kuw_patch1:
sll %o3, 7, %o3 ! compute next wim
or %o4, %o3, %o3 ! result
andncc %o0, %o3, %o0 ! clean this bit in umask
bne kuw_patch1 ! not done yet
srl %o3, 1, %o4 ! begin another save simulation
wr %o3, 0x0, %wim ! set the new wim
st %g0, [%g6 + THREAD_UMASK] ! clear uwinmask
4:
wr %o5, 0x0, %psr ! re-enable interrupts
WRITE_PAUSE ! burn baby burn
3:
retl ! return
st %g0, [%g6 + THREAD_W_SAVED] ! no windows saved
/* End of entry.S */
/* $Id: head.S,v 1.75 1996/12/20 07:54:57 davem Exp $
/* $Id: head.S,v 1.76 1996/12/30 00:31:09 davem Exp $
* head.S: The initial boot code for the Sparc port of Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -1062,6 +1062,10 @@ sun4c_continue_boot:
PATCH_INSN(rirq_7win_patch5, rirq_patch5)
#endif
/* Patch for killing user windows from the register file. */
PATCH_INSN(kuw_patch1_7win, kuw_patch1)
/* Now patch the kernel window flush sequences.
* This saves 2 traps on every switch and fork.
*/
......
/* $Id: process.c,v 1.85 1996/12/18 06:33:42 tridge Exp $
/* $Id: process.c,v 1.87 1996/12/30 06:16:21 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -280,7 +280,7 @@ void show_thread(struct thread_struct *tss)
*/
void exit_thread(void)
{
flush_user_windows();
kill_user_windows();
#ifndef __SMP__
if(last_task_used_math == current) {
#else
......@@ -296,15 +296,13 @@ void exit_thread(void)
current->flags &= ~PF_USEDFPU;
#endif
}
mmu_exit_hook();
}
void flush_thread(void)
{
/* Make sure old user windows don't get in the way. */
flush_user_windows();
current->tss.w_saved = 0;
current->tss.uwinmask = 0;
kill_user_windows();
current->tss.sstk_info.cur_status = 0;
current->tss.sstk_info.the_stack = 0;
......@@ -326,7 +324,6 @@ void flush_thread(void)
#endif
}
mmu_flush_hook();
/* Now, this task is no longer a kernel thread. */
current->tss.flags &= ~SPARC_FLAG_KTHREAD;
current->tss.current_ds = USER_DS;
......
/* $Id: rtrap.S,v 1.40 1996/12/10 06:06:18 davem Exp $
/* $Id: rtrap.S,v 1.41 1996/12/28 18:14:21 davem Exp $
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -70,7 +70,6 @@ ret_trap_entry:
sethi %hi(C_LABEL(need_resched)), %twin_tmp1
wr %t_psr, 0x0, %psr
WRITE_PAUSE
b ret_trap_kernel
nop
......@@ -179,7 +178,6 @@ ret_trap_unaligned_pc:
ld [%sp + REGWIN_SZ + PT_PSR], %o3
wr %t_wim, 0x0, %wim ! or else...
WRITE_PAUSE
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
......@@ -206,7 +204,6 @@ rtrap_patch4: srl %g2, 7, %g2
rtrap_patch5: and %g1, 0xff, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* Grrr, make sure we load from the right %sp... */
LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
......@@ -232,7 +229,6 @@ rtrap_patch5: and %g1, 0xff, %g1
ret_trap_user_stack_is_bolixed:
wr %t_wim, 0x0, %wim
WRITE_PAUSE
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
......
/* $Id: setup.c,v 1.78 1996/12/19 08:06:30 davem Exp $
/* $Id: setup.c,v 1.79 1996/12/23 10:57:02 ecd Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -248,7 +248,7 @@ extern unsigned ramdisk_size;
extern int root_mountflags;
extern void register_console(void (*proc)(char *));
extern void register_console(void (*proc)(const char *));
char saved_command_line[256];
char reboot_command[256];
......@@ -310,7 +310,7 @@ __initfunc(void setup_arch(char **cmdline_p,
printk("SUN4U\n");
break;
case ap1000:
register_console(prom_printf);
register_console((void (*) (const char *))prom_printf);
printk("AP1000\n");
packed = 1;
break;
......
/* $Id: sparc_ksyms.c,v 1.32 1996/12/18 06:33:45 tridge Exp $
/* $Id: sparc_ksyms.c,v 1.33 1996/12/29 20:46:01 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: sys_sparc.c,v 1.32 1996/12/19 05:25:46 davem Exp $
/* $Id: sys_sparc.c,v 1.33 1996/12/24 08:59:33 davem Exp $
* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
......@@ -201,18 +201,47 @@ sparc_breakpoint (struct pt_regs *regs)
#endif
}
extern int
sys_sigaction (int signum, const struct sigaction *action, struct sigaction *oldaction);
extern void check_pending(int signum);
asmlinkage int
sparc_sigaction (int signum, const struct sigaction *action, struct sigaction *oldaction)
{
if (signum >= 0){
return sys_sigaction (signum, action, oldaction);
} else {
struct sigaction new_sa, *p;
if(signum < 0) {
current->tss.new_signal = 1;
return sys_sigaction (-signum, action, oldaction);
signum = -signum;
}
if (signum<1 || signum>32)
return -EINVAL;
p = signum - 1 + current->sig->action;
if (action) {
int err = verify_area(VERIFY_READ,action,sizeof(struct sigaction));
if (err)
return err;
if (signum==SIGKILL || signum==SIGSTOP)
return -EINVAL;
if(copy_from_user(&new_sa, action, sizeof(struct sigaction)))
return -EFAULT;
if (new_sa.sa_handler != SIG_DFL && new_sa.sa_handler != SIG_IGN) {
err = verify_area(VERIFY_READ, new_sa.sa_handler, 1);
if (err)
return err;
}
}
if (oldaction) {
if (copy_to_user(oldaction, p, sizeof(struct sigaction)))
return -EFAULT;
}
if (action) {
*p = new_sa;
check_pending(signum);
}
return 0;
}
#ifndef CONFIG_AP1000
......
/* $Id: sys_sunos.c,v 1.69 1996/12/21 04:50:38 tridge Exp $
/* $Id: sys_sunos.c,v 1.71 1996/12/29 20:46:02 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -26,7 +26,6 @@
* to do the inverse mapping.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
......@@ -1155,49 +1154,52 @@ asmlinkage int sunos_accept(int fd, struct sockaddr *sa, int *addrlen)
#define SUNOS_SV_INTERRUPT 2
extern asmlinkage int sys_sigaction(int, const struct sigaction *, struct sigaction *);
extern void check_pending(int signum);
asmlinkage int sunos_sigaction(int signum, const struct sigaction *action,
struct sigaction *oldaction)
{
struct sigaction tmp_sa, *tmp_sap;
struct sigaction new_sa, *p;
const int sigaction_size = sizeof (struct sigaction) - sizeof (void *);
int err;
int old_fs = USER_DS;
current->personality |= PER_BSD;
if (signum<1 || signum>32)
return -EINVAL;
p = signum - 1 + current->sig->action;
if (action) {
if(copy_from_user(&tmp_sa, action, sigaction_size))
if(copy_from_user(&new_sa, action, sigaction_size))
return -EFAULT;
if (oldaction) {
err = verify_area(VERIFY_WRITE,oldaction,sigaction_size);
if (signum==SIGKILL || signum==SIGSTOP)
return -EINVAL;
memset(&new_sa, 0, sizeof(struct sigaction));
if(copy_from_user(&new_sa, action, sigaction_size))
return -EFAULT;
if (new_sa.sa_handler != SIG_DFL && new_sa.sa_handler != SIG_IGN) {
err = verify_area(VERIFY_READ, new_sa.sa_handler, 1);
if (err)
return err;
}
if (tmp_sa.sa_flags & SUNOS_SV_INTERRUPT)
tmp_sa.sa_flags &= ~SUNOS_SV_INTERRUPT;
else
tmp_sa.sa_flags |= SA_RESTART;
old_fs = get_fs ();
set_fs (get_ds ());
tmp_sap = &tmp_sa;
} else {
tmp_sap = (struct sigaction *) action;
new_sa.sa_flags ^= SUNOS_SV_INTERRUPT;
}
err = sys_sigaction (signum, tmp_sap, oldaction);
if (err == 0 && oldaction){
if (oldaction) {
if (copy_to_user(oldaction, p, sigaction_size))
return -EFAULT;
if (oldaction->sa_flags & SA_RESTART)
oldaction->sa_flags &= ~SA_RESTART;
else
oldaction->sa_flags |= SUNOS_SV_INTERRUPT;
}
if (action)
set_fs (old_fs);
return err;
if (action) {
*p = new_sa;
check_pending(signum);
}
return 0;
}
......
/* $Id: systbls.S,v 1.55 1996/12/18 06:33:47 tridge Exp $
/* $Id: systbls.S,v 1.56 1996/12/29 20:46:03 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
......@@ -106,7 +106,7 @@ C_LABEL(sys_call_table):
.long C_LABEL(sys_nis_syscall), C_LABEL(sys_getdents), C_LABEL(sys_setsid)
.long C_LABEL(sys_fchdir), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
.long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
.long C_LABEL(sys_nis_syscall), C_LABEL(sys_sigpending), C_LABEL(sys_nis_syscall)
.long C_LABEL(sys_nis_syscall), C_LABEL(sys_sigpending), C_LABEL(sys_query_module)
.long C_LABEL(sys_setpgid), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
.long C_LABEL(sys_nis_syscall), C_LABEL(sys_newuname), C_LABEL(sys_init_module)
.long C_LABEL(sys_personality), C_LABEL(sys_prof), C_LABEL(sys_break)
......
/* $Id: traps.c,v 1.49 1996/12/18 06:33:49 tridge Exp $
/* $Id: traps.c,v 1.50 1996/12/29 20:46:05 davem Exp $
* arch/sparc/kernel/traps.c
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -10,7 +10,6 @@
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/signal.h>
#include <asm/delay.h>
......
/* $Id: wof.S,v 1.30 1996/12/10 06:06:19 davem Exp $
/* $Id: wof.S,v 1.31 1996/12/28 18:14:22 davem Exp $
* wof.S: Sparc window overflow handler.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -205,7 +205,6 @@ spwin_user_stack_is_bolixed:
* c-code to gun down the process.
*/
rd %psr, %glob_tmp
WRITE_PAUSE
andcc %glob_tmp, PSR_PS, %g0
bne spwin_bad_ustack_from_kernel
nop
......@@ -249,8 +248,7 @@ spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
/* Turn on traps and call c-code to deal with it. */
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
nop
call C_LABEL(window_overflow_fault)
nop
......@@ -316,7 +314,6 @@ C_LABEL(spwin_sun4c_stackchk):
sra %sp, 29, %glob_tmp
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4
nop
......@@ -327,7 +324,6 @@ C_LABEL(spwin_sun4c_stackchk):
and %sp, 0xfff, %glob_tmp ! delay slot
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4
nop
......@@ -348,7 +344,6 @@ spwin_sun4c_twopages:
add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4
nop
......@@ -360,7 +355,6 @@ spwin_sun4c_twopages:
add %sp, 0x38, %glob_tmp
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4
nop
......@@ -374,7 +368,6 @@ spwin_sun4c_onepage:
nop
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4
nop
......@@ -429,6 +422,5 @@ C_LABEL(spwin_srmmu_stackchk):
restore %g0, %g0, %g0
rd %psr, %glob_tmp
WRITE_PAUSE
b spwin_user_stack_is_bolixed + 0x4 ! we faulted, ugh
nop
/* $Id: wuf.S,v 1.28 1996/12/10 06:06:20 davem Exp $
/* $Id: wuf.S,v 1.29 1996/12/28 18:14:23 davem Exp $
* wuf.S: Window underflow trap handler for the Sparc.
*
* Copyright (C) 1995 David S. Miller
......@@ -184,8 +184,7 @@ fwin_user_stack_is_bolixed:
ENTER_SYSCALL
wr %t_psr, PSR_ET, %psr ! enable traps
WRITE_PAUSE
nop
call C_LABEL(window_underflow_fault)
mov %g4, %o0
......@@ -343,7 +342,7 @@ C_LABEL(srmmu_fwin_stackchk):
nop
wr %t_psr, 0x0, %psr
WRITE_PAUSE
nop
b fwin_user_finish_up + 0x4
nop
......
......@@ -37,7 +37,7 @@ search_one_table(const struct exception_table_entry *start,
}
if (first > start && first[-1].insn < value
&& !first[-1].fixup && first->insn < value) {
*g2 = (value - firstp[-1].insn)/4;
*g2 = (value - first[-1].insn)/4;
return first->fixup;
}
return 0;
......@@ -60,7 +60,7 @@ search_exception_table(unsigned long addr, unsigned long *g2)
if (mp->ex_table_start == NULL)
continue;
ret = search_one_table(mp->ex_table_start,
mp->ex_table_stop-1, addr, g2);
mp->ex_table_end-1, addr, g2);
if (ret) return ret;
}
#endif
......
/* $Id: loadmmu.c,v 1.43 1996/12/18 06:43:24 tridge Exp $
/* $Id: loadmmu.c,v 1.45 1996/12/30 06:16:28 davem Exp $
* loadmmu.c: This code loads up all the mm function pointers once the
* machine type has been determined. It also sets the static
* mmu values such as PAGE_NONE, etc.
......@@ -15,6 +15,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/a.out.h>
#include <asm/mmu_context.h>
unsigned long page_offset = 0xf0000000;
unsigned long stack_top = 0xf0000000 - PAGE_SIZE;
......@@ -30,8 +31,8 @@ void (*free_task_struct)(struct task_struct *tsk);
void (*quick_kernel_fault)(unsigned long);
void (*mmu_exit_hook)(void);
void (*mmu_flush_hook)(void);
void (*init_new_context)(struct mm_struct *mm);
void (*destroy_context)(struct mm_struct *mm);
/* translate between physical and virtual addresses */
unsigned long (*mmu_v2p)(unsigned long);
......@@ -135,7 +136,6 @@ pmd_t * (*pmd_alloc)(pgd_t *, unsigned long);
void (*pgd_free)(pgd_t *);
pgd_t * (*pgd_alloc)(void);
void (*pgd_flush)(pgd_t *);
int (*pte_write)(pte_t);
int (*pte_dirty)(pte_t);
......
/* $Id: sun4c.c,v 1.135 1996/12/23 05:27:50 davem Exp $
/* $Id: sun4c.c,v 1.137 1996/12/30 06:16:36 davem Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -21,6 +21,7 @@
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/openprom.h>
#include <asm/mmu_context.h>
extern int num_segmaps, num_contexts;
......@@ -1609,22 +1610,16 @@ static void sun4c_switch_to_context(struct task_struct *tsk)
restore_flags(flags);
}
static void sun4c_flush_hook(void)
static void sun4c_init_new_context(struct mm_struct *mm)
{
if(current->tss.flags & SPARC_FLAG_KTHREAD) {
unsigned long flags;
save_flags(flags);
sun4c_alloc_context(current->mm);
sun4c_set_context(current->mm->context);
restore_flags(flags);
}
sun4c_alloc_context(mm);
if(mm == current->mm)
sun4c_set_context(mm->context);
}
static void sun4c_exit_hook(void)
static void sun4c_destroy_context(struct mm_struct *mm)
{
struct ctx_list *ctx_old;
struct mm_struct *mm = current->mm;
if(mm->context != NO_CONTEXT && mm->count == 1) {
unsigned long flags;
......@@ -2038,10 +2033,6 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
restore_flags(flags);
}
static void sun4c_pgd_flush(pgd_t *pgdp)
{
}
extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sparc_context_init(unsigned long, int);
extern unsigned long end;
......@@ -2177,7 +2168,6 @@ __initfunc(void ld_mmu_sun4c(void))
pmd_alloc = sun4c_pmd_alloc;
pgd_free = sun4c_pgd_free;
pgd_alloc = sun4c_pgd_alloc;
pgd_flush = sun4c_pgd_flush;
pte_write = sun4c_pte_write;
pte_dirty = sun4c_pte_dirty;
......@@ -2189,8 +2179,8 @@ __initfunc(void ld_mmu_sun4c(void))
pte_mkdirty = sun4c_pte_mkdirty;
pte_mkyoung = sun4c_pte_mkyoung;
update_mmu_cache = sun4c_update_mmu_cache;
mmu_exit_hook = sun4c_exit_hook;
mmu_flush_hook = sun4c_flush_hook;
destroy_context = sun4c_destroy_context;
init_new_context = sun4c_init_new_context;
mmu_lockarea = sun4c_lockarea;
mmu_unlockarea = sun4c_unlockarea;
......
/* $Id: tree.c,v 1.13 1996/12/18 06:46:56 tridge Exp $
/* $Id: tree.c,v 1.14 1996/12/29 20:46:12 davem Exp $
* tree.c: Basic device tree traversal/scanning for the Linux
* prom library.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/config.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kernel.h>
......
# $Id: Makefile,v 1.2 1996/12/27 17:28:20 davem Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
# 64-bit Sparc.
#
# Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
#
# If the solaris /bin/sh wasn't so broken, I wouldn't need the following
# line...
SHELL =/bin/bash
CC = sparc64-linux-gcc -D__KERNEL__ -I$(TOPDIR)/include
AS = sparc64-linux-as
LD = sparc64-linux-ld
NM = sparc64-linux-nm
AR = sparc64-linux-ar
RANLIB = sparc64-linux-ranlib
#
# Uncomment the first CFLAGS if you are doing kgdb source level
# debugging of the kernel to get the proper debugging information.
#CFLAGS := $(CFLAGS) -g -pipe
CFLAGS := $(CFLAGS) -pipe
LINKFLAGS = -N -Ttext 0xFFFFF80000008000
HEAD := arch/sparc/kernel/head.o
SUBDIRS := $(SUBDIRS) arch/sparc64/kernel arch/sparc64/lib arch/sparc64/mm \
arch/sparc64/prom
ARCHIVES := arch/sparc64/kernel/kernel.o arch/sparc64/mm/mm.o $(ARCHIVES)
LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc64/prom/promlib.a \
$(TOPDIR)/arch/sparc64/lib/lib.a
INITOBJ = $(TOPDIR)/arch/sparc64/kernel/initobj.o
FINITOBJ = $(TOPDIR)/arch/sparc64/kernel/finitobj.o
archclean:
archdep:
# $Id: config.in,v 1.1 1996/12/27 17:28:19 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
mainmenu_name "Linux/SPARC Kernel Configuration"
mainmenu_option next_comment
comment 'Code maturity level options'
bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
endmenu
mainmenu_option next_comment
comment 'Loadable module support'
bool 'Enable loadable module support' CONFIG_MODULES
if [ "$CONFIG_MODULES" = "y" ]; then
bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
bool 'Kernel daemon support (e.g. autoload of modules)' CONFIG_KERNELD
fi
endmenu
mainmenu_option next_comment
comment 'General setup'
bool 'Support for AP1000 multicomputer' CONFIG_AP1000
if [ "$CONFIG_AP1000" = "y" ]; then
define_bool CONFIG_NO_KEYBOARD y
define_bool CONFIG_APFDDI y
define_bool CONFIG_APBLOCK y
define_bool CONFIG_APBIF y
tristate 'OPIU DDV Driver' CONFIG_DDV
else
# Global things across all Sun machines.
define_bool CONFIG_SBUS y
define_bool CONFIG_SBUSCHAR y
define_bool CONFIG_SUN_MOUSE y
define_bool CONFIG_SERIAL y
define_bool CONFIG_SUN_SERIAL y
define_bool CONFIG_SUN_KEYBOARD y
define_bool CONFIG_SUN_CONSOLE y
define_bool CONFIG_SUN_AUXIO y
define_bool CONFIG_SUN_IO y
source drivers/sbus/char/Config.in
fi
tristate 'Openprom tree appears in /proc/openprom (EXPERIMENTAL)' CONFIG_SUN_OPENPROMFS
bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA
fi
endmenu
mainmenu_option next_comment
comment 'Floppy, IDE, and other block devices'
bool 'Normal floppy disk support' CONFIG_BLK_DEV_FD
bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD
if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
tristate ' Linear (append) mode' CONFIG_MD_LINEAR
tristate ' RAID-0 (striping) mode' CONFIG_MD_STRIPED
# tristate ' RAID-1 (mirroring) mode' CONFIG_MD_MIRRORING
fi
tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
fi
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
endmenu
if [ "$CONFIG_NET" = "y" ]; then
source net/Config.in
fi
mainmenu_option next_comment
comment 'SCSI support'
tristate 'SCSI support' CONFIG_SCSI
if [ "$CONFIG_SCSI" != "n" ]; then
comment 'SCSI support type (disk, tape, CDrom)'
dep_tristate 'SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI
dep_tristate 'SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
dep_tristate 'SCSI CDROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI
if [ "$CONFIG_BLK_DEV_SR" != "n" ]; then
bool ' Enable vendor-specific extensions (for SCSI CDROM)' CONFIG_BLK_DEV_SR_VENDOR
fi
dep_tristate 'SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI
comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
bool 'Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
bool 'Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
mainmenu_option next_comment
comment 'SCSI low-level drivers'
bool 'Sparc ESP Scsi Driver' CONFIG_SCSI_SUNESP $CONFIG_SCSI
tristate 'PTI Qlogic,ISP Driver' CONFIG_SCSI_QLOGICPTI $CONFIG_SCSI
endmenu
fi
endmenu
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
comment 'Network device support'
bool 'Network device support' CONFIG_NETDEVICES
if [ "$CONFIG_NETDEVICES" = "y" ]; then
tristate 'Dummy net driver support' CONFIG_DUMMY
tristate 'PPP (point-to-point) support' CONFIG_PPP
if [ ! "$CONFIG_PPP" = "n" ]; then
comment 'CCP compressors for PPP are only built as modules.'
fi
tristate 'SLIP (serial line) support' CONFIG_SLIP
if [ "$CONFIG_SLIP" != "n" ]; then
bool ' CSLIP compressed headers' CONFIG_SLIP_COMPRESSED
bool ' Keepalive and linefill' CONFIG_SLIP_SMART
bool ' Six bit SLIP encapsulation' CONFIG_SLIP_MODE_SLIP6
fi
bool 'Sun LANCE support' CONFIG_SUNLANCE
tristate 'Sun Happy Meal 10/100baseT support' CONFIG_HAPPYMEAL
tristate 'Sun QuadEthernet support' CONFIG_SUNQE
tristate 'MyriCOM Gigabit Ethernet support' CONFIG_MYRI_SBUS
# bool 'FDDI driver support' CONFIG_FDDI
# if [ "$CONFIG_FDDI" = "y" ]; then
# fi
fi
endmenu
fi
source fs/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
bool 'Kernel profiling support' CONFIG_PROFILE
if [ "$CONFIG_PROFILE" = "y" ]; then
int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
fi
endmenu
#
# Automatically generated make config: don't edit
#
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
#
# Loadable module support
#
# CONFIG_MODULES is not set
#
# General setup
#
# CONFIG_AP1000 is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
CONFIG_SUN_MOUSE=y
CONFIG_SERIAL=y
CONFIG_SUN_SERIAL=y
CONFIG_SUN_KEYBOARD=y
CONFIG_SUN_CONSOLE=y
CONFIG_SUN_AUXIO=y
CONFIG_SUN_IO=y
#
# SBUS Frame Buffer support
#
SUN_FBS_IN_PROCFS=y
CONFIG_SUN_FB_DISPLAY=y
SUN_FB_CGSIX=y
SUN_FB_TCX=y
SUN_FB_CGTHREE=y
SUN_FB_CGFOURTEEN=y
SUN_FB_BWTWO=y
SUN_FB_LEO=y
TADPOLE_FB_WEITEK=y
SUN_FB_FAST_ONE=y
SUN_FB_FAST_TWO=y
SUN_FB_FAST_MONO=y
SUN_FB_GENERIC=y
#
# Misc Linux/SPARC drivers
#
CONFIG_SUN_OPENPROMIO=y
CONFIG_SUN_MOSTEK_RTC=y
CONFIG_SUN_OPENPROMFS=y
CONFIG_NET=y
CONFIG_SYSVIPC=y
CONFIG_BINFMT_AOUT=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_JAVA=y
#
# Floppy, IDE, and other block devices
#
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
CONFIG_MD_STRIPED=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BLK_DEV_LOOP=y
#
# Networking options
#
CONFIG_NETLINK=y
CONFIG_RTNETLINK=y
CONFIG_FIREWALL=y
CONFIG_NET_ALIAS=y
CONFIG_INET=y
CONFIG_IP_FORWARD=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_FIREWALL=y
# CONFIG_IP_FIREWALL_NETLINK is not set
# CONFIG_IP_FIREWALL_VERBOSE is not set
CONFIG_IP_MASQUERADE=y
#
# Protocol-specific masquerading support will be built as modules.
#
# CONFIG_IP_TRANSPARENT_PROXY is not set
# CONFIG_IP_ALWAYS_DEFRAG is not set
# CONFIG_IP_ACCT is not set
# CONFIG_IP_ROUTER is not set
CONFIG_NET_IPIP=y
# CONFIG_IP_MROUTE is not set
CONFIG_IP_ALIAS=y
# CONFIG_ARPD is not set
#
# (it is safe to leave these untouched)
#
# CONFIG_INET_PCTCP is not set
CONFIG_INET_RARP=y
# CONFIG_PATH_MTU_DISCOVERY is not set
CONFIG_IP_NOSR=y
CONFIG_SKB_LARGE=y
CONFIG_IPV6=y
#
#
#
CONFIG_IPX=y
# CONFIG_IPX_INTERN is not set
# CONFIG_IPX_PPROP_ROUTING is not set
CONFIG_ATALK=y
# CONFIG_AX25 is not set
CONFIG_X25=y
# CONFIG_BRIDGE is not set
# CONFIG_LLC is not set
#
# SCSI support
#
CONFIG_SCSI=y
#
# SCSI support type (disk, tape, CDrom)
#
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
CONFIG_SCSI_CONSTANTS=y
#
# SCSI low-level drivers
#
CONFIG_SCSI_SUNESP=y
CONFIG_SCSI_QLOGICPTI=y
#
# Network device support
#
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_PPP=y
#
# CCP compressors for PPP are only built as modules.
#
CONFIG_SLIP=y
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
# CONFIG_SLIP_MODE_SLIP6 is not set
CONFIG_SUNLANCE=y
CONFIG_HAPPYMEAL=y
CONFIG_SUNQE=y
CONFIG_MYRI_SBUS=y
#
# Filesystems
#
CONFIG_QUOTA=y
CONFIG_MINIX_FS=y
CONFIG_EXT_FS=y
CONFIG_EXT2_FS=y
CONFIG_XIA_FS=y
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_UMSDOS_FS=y
CONFIG_PROC_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_RNFS_BOOTP=y
CONFIG_RNFS_RARP=y
CONFIG_SMB_FS=y
CONFIG_SMB_WIN95=y
CONFIG_NCP_FS=y
CONFIG_ISO9660_FS=y
CONFIG_HPFS_FS=y
CONFIG_SYSV_FS=y
CONFIG_AFFS_FS=y
CONFIG_AMIGA_PARTITION=y
CONFIG_UFS_FS=y
CONFIG_BSD_DISKLABEL=y
CONFIG_SMD_DISKLABEL=y
#
# Kernel hacking
#
# CONFIG_PROFILE is not set
# $Id: Makefile,v 1.1 1996/12/26 10:16:41 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now in the main makefile...
.S.s:
$(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
.S.o:
$(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
all: kernel.o head.o initobj.o finitobj.o
O_TARGET := kernel.o
O_OBJS := etrap.o rtrap.o signal32.o
OX_OBJS := sparc64_ksyms.o
head.o: head.S
$(CC) -D__ASSEMBLY__ -ansi -c $*.S -o $*.o
initobj.o: initobj.S
$(CC) -D__ASSEMBLY__ -ansi -c initobj.S -o initobj.o
finitobj.o: initobj.S
$(CC) -D__ASSEMBLY__ -ansi -c finitobj.S -o finitobj.o
include $(TOPDIR)/Rules.make
/* $Id: dtlb_miss.S,v 1.4 1996/12/28 18:39:40 davem Exp $
* dtlb_miss.S: Data TLB miss code, this is included directly
* into the trap table.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
/* We are in the MMU globals, %g7 contains the physical
* address of current->mm->pgd at all times.
*
* Many subtle things are done here. The high bits of
* the virtual address missed are most easily obtained
* from the tag target (it is at address zero in ASI_IMMU
* so no address formation is necessary to get at this).
* This is used to compute the pgd and pmd table offsets.
*
* Even more clever is that physical page zero is always
* a page full of zeroes. This means we can just follow
* through with all the page table traversals even if nothing
* is mapped because we'll just do loads from page zero
* and get yet another zero. We only need to do the check
* for the valid bit being set in the final pte we obtain.
*
* Furthermore, we set the TSB base register to the address
* zero, and we use the 8KB tsb ptr to calculate the pte
* offset. Again it is at address zero in ASI_IMMU_TSB_8KB_PTR
* so no address formation is necessary, saves more instructions.
*
* We use physical address accesses to get at the page
* tables, and this is for two reasons. This makes it
* impossible to take a fault while we are servicing the
* miss. Also this physical bypass access only allocates
* in the E-cache, and thus we prevent D-cache pollution
* from the miss handlers probing the page tables.
*
* It looks very hairy and slow, but I take only one more load from
* RAM of overhead than the Solaris version, and my version is one
* instruction quicker for a true TLB miss. More importantly, all
* true TLB misses under Linux will be serviced in _constant_ time.
* When using the TSB in the manner it was intended to be used (like
* Solaris does), the overhead for a TLB miss is _indeterminate_,
* especially during process startup when the TSB is cold.
*
* XXX I think I can knock off two more instructions here...
*/
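/* For reference, a rough C-level sketch of the walk the code below performs.
 * This is purely illustrative: the helper names (load_phys, dtlb_load) are
 * made up, and the kernel/VMALLOC special case handled at 3: is ignored.
 *
 *	tag = ldxa(ASI_DMMU, 0);			! Tag Target
 *	pgd_entry = load_phys(pgd_paddr + ((tag >> 8) & 0x1ff8));
 *	pmd_entry = load_phys(pgd_entry + ((tag << 2) & 0x1ff8));
 *	pte = load_phys(pmd_entry + (ldxa(ASI_DMMU_TSB_8KB_PTR, 0) >> 1));
 *	if (pte & _PAGE_VALID)				! sign bit, hence the brlz
 *		dtlb_load(pte);				! then retry
 *	else
 *		goto sparc64_dtlb_refbit_catch;
 *
 * Here pgd_paddr stands for the physical address of current->mm->pgd kept
 * in %g7, and 0x1ff8 is the offset mask preloaded into %g2.
 */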
dtlb_miss:
/* I-cache line 0 */
ldxa [%g0] ASI_DMMU, %g1 ! grab Tag Target either way
brlz,pnt %g1, 3f ! special kernel processing
srlx %g1, 8, %g3 ! put high vaddr bits in place
1:
and %g3, %g2, %g3 ! get offset
ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5! load pgd
sllx %g1, 2, %g4 ! begin pmd_offset formation
and %g4, %g2, %g3 ! and now mask it
ldxa [%g5 + %g3] ASI_PHYS_USE_EC, %g4! load pmd
/* I-cache line 1 */
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! get 8KB pointer bits
srlx %g1, 1, %g1 ! shift right to get pte_offset
ldxa [%g4 + %g1] ASI_PHYS_USE_EC, %g3! load pte
brlz,a,pt %g3, 2f ! is valid bit clear?
stxa %g3, [%g0] ASI_DTLB_DATA_IN ! nope, load TTE into DTLB
ba,a,pt %xcc, sparc64_dtlb_refbit_catch ! longer processing needed
2:
retry ! return from trap
#define KTTE_HIGH_BITS (_PAGE_VALID | _PAGE_SZ4MB)
#define KTTE_LOW_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_G)
nop ! align next insn on cache line
3:
/* I-cache line 2 */
srax %g1, 19, %g5 ! mask down high bits
cmp %g5, -1 ! if -1 this is VMALLOC area
be,pnt %xcc, 1b ! yep
sethi %uhi(KTTE_HIGH_BITS), %g4 ! begin pte formation
sllx %g1, 23, %g1 ! begin masking for physpage
sllx %g4, 32, %g4 ! high protection TTE bits
or %g4, (KTTE_LOW_BITS), %g4 ! low protection TTE bits
srlx %g1, 41, %g1 ! put physpage into place
/* I-cache line 3 */
or %g4, %g1, %g1 ! finish TTE computation
stxa %g1, [%g0] ASI_DTLB_DATA_IN ! load TTE into DTLB
retry ! return from trap
nop; nop; nop; nop; nop;
#undef KTTE_HIGH_BITS
#undef KTTE_LOW_BITS
/* $Id: dtlb_prot.S,v 1.3 1996/12/28 18:39:41 davem Exp $
* dtlb_prot.S: Fast TLB protection trap processing.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
/* We are in the MMU globals, %g7 contains the physical
* address of current->mm->pgd at all times. %g2 is
* also preloaded with the mask 0x1ff8 to make things
* even quicker.
*
* Many subtle things are done here. The high bits of
* the virtual address missed are most easily obtained
* from the tag target (it is at address zero in ASI_IMMU
* so no address formation is necessary to get at this).
* This is used to compute the pgd and pmd table offsets.
*
* Even more clever is that physical page zero is always
* a page full of zeroes. This means we can just follow
* through with all the page table traversals even if nothing
* is mapped because we'll just do loads from page zero
* and get yet another zero. We only need to do the check
* for the valid bit being set in the final pte we obtain.
*
* Furthermore, we set the TSB base register to the address
* zero, and we use the 8KB tsb ptr to calculate the pte
* offset. Again it is at address zero in ASI_IMMU_TSB_8KB_PTR
* so no address formation is necessary, saves more instructions.
*
* We use physical address accesses to get at the page
* tables, and this is for two reasons. This makes it
* impossible to take a fault while we are servicing the
* miss. Also this physical bypass access only allocates
* in the E-cache, and thus we prevent D-cache pollution
* from the miss handlers probing the page tables.
*/
dtlb_prot:
/* I-cache line 0 */
ldxa [%g0] ASI_DMMU, %g1
srlx %g1, 8, %g3
and %g3, %g2, %g3
ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5
sllx %g1, 2, %g4
and %g4, %g2, %g3
ldxa [%g5 + %g3] ASI_PHYS_USE_EC, %g4
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1
/* I-cache line 1 */
srlx %g1, 1, %g1
ldxa [%g4 + %g1] ASI_PHYS_USE_EC, %g3
andcc %g3, _PAGE_WRITE, %g0
be,pnt %xcc, sparc64_dtlb_fault
or %g3, (_PAGE_WRITE|_PAGE_W|_PAGE_MODIFIED|_PAGE_ACCESSED), %g3
/* Blamo... */
stxa %g3, [%g4 + %g1] %asi
stxa %g3, [%g0] ASI_DTLB_DATA_IN
retry
/* I-cache line 2 */
nop; nop; nop; nop; nop; nop; nop; nop;
/* I-cache line 3 */
nop; nop; nop; nop; nop; nop; nop; nop;
/* $Id: etrap.S,v 1.1 1996/12/26 10:16:42 davem Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/pstate.h>
#include <asm/ptrace.h>
.text
.align 4
.globl etrap
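/* Descriptive note: etrap is entered from the trap table with the return
 * address in %g7. It saves the trap state (%tstate, %tpc, %tnpc, %y) plus
 * the global and in registers into the pt_regs area on current's kernel
 * stack, drops back to trap level 0 and the normal global set, and then
 * jumps to the instruction just past the caller's call.
 */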
etrap:
sethi %uhi(current_set), %g6
rd %tstate, %g1
or %g6, %ulo(current_set), %g6
rd %tpc, %g2
sllx %g6, 32, %g6
rd %tnpc, %g3
sethi %hi(current_set), %g4
or %g4, %lo(current_set), %g4
or %g6, %g4, %g6
rd %y, %g4
ldx [%g6 + THREAD_KSTACK], %g5
stx %g1, [%g5 + REGWIN_SZ + PT_TSTATE]
stx %g2, [%g5 + REGWIN_SZ + PT_TPC]
stx %g3, [%g5 + REGWIN_SZ + PT_TNPC]
stx %g4, [%g5 + REGWIN_SZ + PT_Y]
rd %pstate, %g1
andn %g1, (PSTATE_IG | PSTATE_MG | PSTATE_AG), %g2
save %g5, 0x0, %sp
mov %g2, %l1
wr %g0, 0x0, %tl
mov %g7, %l2
wr %l1, 0x0, %pstate
stx %g1, [%sp + REGWIN_SZ + PT_G1]
stx %g2, [%sp + REGWIN_SZ + PT_G2]
stx %g3, [%sp + REGWIN_SZ + PT_G3]
stx %g4, [%sp + REGWIN_SZ + PT_G4]
stx %g5, [%sp + REGWIN_SZ + PT_G5]
stx %g6, [%sp + REGWIN_SZ + PT_G6]
stx %g7, [%sp + REGWIN_SZ + PT_G7]
stx %i0, [%sp + REGWIN_SZ + PT_I0]
stx %i1, [%sp + REGWIN_SZ + PT_I1]
stx %i2, [%sp + REGWIN_SZ + PT_I2]
stx %i3, [%sp + REGWIN_SZ + PT_I3]
stx %i4, [%sp + REGWIN_SZ + PT_I4]
stx %i5, [%sp + REGWIN_SZ + PT_I5]
stx %i6, [%sp + REGWIN_SZ + PT_I6]
stx %i7, [%sp + REGWIN_SZ + PT_I7]
jmpl %l2 + 0x4, %g0
wr %l1, PSTATE_IE, %pstate
.section ".text.init",#alloc,#execinstr
.globl text_init_end
text_init_end:
.section ".data.init",#alloc,#write
.globl data_init_end
data_init_end:
/* $Id: head.S,v 1.4 1996/12/28 18:39:42 davem Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/pstate.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>
.text
#include "ttable.S"
sparc64_boot:
rdpr %ver, %g1 /* Get VERSION register. */
mov %o4, %g2 /* Get OpenPROM vector. */
/* We must be careful, 32-bit OpenBOOT will get confused if it
* tries to save away a register window to a 64-bit kernel
* stack address. Flush all windows, disable interrupts,
* remap if necessary, jump onto kernel trap table, then kernel
* stack, or else we die.
*/
flushw /* Flush register file. */
wrpr %g0, 0xf, %pil /* Interrupts off. */
/* Remap ourselves to upper 64-bit addresses if necessary.
* SILO64 will have loaded us to the right location already.
*/
mov %o7, %g4
current_pc:
call 1f
mov %o7, %g3
1:
mov %g4, %o7
set current_pc, %g7
cmp %g3, %g7
be go_to_highmem
nop
/* Remap ourselves into high addresses. */
set PAGE_OFFSET, %g4
sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
sllx %g5, 32, %g5
or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_G | _PAGE_L), %g5
/* Be real fucking anal... */
stxa %g0, [%g4] ASI_IMMU_DEMAP
stxa %g0, [%g4] ASI_DMMU_DEMAP
membar #Sync
flush %g4
mov TLB_TAG_ACCESS, %g6
stxa %g4, [%g6] ASI_IMMU
stxa %g5, [%g0] ASI_ITLB_DATA_IN
membar #Sync
flush %g4
stxa %g4, [%g6] ASI_DMMU
stxa %g5, [%g0] ASI_DTLB_DATA_IN
membar #Sync
flush %g4
go_to_highmem:
set execute_in_high_mem, %g7
jmpl %g7, %g0
nop
execute_in_high_mem:
set nwindows, %g7
and %g1, VERS_MAXWIN, %g5
add %g5, 1, %g4
stx %g4, [%g7]
set nwindowsm1, %g6
stx %g5, [%g6]
set romvec, %g7
stx %g2, [%g7]
set prom_sp, %g7
stx %sp, [%g7]
set swapper_pg_dir, %g6
set PAGE_OFFSET, %g4 ! this stays here for a long time
sub %g6, %g4, %g5
set init_task, %g6 ! g6 usage is fixed as well
set sparc64_ttable_tl0, %g5
wrpr %g5, %tba
set bootup_kernel_stack, %sp
mov 0, %fp
wrpr %g0, PSTATE_KERNEL, %pstate
wrpr %g0, WSTATE_KERNEL, %wstate
wrpr %g0, 0x0, %tl
/* XXX Map in PROM 32-bit trampoline code. */
call prom_init
mov %o4, %o0
/* Off we go.... */
call start_kernel
nop
/* Not reached... */
.data
.align 4
.globl nwindows, nwindowsm1, romvec, prom_sp
nwindows: .xword 0
nwindowsm1: .xword 0
romvec: .xword 0
prom_sp: .xword 0
/* $Id: idprom.c,v 1.1 1996/12/28 18:39:38 davem Exp $
* idprom.c: Routines to load the idprom into kernel addresses and
* interpret the data contained within.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
struct idprom *idprom;
static struct idprom idprom_buffer;
/* Calculate the IDPROM checksum (xor of the data bytes). */
__initfunc(static unsigned char calc_idprom_cksum(struct idprom *idprom))
{
unsigned char cksum, i, *ptr = (unsigned char *)idprom;
for (i = cksum = 0; i <= 0x0E; i++)
cksum ^= *ptr++;
return cksum;
}
/* Create a local IDPROM copy and verify integrity. */
__initfunc(void idprom_init(void))
{
prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
idprom = &idprom_buffer;
if (idprom->id_format != 0x01) {
prom_printf("IDPROM: Unknown format type!\n");
prom_halt();
}
if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
idprom->id_cksum, calc_idprom_cksum(idprom));
prom_halt();
}
printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
idprom->id_ethaddr[0], idprom->id_ethaddr[1],
idprom->id_ethaddr[2], idprom->id_ethaddr[3],
idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
}
#include <asm/errno.h>
.section ".text.init",#alloc,#execinstr
.globl text_init_begin
text_init_begin:
.section ".data.init",#alloc,#write
.globl data_init_begin
data_init_begin:
.section ".fixup",#alloc,#execinstr
.globl __ret_efault
__ret_efault:
ret
restore %g0, -EFAULT, %o0
/* $Id: ioport.c,v 1.1 1996/12/28 18:39:39 davem Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
/* This points to the next to use virtual memory for io mappings */
static unsigned long dvma_next_free = DVMA_VADDR;
unsigned long sparc_iobase_vaddr = IOBASE_VADDR;
/*
* sparc_alloc_io:
* Maps and allocates an obio device.
* Implements a simple linear allocator; you can force the function
* to use your own mapping, but in practice this should not be used.
*
* Input:
* address: the obio address to map
* virtual: if non zero, specifies a fixed virtual address where
* the mapping should take place.
* len: the length of the mapping
* bus_type: The bus on which this io area sits.
*
* Returns:
* The virtual address where the mapping actually took place.
*/
void *sparc_alloc_io (void *address, void *virtual, int len, char *name,
int bus_type, int rdonly)
{
unsigned long vaddr, base_address;
unsigned long addr = (unsigned long) address;
unsigned long offset = (addr & (~PAGE_MASK));
if (virtual) {
vaddr = (unsigned long) virtual;
len += offset;
if(((unsigned long) virtual + len) > (IOBASE_VADDR + IOBASE_LEN)) {
prom_printf("alloc_io: Mapping outside IOBASE area\n");
prom_halt();
}
if(check_region ((vaddr | offset), len)) {
prom_printf("alloc_io: 0x%lx is already in use\n", vaddr);
prom_halt();
}
/* Tell Linux resource manager about the mapping */
request_region ((vaddr | offset), len, name);
} else {
vaddr = occupy_region(sparc_iobase_vaddr, IOBASE_END,
(offset + len + PAGE_SIZE-1) & PAGE_MASK, PAGE_SIZE, name);
if (vaddr == 0) {
/* Usually we cannot see printks in this case. */
prom_printf("alloc_io: cannot occupy %d region\n", len);
prom_halt();
}
}
base_address = vaddr;
/* Do the actual mapping */
for (; len > 0; len -= PAGE_SIZE) {
mapioaddr(addr, vaddr, bus_type, rdonly);
vaddr += PAGE_SIZE;
addr += PAGE_SIZE;
}
return (void *) (base_address | offset);
}
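/* A minimal usage sketch; the physical address, length, and name are made
 * up for illustration rather than taken from a real driver:
 *
 *	void *regs = sparc_alloc_io((void *) 0xf4000000, 0, 0x100,
 *	                            "example-dev", 0, 0);
 *
 * Passing 0 as the second argument lets the linear allocator choose the
 * virtual address; a non-zero value forces a fixed mapping inside the
 * IOBASE area.
 */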
void sparc_free_io (void *virtual, int len)
{
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
unsigned long plen = (((unsigned long)virtual & ~PAGE_MASK) + len + PAGE_SIZE-1) & PAGE_MASK;
release_region(vaddr, plen);
for (; plen != 0;) {
plen -= PAGE_SIZE;
unmapioaddr(vaddr + plen);
}
}
/* Does DVMA allocations with PAGE_SIZE granularity. How this basically
* works is that the ESP chip can do DVMA transfers at ANY address with
* certain size and boundary restrictions. But other devices that are
* attached to it and would like to do DVMA have to set things up in
* a special way: if the DVMA sees a device attached to it transfer data
* at addresses above DVMA_VADDR it will grab them, so it does not have
* to know the peculiarities of where to read (for example) the Lance
* data from.
*/
void *sparc_dvma_malloc (int len, char *name)
{
unsigned long vaddr, base_address;
vaddr = dvma_next_free;
if(check_region (vaddr, len)) {
prom_printf("alloc_dma: 0x%lx is already in use\n", vaddr);
prom_halt();
}
if(vaddr + len > (DVMA_VADDR + DVMA_LEN)) {
prom_printf("alloc_dvma: out of dvma memory\n");
prom_halt();
}
/* Basically these can be mapped just like any old
* IO pages, cacheable bit off, etc. The physical
* pages are now mapped dynamically to save space.
*/
base_address = vaddr;
mmu_map_dma_area(base_address, len);
/* Assign the memory area. */
dvma_next_free = PAGE_ALIGN(dvma_next_free+len);
request_region(base_address, len, name);
return (void *) base_address;
}
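/* Usage sketch (the size and name are illustrative only):
 *
 *	char *buf = sparc_dvma_malloc(4 * PAGE_SIZE, "example-dvma");
 *
 * The returned address lies in the DVMA window, so device DVMA transfers to
 * it are picked up automatically. Note there is no matching free routine in
 * this file; DVMA allocations are permanent.
 */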
/* $Id: itlb_miss.S,v 1.4 1996/12/28 18:39:42 davem Exp $
* itlb_miss.S: Instruction TLB miss code, this is included directly
* into the trap table.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
/* We are in the MMU globals, %g7 contains the physical
* address of current->mm->pgd at all times. %g2 is
* also preloaded with the mask 0x1ff8 to make things
* even quicker.
*
* Many subtle things are done here. The high bits of
* the virtual address missed are most easily obtained
* from the tag target (it is at address zero in ASI_IMMU
* so no address formation is necessary to get at this).
* This is used to compute the pgd and pmd table offsets.
*
* Even more clever is that physical page zero is always
* a page full of zeroes. This means we can just follow
* through with all the page table traversals even if nothing
* is mapped because we'll just do loads from page zero
* and get yet another zero. We only need to do the check
* for the valid bit being set in the final pte we obtain.
*
* Furthermore, we set the TSB base register to the address
* zero, and we use the 8KB tsb ptr to calculate the pte
* offset. Again it is at address zero in ASI_IMMU_TSB_8KB_PTR
* so no address formation is necessary, saves more instructions.
*
* We use physical address accesses to get at the page
* tables, and this is for two reasons. This makes it
* impossible to take a fault while we are servicing the
* miss. Also this physical bypass access only allocates
* in the E-cache, and thus we prevent D-cache pollution
* from the miss handlers probing the page tables.
*
* It looks very hairy and slow, but I take only one more load from
* RAM of overhead than the Solaris version, and my version is one
* instruction quicker for a true TLB miss. More importantly, all
* true TLB misses under Linux will be serviced in _constant_ time.
* When using the TSB in the manner it was intended to be used (like
* Solaris does), the overhead for a TLB miss is _indeterminate_,
* especially during process startup when the TSB is cold.
*/
itlb_miss:
/* I-cache line 0 */
ldxa [%g0] ASI_IMMU, %g1 ! grab Tag Target
srlx %g1, 8, %g3 ! put high vaddr bits in place
and %g3, %g2, %g3 ! get offset
ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5! load pgd
sllx %g1, 2, %g4 ! begin pmd_offset formation
and %g4, %g2, %g3 ! and now mask it
ldxa [%g5 + %g3] ASI_PHYS_USE_EC, %g4! load pmd
ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! get 8KB pointer bits
/* I-cache line 1 */
srlx %g1, 1, %g1 ! shift right to get pte_offset
ldxa [%g4 + %g1] ASI_PHYS_USE_EC, %g2! load pte
brlz,a,pt %g2, 1f ! is valid bit clear?
stxa %g2, [%g0] ASI_ITLB_DATA_IN ! nope, load TTE into ITLB
ba,a,pt %xcc, sparc64_itlb_refbit_catch ! longer processing needed
1:
retry ! return from trap
nop; nop;
/* I-cache line 2 */
nop; nop; nop; nop; nop; nop; nop; nop;
/* I-cache line 3 */
nop; nop; nop; nop; nop; nop; nop; nop;
# $Id: Makefile,v 1.1 1996/12/27 17:28:35 davem Exp $
# Makefile for Sparc library files..
#
CFLAGS := $(CFLAGS) -ansi
OBJS = memset.o blockops.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
sync
blockops.o: blockops.S
$(CC) -ansi -c -o blockops.o blockops.S
memset.o: memset.S
$(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
dep:
include $(TOPDIR)/Rules.make
/* $Id: blockops.S,v 1.1 1996/12/22 07:42:15 davem Exp $
* arch/sparc64/lib/blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/asi.h>
/* Zero out 64 bytes of memory at (buf + offset). */
#define BLAST_BLOCK(buf, offset) \
stx %g0, [buf + offset + 0x38]; \
stx %g0, [buf + offset + 0x30]; \
stx %g0, [buf + offset + 0x28]; \
stx %g0, [buf + offset + 0x20]; \
stx %g0, [buf + offset + 0x18]; \
stx %g0, [buf + offset + 0x10]; \
stx %g0, [buf + offset + 0x08]; \
stx %g0, [buf + offset + 0x00];
/* Copy 32 bytes of memory at (src + offset) to
* (dst + offset).
*/
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
ldx [src + offset + 0x18], t0; \
ldx [src + offset + 0x10], t1; \
ldx [src + offset + 0x08], t2; \
ldx [src + offset + 0x00], t3; \
stx t0, [dst + offset + 0x18]; \
stx t1, [dst + offset + 0x10]; \
stx t2, [dst + offset + 0x08]; \
stx t3, [dst + offset + 0x00];
.text
.align 4
.globl C_LABEL(bzero_2page), C_LABEL(bzero_1page)
C_LABEL(bzero_2page):
/* %o0 = buf */
or %o0, %g0, %o1
or %g0, 0x40, %g2
1:
BLAST_BLOCK(%o0, 0x00)
BLAST_BLOCK(%o0, 0x40)
BLAST_BLOCK(%o0, 0x80)
BLAST_BLOCK(%o0, 0xc0)
subcc %g2, 1, %g2
bne,pt %icc, 1b
add %o0, 0x100, %o0
retl
mov %o1, %o0
C_LABEL(bzero_1page):
/* %o0 = buf */
or %o0, %g0, %o1
or %g0, 0x20, %g2
1:
BLAST_BLOCK(%o0, 0x00)
BLAST_BLOCK(%o0, 0x40)
BLAST_BLOCK(%o0, 0x80)
BLAST_BLOCK(%o0, 0xc0)
subcc %g2, 1, %g2
bne,pt %icc, 1b
add %o0, 0x100, %o0
retl
mov %o1, %o0
.globl C_LABEL(__copy_1page)
C_LABEL(__copy_1page):
/* %o0 = dst, %o1 = src */
or %g0, 0x10, %g1
1:
MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
subcc %g1, 1, %g1
add %o0, 0x100, %o0
bne,pt %icc, 1b
add %o1, 0x100, %o1
retl
nop
/* $Id: memset.S,v 1.1 1996/12/22 07:42:16 davem Exp $
* arch/sparc64/lib/memset.S: UltraSparc optimized memset and bzero code
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/asi.h>
/* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
#define ZERO_BIG_BLOCK(base, offset, source) \
stx source, [base + offset + 0x00]; \
stx source, [base + offset + 0x08]; \
stx source, [base + offset + 0x10]; \
stx source, [base + offset + 0x18]; \
stx source, [base + offset + 0x20]; \
stx source, [base + offset + 0x28]; \
stx source, [base + offset + 0x30]; \
stx source, [base + offset + 0x38];
#define ZERO_LAST_BLOCKS(base, offset, source) \
stx source, [base - offset - 0x38]; \
stx source, [base - offset - 0x30]; \
stx source, [base - offset - 0x28]; \
stx source, [base - offset - 0x20]; \
stx source, [base - offset - 0x18]; \
stx source, [base - offset - 0x10]; \
stx source, [base - offset - 0x08]; \
stx source, [base - offset - 0x00];
.text
.align 4
.globl C_LABEL(__bzero), C_LABEL(__memset), C_LABEL(memset)
C_LABEL(__memset):
C_LABEL(memset):
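/* Replicate the fill byte in %o1 across all 64 bits of %g3:
 * byte -> halfword -> word -> doubleword.
 */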
and %o1, 0xff, %g3
sll %g3, 8, %g2
or %g3, %g2, %g3
sll %g3, 16, %g2
or %g3, %g2, %g3
sllx %g3, 32, %g2
or %g3, %g2, %g3
b 1f
mov %o2, %o1
3:
cmp %o2, 3
be 2f
stb %g3, [%o0]
cmp %o2, 2
be 2f
stb %g3, [%o0 + 0x01]
stb %g3, [%o0 + 0x02]
2:
sub %o2, 4, %o2
add %o1, %o2, %o1
b 4f
sub %o0, %o2, %o0
C_LABEL(__bzero):
mov %g0, %g3
1:
cmp %o1, 7
bleu,pnt %icc, 7f
mov %o0, %g1
andcc %o0, 3, %o2
bne,pnt %icc, 3b
4:
andcc %o0, 4, %g0
be,a,pt %icc, 2f
andcc %o1, 0xffffff80, %o3 ! everything 8 aligned, o1 is len to run
stw %g3, [%o0]
sub %o1, 4, %o1
add %o0, 4, %o0
andcc %o1, 0xffffff80, %o3 ! everything 8 aligned, o1 is len to run
2:
be 9f
andcc %o1, 0x78, %o2
4:
ZERO_BIG_BLOCK(%o0, 0x00, %g2)
subcc %o3, 128, %o3
ZERO_BIG_BLOCK(%o0, 0x40, %g2)
bne,pt %icc, 4b
add %o0, 128, %o0
orcc %o2, %g0, %g0
9:
be,pnt %icc, 6f
andcc %o1, 7, %o1
srl %o2, 1, %o3
set bzero_table + 64, %o4
sub %o4, %o3, %o4
jmp %o4
add %o0, %o2, %o0
bzero_table:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
6:
be,pt %icc, 8f
andcc %o1, 4, %g0
be,pnt %icc, 1f
andcc %o1, 2, %g0
stw %g3, [%o0]
add %o0, 4, %o0
1:
be,pt %icc, 1f
andcc %o1, 1, %g0
sth %g3, [%o0]
add %o0, 2, %o0
1:
bne,a,pnt %icc, 8f
stb %g3, [%o0]
8:
retl
mov %g1, %o0
/* Don't care about alignment here. It is highly
* improbable, and at most two traps may happen.
*/
7:
ba,pt %xcc, 6b
orcc %o1, 0, %g0
# $Id: Makefile,v 1.1 1996/12/26 10:24:22 davem Exp $
# Makefile for the linux Sparc64-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o
O_OBJS := fault.o init.o generic.o asyncd.o extable.o
include $(TOPDIR)/Rules.make
/* $Id: asyncd.c,v 1.1 1996/12/26 10:24:24 davem Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
*
* Idea and skeleton code courtesy of David Miller (bless his cotton socks)
*
* Implemented by tridge
*/
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/config.h>
#include <linux/interrupt.h>
#include <asm/dma.h>
#include <asm/system.h> /* for cli()/sti() */
#include <asm/segment.h> /* for memcpy_to/fromfs */
#include <asm/bitops.h>
#include <asm/pgtable.h>
#define DEBUG 0
#define WRITE_LIMIT 100
#define LOOP_LIMIT 200
static struct {
int faults, read, write, success, failure, errors;
} stats;
/*
* The wait queue for waking up the async daemon:
*/
static struct wait_queue * asyncd_wait = NULL;
struct async_job {
volatile struct async_job *next;
int taskid;
struct mm_struct *mm;
unsigned long address;
int write;
void (*callback)(int,unsigned long,int,int);
};
static volatile struct async_job *async_queue = NULL;
static volatile struct async_job *async_queue_end = NULL;
static void add_to_async_queue(int taskid,
struct mm_struct *mm,
unsigned long address,
int write,
void (*callback)(int,unsigned long,int,int))
{
struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
if (!a) {
printk("ERROR: out of memory in asyncd\n");
callback(taskid,address,write,1); /* 'a' was never allocated, so report the failure via the callback directly */
return;
}
if (write)
stats.write++;
else
stats.read++;
a->next = NULL;
a->taskid = taskid;
a->mm = mm;
a->address = address;
a->write = write;
a->callback = callback;
if (!async_queue) {
async_queue = a;
} else {
async_queue_end->next = a;
}
async_queue_end = a;
}
void async_fault(unsigned long address, int write, int taskid,
void (*callback)(int,unsigned long,int,int))
{
struct task_struct *tsk = task[taskid];
struct mm_struct *mm = tsk->mm;
stats.faults++;
#if 0
printk("paging in %x for task=%d\n",address,taskid);
#endif
add_to_async_queue(taskid, mm, address, write, callback);
wake_up(&asyncd_wait);
mark_bh(TQUEUE_BH);
}
static int fault_in_page(int taskid,
struct vm_area_struct *vma,
unsigned address,int write)
{
static unsigned last_address;
static int last_task, loop_counter;
struct task_struct *tsk = task[taskid];
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (!tsk || !tsk->mm)
return 1;
if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
goto bad_area;
if (vma->vm_start > address)
goto bad_area;
if (address == last_address && taskid == last_task) {
loop_counter++;
} else {
loop_counter = 0;
last_address = address;
last_task = taskid;
}
if (loop_counter == WRITE_LIMIT && !write) {
printk("MSC bug? setting write request\n");
stats.errors++;
write = 1;
}
if (loop_counter == LOOP_LIMIT) {
printk("MSC bug? failing request\n");
stats.errors++;
return 1;
}
pgd = pgd_offset(vma->vm_mm, address);
pmd = pmd_alloc(pgd,address);
if(!pmd)
goto no_memory;
pte = pte_alloc(pmd, address);
if(!pte)
goto no_memory;
if(!pte_present(*pte)) {
do_no_page(tsk, vma, address, write);
goto finish_up;
}
set_pte(pte, pte_mkyoung(*pte));
flush_tlb_page(vma, address);
if(!write)
goto finish_up;
if(pte_write(*pte)) {
set_pte(pte, pte_mkdirty(*pte));
flush_tlb_page(vma, address);
goto finish_up;
}
do_wp_page(tsk, vma, address, write);
/* Fall through for do_wp_page */
finish_up:
stats.success++;
update_mmu_cache(vma, address, *pte);
return 0;
no_memory:
stats.failure++;
oom(tsk);
return 1;
bad_area:
stats.failure++;
tsk->tss.sig_address = address;
tsk->tss.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, tsk, 1);
return 1;
}
/* Note the semaphore operations must be done here, and _not_
* in async_fault().
*/
static void run_async_queue(void)
{
int ret;
unsigned flags;
while (async_queue) {
volatile struct async_job *a;
struct mm_struct *mm;
struct vm_area_struct *vma;
save_flags(flags); cli();
a = async_queue;
async_queue = async_queue->next;
restore_flags(flags);
mm = a->mm;
down(&mm->mmap_sem);
vma = find_vma(mm, a->address);
ret = fault_in_page(a->taskid,vma,a->address,a->write);
#if DEBUG
printk("fault_in_page(task=%d addr=%x write=%d) = %d\n",
a->taskid,a->address,a->write,ret);
#endif
a->callback(a->taskid,a->address,a->write,ret);
up(&mm->mmap_sem);
kfree_s((void *)a,sizeof(*a));
}
}
#if CONFIG_AP1000
static void asyncd_info(void)
{
printk("CID(%d) faults: total=%d read=%d write=%d success=%d fail=%d err=%d\n",
mpp_cid(),stats.faults, stats.read, stats.write, stats.success,
stats.failure, stats.errors);
}
#endif
/*
* The background async daemon.
* Started as a kernel thread from the init process.
*/
int asyncd(void *unused)
{
current->session = 1;
current->pgrp = 1;
sprintf(current->comm, "asyncd");
current->blocked = ~0UL; /* block all signals */
/* Give asyncd a realtime priority. */
current->policy = SCHED_FIFO;
current->priority = 32; /* Fixme --- we need to standardise our
namings for POSIX.4 realtime scheduling
priorities. */
printk("Started asyncd\n");
#if CONFIG_AP1000
bif_add_debug_key('a',asyncd_info,"stats on asyncd");
#endif
while (1) {
unsigned flags;
save_flags(flags); cli();
while (!async_queue) {
current->signal = 0;
interruptible_sleep_on(&asyncd_wait);
}
restore_flags(flags);
run_async_queue();
}
}
/* $Id: extable.c,v 1.1 1996/12/26 10:24:24 davem Exp $
* linux/arch/sparc/mm/extable.c
*/
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[];
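/* Note on the table format handled below: an entry whose fixup is zero marks
 * the start of an instruction range; a fault anywhere in that range uses the
 * fixup of the following entry, with *g2 set to the word offset of the
 * faulting instruction from the start of the range. Ordinary entries map a
 * single instruction directly to its fixup.
 */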
static unsigned long
search_one_table(const struct exception_table_entry *start,
const struct exception_table_entry *last,
unsigned long value, unsigned long *g2)
{
const struct exception_table_entry *first = start;
const struct exception_table_entry *mid;
long diff = 0;
while (first <= last) {
mid = (last - first) / 2 + first;
diff = mid->insn - value;
if (diff == 0) {
if (!mid->fixup) {
*g2 = 0;
return (mid + 1)->fixup;
} else
return mid->fixup;
} else if (diff < 0)
first = mid+1;
else
last = mid-1;
}
if (last->insn < value && !last->fixup && (last + 1)->insn > value) {
*g2 = (value - last->insn)/4;
return (last + 1)->fixup;
}
if (first > start && (first-1)->insn < value && !(first-1)->fixup && first->insn < value) {
*g2 = (value - (first-1)->insn)/4;
return first->fixup;
}
return 0;
}
unsigned long
search_exception_table(unsigned long addr, unsigned long *g2)
{
unsigned long ret;
#ifdef CONFIG_MODULES
struct module *mp;
#endif
/* Search the kernel's table first. */
ret = search_one_table(__start___ex_table,
__stop___ex_table-1, addr, g2);
if (ret)
return ret;
#ifdef CONFIG_MODULES
for (mp = module_list; mp != NULL; mp = mp->next) {
if (mp->exceptinfo.start != NULL) {
ret = search_one_table(mp->exceptinfo.start,
mp->exceptinfo.stop-1, addr, g2);
if (ret)
return ret;
}
}
#endif
return 0;
}
/* $Id: fault.c,v 1.2 1996/12/26 18:03:04 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
extern int prom_node_root;
/* Nice, simple, prom library does all the sweating for us. ;) */
unsigned int prom_probe_memory (void)
{
register struct linux_mlist_v0 *mlist;
register unsigned int bytes, base_paddr, tally;
register int i;
i = 0;
mlist= *prom_meminfo()->v0_available;
bytes = tally = mlist->num_bytes;
base_paddr = (unsigned int) mlist->start_adr;
sp_banks[0].base_addr = base_paddr;
sp_banks[0].num_bytes = bytes;
while (mlist->theres_more != (void *) 0){
i++;
mlist = mlist->theres_more;
bytes = mlist->num_bytes;
tally += bytes;
if (i >= SPARC_PHYS_BANKS-1) {
printk ("The machine has more banks than "
"this kernel can support\n"
"Increase the SPARC_PHYS_BANKS "
"setting (currently %d)\n",
SPARC_PHYS_BANKS);
i = SPARC_PHYS_BANKS-1;
break;
}
sp_banks[i].base_addr = (unsigned int) mlist->start_adr;
sp_banks[i].num_bytes = mlist->num_bytes;
}
i++;
sp_banks[i].base_addr = 0xdeadbeef;
sp_banks[i].num_bytes = 0;
/* Now mask all bank sizes on a page boundary, it is all we can
* use anyways.
*/
for(i=0; sp_banks[i].num_bytes != 0; i++)
sp_banks[i].num_bytes &= PAGE_MASK;
return tally;
}
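For readability, the sp_banks entries filled in above have roughly this shape (the structure is defined elsewhere in the sparc headers; the field names are simply the ones the code assigns, and the list is terminated by a num_bytes == 0 entry):
/* Sketch only; see the sparc headers for the real definition. */
struct sparc_phys_banks {
	unsigned long base_addr;
	unsigned long num_bytes;
};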
/* Traverse the memory lists in the prom to see how much physical we
* have.
*/
unsigned int
probe_memory(void)
{
unsigned int total;
total = prom_probe_memory();
/* Oh man, much nicer, keep the dirt in promlib. */
return total;
}
asmlinkage void do_sparc64_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
unsigned int fixup;
unsigned long g2;
int from_user = !(regs->tstate & TSTATE_PRIV);
down(&mm->mmap_sem);
vma = find_vma(mm, address);
if(!vma)
goto bad_area;
if(vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
/* Allow reads even for write-only mappings */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
handle_mm_fault(vma, address, write);
up(&mm->mmap_sem);
return;

/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up(&mm->mmap_sem);
/* Is this in ex_table? */
g2 = regs->u_regs[UREG_G2];
if (!from_user && (fixup = search_exception_table (regs->pc, &g2))) {
printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
regs->pc, fixup, g2);
regs->pc = fixup;
regs->npc = regs->pc + 4;
regs->u_regs[UREG_G2] = g2;
return;
}
/* Did we have an exception handler installed? */
if(current->tss.ex.count == 1) {
if(from_user) {
printk("Yieee, exception signalled from user mode.\n");
} else {
/* Set pc to %g1, set %g1 to -EFAULT and %g2 to
* the faulting address so we can cleanup.
*/
printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
printk("EX: count<%d> pc<%08lx> expc<%08lx> address<%08lx>\n",
(int) current->tss.ex.count, current->tss.ex.pc,
current->tss.ex.expc, current->tss.ex.address);
current->tss.ex.count = 0;
regs->pc = current->tss.ex.expc;
regs->npc = regs->pc + 4;
regs->u_regs[UREG_G1] = -EFAULT;
regs->u_regs[UREG_G2] = address - current->tss.ex.address;
regs->u_regs[UREG_G3] = current->tss.ex.pc;
return;
}
}
if(from_user) {
#if 0
printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
tsk->comm, tsk->pid, address, regs->pc);
#endif
tsk->tss.sig_address = address;
tsk->tss.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, tsk, 1);
return;
}
if((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %08lx\n", address);
}
printk(KERN_ALERT "tsk->mm->context = %08lx\n",
(unsigned long) tsk->mm->context);
printk(KERN_ALERT "tsk->mm->pgd = %08lx\n",
(unsigned long) tsk->mm->pgd);
die_if_kernel("Oops", regs);
}
/* $Id: generic.c,v 1.1 1996/12/26 10:24:23 davem Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/pgtable.h>
#include <asm/page.h>
/* Allocate a block of RAM which is aligned to its size.
* This procedure can be used until the call to mem_init().
*/
void *sparc_init_alloc(unsigned long *kbrk, unsigned long size)
{
unsigned long mask = size - 1;
unsigned long ret;
if(!size)
return 0x0;
if(size & mask) {
prom_printf("panic: sparc_init_alloc botch\n");
prom_halt();
}
ret = (*kbrk + mask) & ~mask;
*kbrk = ret + size;
memset((void*) ret, 0, size);
return (void*) ret;
}
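A minimal usage sketch of sparc_init_alloc(); the cursor name is hypothetical, and as the comment above says this only works before mem_init():
	/* Illustrative only: carve out an 8K block aligned to 8K from the
	 * early-boot break pointer; the returned memory is already zeroed.
	 */
	unsigned long early_brk = first_free_addr;	/* hypothetical cursor */
	void *table = sparc_init_alloc(&early_brk, 8192);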
static inline void forget_pte(pte_t page)
{
if (pte_none(page))
return;
if (pte_present(page)) {
unsigned long addr = pte_page(page);
if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
return;
free_page(addr);
if (current->mm->rss <= 0)
return;
current->mm->rss--;
return;
}
swap_free(pte_val(page));
}
/* Remap IO memory, the same way as remap_page_range(), but use
* the obio memory space.
*
* They use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
*/
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t oldpage = *pte;
pte_clear(pte);
set_pte(pte, mk_pte_io(offset, prot, space));
forget_pte(oldpage);
address += PAGE_SIZE;
offset += PAGE_SIZE;
pte++;
} while (address < end);
}
static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
offset -= address;
do {
pte_t * pte = pte_alloc(pmd, address);
if (!pte)
return -ENOMEM;
io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
return 0;
}
int io_remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
int error = 0;
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(current->mm, from);
flush_cache_range(current->mm, beg, end);
while (from < end) {
pmd_t *pmd = pmd_alloc(dir, from);
error = -ENOMEM;
if (!pmd)
break;
error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
flush_tlb_range(current->mm, beg, end);
return error;
}
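A sketch of a typical caller, assuming the 2.1-era mmap() file operation signature; EX_PHYS_BASE and EX_IOSPACE are made-up constants for illustration, and the prot argument is effectively ignored since io_remap_page_range() forces pg_iobits anyway:
/* Illustrative only: map a device's obio registers into user space. */
static int example_mmap(struct inode *inode, struct file *file,
			struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	if (io_remap_page_range(vma->vm_start,
				EX_PHYS_BASE + vma->vm_offset,
				size, vma->vm_page_prot, EX_IOSPACE))
		return -EAGAIN;
	return 0;
}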
# $Id: Makefile,v 1.1 1996/12/27 08:49:10 jj Exp $
# Makefile for the Sun Boot PROM interface library under
# Linux.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now in the main makefile...
OBJS = bootstr.o devops.o init.o memory.o misc.o mp.o \
ranges.o tree.o console.o printf.o
all: promlib.a
promlib.a: $(OBJS)
$(AR) rcs promlib.a $(OBJS)
sync
dep:
$(CPP) -M *.c > .depend
include $(TOPDIR)/Rules.make
/* $Id: bootstr.c,v 1.1 1996/12/27 08:49:10 jj Exp $
* bootstr.c: Boot string/argument acquisition from the PROM.
*
* Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright(C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
#include <linux/string.h>
#include <asm/oplib.h>
#define BARG_LEN 256
static char barg_buf[BARG_LEN];
static char fetched = 0;
char *
prom_getbootargs(void)
{
/* This check saves us from a panic when bootfd patches args. */
if (fetched) return barg_buf;
prom_getstring(prom_finddevice ("/chosen"), "bootargs", barg_buf, BARG_LEN);
fetched = 1;
return barg_buf;
}
/* $Id: console.c,v 1.1 1996/12/27 08:49:11 jj Exp $
* console.c: Routines that deal with sending and receiving IO
* to/from the current console device using the PROM.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <linux/string.h>
extern int prom_stdin, prom_stdout;
/* Non blocking get character from console input device, returns -1
* if no input was taken. This can be used for polling.
*/
__inline__ int
prom_nbgetchar(void)
{
char inc;
if ((*prom_command)("read", P1275_ARG(1,P1275_ARG_IN_BUF)|
P1275_INOUT(3,1),
prom_stdin, &inc, P1275_SIZE(1)) == 1)
return inc;
else
return -1;
}
/* Non blocking put character to console device, returns -1 if
* unsuccessful.
*/
__inline__ int
prom_nbputchar(char c)
{
char outc;
outc = c;
if ((*prom_command)("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
P1275_INOUT(3,1),
prom_stdout, &outc, P1275_SIZE(1)) == 1)
return 0;
else
return -1;
}
/* Blocking version of get character routine above. */
char
prom_getchar(void)
{
int character;
while((character = prom_nbgetchar()) == -1) ;
return (char) character;
}
/* Blocking version of put character routine above. */
void
prom_putchar(char c)
{
while(prom_nbputchar(c) == -1) ;
return;
}
/* Query for input device type */
enum prom_input_device
prom_query_input_device()
{
int st_p;
char propb[64];
char *p;
st_p = prom_inst2pkg(prom_stdin);
if(prom_node_has_property(st_p, "keyboard"))
return PROMDEV_IKBD;
prom_getproperty(st_p, "device_type", propb, sizeof(propb));
if(strncmp(propb, "serial", sizeof("serial")))
return PROMDEV_I_UNK;
/* FIXME: Is there any better way how to find out? */
st_p = prom_finddevice ("/options");
prom_getproperty(st_p, "input-device", propb, sizeof(propb));
if (strncmp (propb, "tty", 3) || !propb[3] || propb[4])
return PROMDEV_I_UNK;
switch (propb[3]) {
case 'a': return PROMDEV_ITTYA;
case 'b': return PROMDEV_ITTYB;
default: return PROMDEV_I_UNK;
}
}
/* Query for output device type */
enum prom_output_device
prom_query_output_device()
{
int st_p;
char propb[64];
char *p;
int propl;
st_p = prom_inst2pkg(prom_stdout);
propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
if (propl >= 0 && propl == sizeof("display") &&
strncmp("display", propb, sizeof("display")) == 0)
return PROMDEV_OSCREEN;
if(strncmp("serial", propb, sizeof("serial")))
return PROMDEV_O_UNK;
/* FIXME: Is there any better way how to find out? */
st_p = prom_finddevice ("/options");
prom_getproperty(st_p, "output-device", propb, sizeof(propb));
if (strncmp (propb, "tty", 3) || !propb[3] || propb[4])
return PROMDEV_O_UNK;
switch (propb[3]) {
case 'a': return PROMDEV_OTTYA;
case 'b': return PROMDEV_OTTYB;
default: return PROMDEV_O_UNK;
}
}
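A sketch of how console setup code can act on these queries; serial_console here stands in for whatever flag the caller maintains and is not part of this file:
	/* Illustrative only: pick the console from the PROM's answer. */
	switch (prom_query_output_device()) {
	case PROMDEV_OSCREEN:
		serial_console = 0;	/* frame buffer console */
		break;
	case PROMDEV_OTTYA:
		serial_console = 1;	/* console on ttya */
		break;
	case PROMDEV_OTTYB:
		serial_console = 2;	/* console on ttyb */
		break;
	default:
		prom_printf("Unknown output device, assuming screen.\n");
		serial_console = 0;
		break;
	}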
......@@ -815,6 +815,8 @@ static struct device dev_3c501 =
static int io=0x280;
static int irq=5;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
int init_module(void)
{
......
......@@ -634,6 +634,9 @@ static struct device dev_el2[MAX_EL2_CARDS] = {
static int io[MAX_EL2_CARDS] = { 0, };
static int irq[MAX_EL2_CARDS] = { 0, };
static int xcvr[MAX_EL2_CARDS] = { 0, }; /* choose int. or ext. xcvr */
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
MODULE_PARM(xcvr, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
......
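For the array-valued parameters above, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i" accepts between one and MAX_EL2_CARDS integers, one per card. Assuming the driver is built as the 3c503.o module, a load line would look roughly like: insmod 3c503.o io=0x280,0x300 irq=5,9 xcvr=0,1 (each comma-separated value configures one card; the exact values here are illustrative).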