Commit f19e2d9e authored by Ralf Bächle, committed by Linus Torvalds

[PATCH] MIPS merge, generic mips64 bits.

This updates the generic mips64 code.
parent f6d64aee
@@ -15,16 +15,30 @@ else
E2EFLAGS =
endif
#
# Drop some uninteresting sections in the kernel.
# This is only relevant for ELF kernels but doesn't hurt a.out
#
drop-sections = .reginfo .mdebug .comment .note
strip-flags = $(addprefix --remove-section=,$(drop-sections))
all: vmlinux.ecoff addinitrd
vmlinux.rm200: vmlinux
$(OBJCOPY) \
--change-addresses=0xfffffffc \
-O elf32-littlemips \
$(strip-flags) \
$< $@
vmlinux.ecoff: elf2ecoff $(TOPDIR)/vmlinux
./elf2ecoff $(TOPDIR)/vmlinux vmlinux.ecoff $(E2EFLAGS)
elf2ecoff: elf2ecoff.c
$(HOSTCC) -o $@ $^
elf2ecoff: $(TOPDIR)/arch/mips/boot/elf2ecoff.c
$(HOSTCC) -I$(TOPDIR)/arch/mips/boot -I- -o $@ $^
addinitrd: addinitrd.c
$(HOSTCC) -o $@ $^
addinitrd: $(TOPDIR)/arch/mips/boot/addinitrd.c
$(HOSTCC) -I$(TOPDIR)/arch/mips/boot -I- -o $@ $^
clean:
rm -f vmlinux.ecoff
@@ -2,18 +2,29 @@
# Makefile for the Linux/MIPS kernel.
#
extra-y := head.o init_task.o
extra-y := head.o init_task.o
obj-y := branch.o entry.o proc.o process.o ptrace.o r4k_cache.o r4k_fpu.o \
r4k_genex.o r4k_switch.o r4k_tlb.o r4k_tlb_debug.o r4k_tlb_glue.o \
scall_64.o semaphore.o setup.o signal.o softfp.o syscall.o \
traps.o unaligned.o
obj-y := branch.o cpu-probe.o entry.o irq.o proc.o process.o \
ptrace.o r4k_cache.o r4k_fpu.o r4k_genex.o r4k_switch.o \
reset.o scall_64.o semaphore.o setup.o signal.o syscall.o \
time.o traps.o unaligned.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_MODULES) += mips64_ksyms.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o scall_o32.o signal32.o ioctl32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o ioctl32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall_n32.o
obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall_o32.o
obj-$(CONFIG_SMP) += smp.o
AFLAGS_r4k_genex.o := -P
AFLAGS_r4k_tlb_glue.o := -P
ifndef CONFIG_MAPPED_PCI_IO
obj-y += pci-dma.o
endif
obj-$(CONFIG_MODULES) += module.o
CFLAGS_cpu-probe.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
AFLAGS_r4k_genex.o = -P
EXTRA_AFLAGS := $(CFLAGS)
/*
* Support for 32-bit Linux/MIPS ELF binaries.
* Support for n32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -21,16 +21,32 @@
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned int elf_greg_t;
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define elf_check_arch(x) \
((x)->e_machine == EM_MIPS)
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
((__h->e_flags & EF_MIPS_ABI) != 0)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x80000000UL
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
@@ -38,11 +54,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <linux/module.h>
#include <linux/config.h>
#include <linux/elfcore.h>
struct timeval32
{
unsigned int tv_sec, tv_usec;
};
#include <linux/compat.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
@@ -55,10 +67,10 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct timeval32 pr_utime; /* User time */
struct timeval32 pr_stime; /* System time */
struct timeval32 pr_cutime; /* Cumulative user time */
struct timeval32 pr_cstime; /* Cumulative system time */
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -71,8 +83,8 @@ struct elf_prpsinfo32
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
u16 pr_uid;
u16 pr_gid;
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
@@ -80,7 +92,11 @@ struct elf_prpsinfo32
};
#define elf_addr_t u32
#define init_elf_binfmt init_elf32_binfmt
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
#define ELF_CORE_EFLAGS EF_MIPS_ABI2
#undef CONFIG_BINFMT_ELF
#ifdef CONFIG_BINFMT_ELF32
#define CONFIG_BINFMT_ELF CONFIG_BINFMT_ELF32
@@ -90,8 +106,8 @@ struct elf_prpsinfo32
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@oss.sgi.com)");
MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
/*
* Support for o32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB;
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB;
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned int elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_addr_t u32
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
#undef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
void elf32_core_copy_regs(elf_gregset_t _dest, struct pt_regs *_regs)
{
int i;
memset(_dest, 0, sizeof(elf_gregset_t));
/* XXXKW the 6 is from EF_REG0 in gdb/gdb/mips-linux-tdep.c, include/asm-mips/reg.h */
for (i=6; i<38; i++)
_dest[i] = (elf_greg_t) _regs->regs[i-6];
_dest[i++] = (elf_greg_t) _regs->lo;
_dest[i++] = (elf_greg_t) _regs->hi;
_dest[i++] = (elf_greg_t) _regs->cp0_epc;
_dest[i++] = (elf_greg_t) _regs->cp0_badvaddr;
_dest[i++] = (elf_greg_t) _regs->cp0_status;
_dest[i++] = (elf_greg_t) _regs->cp0_cause;
}
#undef CONFIG_BINFMT_ELF
#ifdef CONFIG_BINFMT_ELF32
#define CONFIG_BINFMT_ELF CONFIG_BINFMT_ELF32
#endif
#undef CONFIG_BINFMT_ELF_MODULE
#ifdef CONFIG_BINFMT_ELF32_MODULE
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#include "../../../fs/binfmt_elf.c"
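Side note, not part of the commit: the two loaders above split the 32-bit ELF space purely on header flags. An n32 object must carry EF_MIPS_ABI2 with a zero EF_MIPS_ABI field, while an o32 object must carry no EF_MIPS_ABI2 and either a zero EF_MIPS_ABI field or EF_MIPS_ABI_O32. A minimal C restatement, with illustrative helper names only:
/* Sketch only -- the kernel open-codes these tests in the elf_check_arch macros above. */
static int is_n32_hdr(struct elfhdr *h)
{
	return h->e_machine == EM_MIPS &&
	       h->e_ident[EI_CLASS] == ELFCLASS32 &&
	       (h->e_flags & EF_MIPS_ABI2) != 0 &&
	       (h->e_flags & EF_MIPS_ABI) == 0;
}
static int is_o32_hdr(struct elfhdr *h)
{
	return h->e_machine == EM_MIPS &&
	       h->e_ident[EI_CLASS] == ELFCLASS32 &&
	       (h->e_flags & EF_MIPS_ABI2) == 0 &&
	       ((h->e_flags & EF_MIPS_ABI) == 0 ||
	        (h->e_flags & EF_MIPS_ABI) == EF_MIPS_ABI_O32);
}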
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
@@ -163,7 +164,10 @@ int __compute_return_epc(struct pt_regs *regs)
* And now the FPA/cp1 branch instructions.
*/
case cop1_op:
asm ("cfc1\t%0,$31":"=r" (fcr31));
if (!cpu_has_fpu)
fcr31 = current->thread.fpu.soft.sr;
else
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
bit = (insn.i_format.rt >> 2);
bit += (bit != 0);
bit += 23;
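Side note, not part of the commit: the bit arithmetic just above maps the branch's condition-code field (insn.i_format.rt >> 2) onto the FCSR layout, where condition bit 0 is bit 23 and condition bits 1-7 are bits 25-31. An equivalent sketch:
/* Sketch only: same result as bit = cc; bit += (bit != 0); bit += 23; */
static inline int fcc_to_fcr31_bit(unsigned int cc)
{
	return cc ? 24 + cc : 23;	/* cc 0 -> bit 23, cc 1..7 -> bits 25..31 */
}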
@@ -9,67 +9,88 @@
* Copyright (C) 1999, 2000 Silicon Graphics
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
/* This duplicates the definition from <linux/sched.h> */
#error #define PT_TRACESYS 0x00000002 /* tracing system calls */
#include <asm/thread_info.h>
#define KU_USER 0x10
.text
.align 4
FEXPORT(ret_from_fork)
move a0, v0 # prev
jal schedule_tail
#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
#error andi t0, PT_TRACESYS
bnez t0, tracesys_exit
j ret_from_sys_call
.align 5
FEXPORT(ret_from_irq)
FEXPORT(ret_from_exception)
ld t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, restore_all
tracesys_exit:
jal syscall_trace
b ret_from_sys_call
FEXPORT(resume_userspace)
mfc0 t0, CP0_STATUS # make sure need_resched and
ori t0, t0, 1 # signals dont change between
xori t0, t0, 1 # sampling and return
SSNOP; SSNOP; SSNOP
EXPORT(ret_from_irq)
EXPORT(ret_from_exception)
lw t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
bnez t0, ret_from_sys_call
LONG_L a2, TI_FLAGS($28)
andi a2, _TIF_WORK_MASK # current->work (ignoring
# syscall_trace
bnez a2, work_pending
j restore_all
reschedule: jal schedule
FEXPORT(ret_from_fork)
jal schedule_tail
FEXPORT(ret_from_sys_call)
mfc0 t0, CP0_STATUS # need_resched and signals atomic test
ori t0, t0, 1
xori t0, t0, 1
FEXPORT(syscall_exit)
mfc0 t0, CP0_STATUS # make sure need_resched and
ori t0, t0, 1 # signals dont change between
xori t0, t0, 1 # sampling and return
mtc0 t0, CP0_STATUS
SSNOP; SSNOP; SSNOP
#error ld v0, TASK_NEED_RESCHED($28)
#error lw v1, TASK_SIGPENDING($28)
bnez v0, reschedule
bnez v1, signal_return
LONG_L a2, TI_FLAGS($28) # current->work
bnez a2, syscall_exit_work
restore_all: .set noat
restore_all:
.set noat
RESTORE_ALL
eret
.set at
signal_return: .type signal_return, @function
work_pending:
bltz a2, work_notifysig # current->work.need_resched
# test high 8 bits
work_resched:
jal schedule
mfc0 t0, CP0_STATUS
ori t0, t0, 1
mfc0 t0, CP0_STATUS # make sure need_resched and
ori t0, t0, 1 # signals dont change between
xori t0, t0, 1 # sampling and return
mtc0 t0, CP0_STATUS
SSNOP; SSNOP; SSNOP
move a0, zero
move a1, sp
jal do_signal
b restore_all
LONG_L a2, TI_FLAGS($28) # This also converts into
# a union of four chars
andi a2, _TIF_WORK_MASK # is there any work to be done
# other than syscall tracing?
beqz a2, restore_all
andi t0, a2, _TIF_NEED_RESCHED
bnez t0, work_notifysig
work_notifysig: # deal with pending signals and
# notify-resume requests
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
j restore_all
FEXPORT(syscall_exit_work)
LONG_L t0, TI_FLAGS($28) # current->work.syscall_trace
bgez t0, work_pending
mfc0 t0, CP0_STATUS # sti
ori t0, t0, 1
mtc0 t0, CP0_STATUS
jal do_syscall_trace
b resume_userspace
/*
* Common spurious interrupt handler.
@@ -81,9 +102,10 @@ LEAF(spurious_interrupt)
* Someone tried to fool us by sending an interrupt but we
* couldn't find a cause for it.
*/
lui t1,%hi(spurious_count)
lw t0,%lo(spurious_count)(t1)
addiu t0,1
sw t0,%lo(spurious_count)(t1)
lui t1, %hi(irq_err_count)
1: ll t0, %lo(irq_err_count)(t1)
addiu t0, 1
sc t0, %lo(irq_err_count)(t1)
beqz t0, 1b
j ret_from_irq
END(spurious_interrupt)
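Side note, not part of the commit: the 1: ll / addiu / sc / beqz sequence retries until the store-conditional succeeds, so the counter update stays atomic against concurrent updaters. At the C level it amounts to what the i8259 code in this same commit does on its spurious-interrupt path:
atomic_inc(&irq_err_count);	/* what the ll/sc loop above open-codes */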
@@ -10,7 +10,6 @@
* Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#define __ASSEMBLY__
#include <linux/config.h>
#include <linux/init.h>
#include <asm/asm.h>
@@ -27,7 +26,7 @@
#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
/* We get launched at a XKPHYS address but the kernel is linked to
run at a KSEG0 address, so jump there. */
la t0, \@f
PTR_LA t0, \@f
jr t0
\@:
#endif
@@ -35,13 +34,13 @@
#ifdef CONFIG_SGI_IP27
/*
* outputs the local nasid into t1.
* outputs the local nasid into res. IP27 stuff.
*/
.macro GET_NASID_ASM
dli t1, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
ld t1, (t1)
and t1, NSRI_NODEID_MASK
dsrl t1, NSRI_NODEID_SHFT
.macro GET_NASID_ASM res
dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
ld \res, (\res)
and \res, NSRI_NODEID_MASK
dsrl \res, NSRI_NODEID_SHFT
.endm
#endif /* CONFIG_SGI_IP27 */
@@ -97,7 +96,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
xori sp, 0xf
#ifdef CONFIG_SGI_IP27
GET_NASID_ASM
GET_NASID_ASM t1
move t2, t1 # text and data are here
MAPPED_KERNEL_SETUP_TLB
#endif /* IP27 */
@@ -106,39 +105,39 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
CLI # disable interrupts
mfc0 t0, CP0_STATUS
PTR_LA $28, init_thread_union # init current pointer
daddiu sp, $28, KERNEL_STACK_SIZE-32
set_saved_sp sp, t0, t1
/*
* On IP27, I am seeing the TS bit set when the
* kernel is loaded. Maybe because the kernel is
* in ckseg0 and not xkphys? Clear it anyway ...
* The firmware/bootloader passes argc/argp/envp
* to us as arguments. But clear bss first because
* the romvec and other important info is stored there
* by prom_init().
*/
li t1, ~(ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3)
and t0, t1
or t0, (ST0_CU0|ST0_KX|ST0_SX|ST0_FR) # Bogosity: cu0 indicates kernel
mtc0 t0, CP0_STATUS # thread in copy_thread.
la $28, init_task_union # init current pointer
daddiu t0, $28, KERNEL_STACK_SIZE-32
sd t0, kernelsp
dsubu sp, t0, 4*SZREG # init stack pointer
move t0, $28
#ifdef CONFIG_SMP
mtc0 t0, CP0_WATCHLO
dsrl32 t0, t0, 0
mtc0 t0, CP0_WATCHHI
#endif
/* Note that all firmware passed argument registers still
have their values. */
jal prom_init # initialize firmware
PTR_LA t0, __bss_start
sd zero, (t0)
PTR_LA t1, __bss_stop - 8
1:
daddiu t0, 8
sd zero, (t0)
bne t0, t1, 1b
dsubu sp, 4*SZREG # init stack pointer
jal start_kernel
1: b 1b # just in case ...
j init_arch
END(kernel_entry)
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_SGI_IP27
NESTED(bootstrap, 16, sp)
GET_NASID_ASM
li t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + KLDIR_OFF_POINTER + K0BASE
GET_NASID_ASM t1
li t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + \
KLDIR_OFF_POINTER + K0BASE
dsll t1, NASID_SHFT
or t0, t0, t1
ld t0, 0(t0) # t0 points to kern_vars struct
@@ -146,19 +145,27 @@ NESTED(bootstrap, 16, sp)
lh t2, KV_RW_NASID_OFFSET(t0)
MAPPED_KERNEL_SETUP_TLB
ARC64_TWIDDLE_PC
CLI
mfc0 t0, CP0_STATUS
li t1, ~(ST0_CU1|ST0_CU2|ST0_CU3)
and t0, t1
or t0, (ST0_CU0|ST0_KX|ST0_SX|ST0_FR) # Bogosity: cu0 indicates kernel
mtc0 t0, CP0_STATUS # thread in copy_thread.
jal cboot
END(bootstrap)
#endif /* CONFIG_SGI_IP27 */
CLI
/*
* For the moment set ST0_KU so the CPU will not spit fire when
* executing 64-bit instructions. The full initialization of the
* CPU's status register is done later in per_cpu_trap_init().
*/
mfc0 t0, CP0_STATUS
or t0, ST0_KX
mtc0 t0, CP0_STATUS
jal start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
__FINIT
.comm kernelsp, 8, 8 # current stackpointer
declare_saved_sp
#undef PAGE_SIZE
#define PAGE_SIZE 0x1000
@@ -171,12 +178,12 @@ NESTED(bootstrap, 16, sp)
.endm
.data
.align 12
.align PAGE_SHIFT
page swapper_pg_dir, 1
page invalid_pte_table, 0
page invalid_pmd_table, 1
page kptbl, KPTBL_PAGE_ORDER
page kptbl, _PGD_ORDER
.globl ekptbl
page kpmdtbl, 0
ekptbl:
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <asm/i8259.h>
#include <asm/io.h>
void enable_8259A_irq(unsigned int irq);
void disable_8259A_irq(unsigned int irq);
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
* plus some generic x86 specific things if generic specifics makes
* any sense at all.
* this file should become arch/i386/kernel/irq.c when the old irq.c
* moves to arch independent land
*/
static spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
static void end_8259A_irq (unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
irq_desc[irq].action)
enable_8259A_irq(irq);
}
#define shutdown_8259A_irq disable_8259A_irq
void mask_and_ack_8259A(unsigned int);
static unsigned int startup_8259A_irq(unsigned int irq)
{
enable_8259A_irq(irq);
return 0; /* never anything pending */
}
static struct hw_interrupt_type i8259A_irq_type = {
"XT-PIC",
startup_8259A_irq,
shutdown_8259A_irq,
enable_8259A_irq,
disable_8259A_irq,
mask_and_ack_8259A,
end_8259A_irq,
NULL
};
/*
* 8259A PIC functions to handle ISA devices:
*/
/*
* This contains the irq mask for both 8259A irq controllers,
*/
static unsigned int cached_irq_mask = 0xffff;
#define cached_21 (cached_irq_mask)
#define cached_A1 (cached_irq_mask >> 8)
void disable_8259A_irq(unsigned int irq)
{
unsigned int mask = 1 << irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
spin_unlock_irqrestore(&i8259A_lock, flags);
}
void enable_8259A_irq(unsigned int irq)
{
unsigned int mask = ~(1 << irq);
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
{
unsigned int mask = 1 << irq;
unsigned long flags;
int ret;
spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(0x20) & mask;
else
ret = inb(0xA0) & (mask >> 8);
spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
irq_desc[irq].handler = &i8259A_irq_type;
enable_irq(irq);
}
/*
* This function assumes to be called rarely. Switching between
* 8259A registers is slow.
* This has to be protected by the irq controller spinlock
* before being called.
*/
static inline int i8259A_irq_real(unsigned int irq)
{
int value;
int irqmask = 1 << irq;
if (irq < 8) {
outb(0x0B,0x20); /* ISR register */
value = inb(0x20) & irqmask;
outb(0x0A,0x20); /* back to the IRR register */
return value;
}
outb(0x0B,0xA0); /* ISR register */
value = inb(0xA0) & (irqmask >> 8);
outb(0x0A,0xA0); /* back to the IRR register */
return value;
}
/*
* Careful! The 8259A is a fragile beast, it pretty
* much _has_ to be done exactly like this (mask it
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
void mask_and_ack_8259A(unsigned int irq)
{
unsigned int irqmask = 1 << irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want to overdo
* spurious IRQ handling - it's usually a sign of hardware problems, so
* we only do the checks we can do without slowing down good hardware
* unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
* from the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
* Thus we can check spurious 8259A IRQs without doing the quite slow
* i8259A_irq_real() call for every IRQ. This does not cover 100% of
* spurious interrupts, but should be enough to warn the user that
* there is something bad going on ...
*/
if (cached_irq_mask & irqmask)
goto spurious_8259A_irq;
cached_irq_mask |= irqmask;
handle_real_irq:
if (irq & 8) {
inb(0xA1); /* DUMMY - (do we need this?) */
outb(cached_A1,0xA1);
outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
} else {
inb(0x21); /* DUMMY - (do we need this?) */
outb(cached_21,0x21);
outb(0x60+irq,0x20); /* 'Specific EOI' to master */
}
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
/*
* this is the slow path - should happen rarely.
*/
if (i8259A_irq_real(irq))
/*
* oops, the IRQ _is_ in service according to the
* 8259A - not spurious, go handle it.
*/
goto handle_real_irq;
{
static int spurious_irq_mask = 0;
/*
* At this point we can be sure the IRQ is spurious,
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk("spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
atomic_inc(&irq_err_count);
/*
* Theoretically we do not have to handle this IRQ,
* but in Linux this does not cause problems and is
* simpler for us.
*/
goto handle_real_irq;
}
}
static int i8259A_resume(struct sys_device *dev)
{
init_8259A(0);
return 0;
}
static struct sysdev_class i8259_sysdev_class = {
set_kset_name("i8259"),
.resume = i8259A_resume,
};
static struct sys_device device_i8259A = {
.id = 0,
.cls = &i8259_sysdev_class,
};
static int __init i8259A_init_sysfs(void)
{
int error = sysdev_class_register(&i8259_sysdev_class);
if (!error)
error = sys_device_register(&device_i8259A);
return error;
}
device_initcall(i8259A_init_sysfs);
void __init init_8259A(int auto_eoi)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, 0x21); /* mask all of 8259A-1 */
outb(0xff, 0xA1); /* mask all of 8259A-2 */
/*
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi)
outb_p(0x03, 0x21); /* master does Auto EOI */
else
outb_p(0x01, 0x21); /* master expects normal EOI */
outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
is to be investigated) */
if (auto_eoi)
/*
* in AEOI mode we just have to mask the interrupt
* when acking.
*/
i8259A_irq_type.ack = disable_8259A_irq;
else
i8259A_irq_type.ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
outb(cached_21, 0x21); /* restore master IRQ mask */
outb(cached_A1, 0xA1); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
asmlinkage void i8259_do_irq(int irq, struct pt_regs regs)
{
panic("i8259_do_irq: I want to be implemented");
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
no_action, 0, 0, "cascade", NULL, NULL
};
static struct resource pic1_io_resource = {
"pic1", 0x20, 0x3f, IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
"pic2", 0xa0, 0xbf, IORESOURCE_BUSY
};
/*
* On systems with i8259-style interrupt controllers we assume for
* driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init init_i8259_irqs (void)
{
int i;
request_resource(&ioport_resource, &pic1_io_resource);
request_resource(&ioport_resource, &pic2_io_resource);
init_8259A(0);
for (i = 0; i < 16; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].handler = &i8259A_irq_type;
}
setup_irq(2, &irq2);
}
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
/*
* Initial task structure.
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
@@ -20,6 +22,13 @@ struct mm_struct init_mm = INIT_MM(init_mm);
*
* The things we do for performance..
*/
union task_union init_task_union
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_TASK(init_task_union.task) };
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
*
* This file define the irq handler for MIPS CPU interrupts.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/*
* Almost all MIPS CPUs define 8 interrupt sources. They are typically
* level triggered (i.e., cannot be cleared from CPU; must be cleared from
* device). The first two are software interrupts which we don't really
* use or support. The last one is usually cpu timer interrupt if a counter
* register is present.
*
* Don't even think about using this on SMP. You have been warned.
*
* This file exports one global function:
* mips_cpu_irq_init(u32 irq_base);
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
static int mips_cpu_irq_base;
static inline void unmask_mips_irq(unsigned int irq)
{
clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
set_c0_status(0x100 << (irq - mips_cpu_irq_base));
}
static inline void mask_mips_irq(unsigned int irq)
{
clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
}
static inline void mips_cpu_irq_enable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
unmask_mips_irq(irq);
local_irq_restore(flags);
}
static void mips_cpu_irq_disable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
mask_mips_irq(irq);
local_irq_restore(flags);
}
static unsigned int mips_cpu_irq_startup(unsigned int irq)
{
mips_cpu_irq_enable(irq);
return 0;
}
#define mips_cpu_irq_shutdown mips_cpu_irq_disable
/*
* While we ack the interrupt interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
*/
static void mips_cpu_irq_ack(unsigned int irq)
{
/* Only necessary for soft interrupts */
clear_c0_cause(1 << (irq - mips_cpu_irq_base + 8));
mask_mips_irq(irq);
}
static void mips_cpu_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
unmask_mips_irq(irq);
}
static hw_irq_controller mips_cpu_irq_controller = {
"CPU_irq",
mips_cpu_irq_startup,
mips_cpu_irq_shutdown,
mips_cpu_irq_enable,
mips_cpu_irq_disable,
mips_cpu_irq_ack,
mips_cpu_irq_end,
NULL /* no affinity stuff for UP */
};
void mips_cpu_irq_init(u32 irq_base)
{
u32 i;
for (i = irq_base; i < irq_base + 8; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].handler = &mips_cpu_irq_controller;
}
mips_cpu_irq_base = irq_base;
}
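Side note, not part of the commit: board code is expected to pick an IRQ base and hand these eight sources to the controller above. A hypothetical sketch; MIPS_CPU_IRQ_BASE, its value and the function name are illustrative only:
/* Hypothetical board setup -- names and numbers are not from this commit. */
#define MIPS_CPU_IRQ_BASE	16	/* e.g. park the 8 CPU IRQs above the i8259 range */
void __init my_board_irq_setup(void)
{
	/* IRQs 16..23 are now handled by mips_cpu_irq_controller; per the comment
	   above, the last of them is usually the CPU timer interrupt. */
	mips_cpu_irq_init(MIPS_CPU_IRQ_BASE);
}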
@@ -13,9 +13,9 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/in6.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <asm/bootinfo.h>
#include <asm/dma.h>
@@ -38,7 +38,10 @@ extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);
EXPORT_SYMBOL(mips_machtype);
#ifdef CONFIG_EISA
EXPORT_SYMBOL(EISA_bus);
#endif
/*
* String functions
@@ -53,12 +56,9 @@ EXPORT_SYMBOL_NOVERS(strlen);
EXPORT_SYMBOL_NOVERS(strncat);
EXPORT_SYMBOL_NOVERS(strnlen);
EXPORT_SYMBOL_NOVERS(strrchr);
EXPORT_SYMBOL_NOVERS(strsep);
EXPORT_SYMBOL_NOVERS(strpbrk);
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/*
@@ -74,43 +74,16 @@ EXPORT_SYMBOL_NOVERS(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL_NOVERS(__strnlen_user_asm);
/*
* Functions to control caches.
*/
EXPORT_SYMBOL(_flush_page_to_ram);
EXPORT_SYMBOL(_flush_cache_l1);
#ifndef CONFIG_COHERENT_IO
EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_inv);
#endif
EXPORT_SYMBOL(invalid_pte_table);
/*
* Base address of ports for Intel style I/O.
*/
#if defined (CONFIG_PCI) || defined (CONFIG_ISA)
EXPORT_SYMBOL(mips_io_port_base);
#endif
/*
* Kernel hacking ...
*/
#include <asm/branch.h>
#include <linux/sched.h>
int register_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31));
int unregister_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31));
#ifdef CONFIG_MIPS_FPE_MODULE
EXPORT_SYMBOL(__compute_return_epc);
EXPORT_SYMBOL(register_fpe);
EXPORT_SYMBOL(unregister_fpe);
#endif
#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(_flush_tlb_page);
@@ -31,7 +31,7 @@
.set noreorder
/* Save floating point context */
LEAF(save_fp_context)
LEAF(_save_fp_context)
mfc0 t1, CP0_STATUS
sll t2, t1,5
@@ -79,7 +79,7 @@ LEAF(save_fp_context)
jr ra
li v0, 0 # success
END(save_fp_context)
END(_save_fp_context)
/*
* Restore FPU state:
@@ -90,7 +90,7 @@ LEAF(save_fp_context)
* frame on the current content of c0_status, not on the content of the
* stack frame which might have been changed by the user.
*/
LEAF(restore_fp_context)
LEAF(_restore_fp_context)
mfc0 t1, CP0_STATUS
sll t0, t1,5
bgez t0, 1f
@@ -139,7 +139,7 @@ LEAF(restore_fp_context)
ctc1 t0, fcr31
jr ra
li v0, 0 # success
END(restore_fp_context)
END(_restore_fp_context)
.type fault@function
.ent fault
@@ -2,9 +2,17 @@
# Makefile for MIPS-specific library files..
#
EXTRA_AFLAGS := $(CFLAGS)
lib-y += csum_partial.o csum_partial_copy.o memcpy.o \
memset.o promlib.o rtc-std.o rtc-no.o strlen_user.o \
strncpy_user.o strnlen_user.o watch.o
ifeq ($(CONFIG_CPU_R3000)$(CONFIG_CPU_TX39XX),y)
lib-y += r3k_dump_tlb.o
else
lib-y += dump_tlb.o
endif
lib-y += csum_partial.o csum_partial_copy.o dump_tlb.o floppy-std.o \
floppy-no.o ide-std.o ide-no.o kbd-std.o kbd-no.o rtc-std.o \
rtc-no.o memset.o memcpy.o strlen_user.o strncpy_user.o \
strnlen_user.o watch.o
lib-$(CONFIG_BLK_DEV_FD) += floppy-no.o floppy-std.o
lib-$(subst m,y,$(CONFIG_IDE)) += ide-std.o ide-no.o # needed for ide module
EXTRA_AFLAGS := $(CFLAGS)