Commit 82430821 authored by Matthew Wilcox's avatar Matthew Wilcox Committed by Linus Torvalds

[PATCH] arch/parisc/kernel

Update arch/parisc/kernel.
parent 6b3efc2a
......@@ -2,29 +2,30 @@
# Makefile for the linux kernel.
#
EXTRA_TARGETS := head.o init_task.o process.o pdc_cons.o
ifdef CONFIG_PARISC64
EXTRA_TARGETS := init_task.o pdc_cons.o process.o head64.o unaligned.o perf.o perf_asm.o
else
EXTRA_TARGETS := init_task.o pdc_cons.o process.o head.o unaligned.o
endif
# Object file lists.
AFLAGS_entry.o := -traditional
AFLAGS_pacache.o := -traditional
obj-y := cache.o setup.o traps.o time.o irq.o \
syscall.o entry.o sys_parisc.o pdc.o ptrace.o hardware.o \
inventory.o drivers.o semaphore.o pa7300lc.o pci-dma.o \
signal.o hpmc.o \
real1.o real2.o led.o parisc_ksyms.o
export-objs := parisc_ksyms.o keyboard.o
export-objs := parisc_ksyms.o
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o semaphore.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
processor.o pdc_chassis.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_VT) += keyboard.o
obj-$(CONFIG_PCI_LBA) += lba_pci.o
# I/O SAPIC is also on IA64 platforms.
# The two could be merged into a common source some day.
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o
# Only use one of them: ccio-rm-dma is for PCX-W systems *only*
# obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o
obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o
EXTRA_AFLAGS := -traditional
obj-$(CONFIG_PARISC64) += binfmt_elf32.o sys_parisc32.o \
ioctl32.o signal32.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_PARISC64) += perf.o perf_asm.o
include $(TOPDIR)/Rules.make
This diff is collapsed.
/*
* Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
*
* Copyright (C) 2000 John Marvin
* Copyright (C) 2000 Hewlett Packard Co.
*
* Heavily inspired from various other efforts to do the same thing
* (ia64,sparc64/mips64)
*/
/* Make sure include/asm-parisc/elf.h does the right thing */
#define ELF_CLASS ELFCLASS32
typedef unsigned int elf_greg_t;
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/elfcore.h>
#include "sys32.h" /* struct timeval32 */
/*
 * 32-bit layout of the ELF NT_PRSTATUS core-dump note, used when a
 * 32-bit process dumps core on a 64-bit kernel: time values use
 * struct timeval32 and the signal sets are 32 bits wide.
 */
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid; /* Process, parent, group and session IDs */
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct timeval32 pr_utime; /* User time */
struct timeval32 pr_stime; /* System time */
struct timeval32 pr_cutime; /* Cumulative user time */
struct timeval32 pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
/*
 * 32-bit layout of the ELF NT_PRPSINFO core-dump note (process
 * description) for 32-bit processes on a 64-bit kernel.
 */
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
u16 pr_uid; /* 16-bit UID/GID as in the 32-bit ABI */
u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_addr_t unsigned int
#define init_elf_binfmt init_elf32_binfmt
#define ELF_PLATFORM ("PARISC32\0")
/*
 * Fill the 32-bit elf_gregset_t 'dst' from pt_regs 'pt' for a core
 * dump: gr0-gr31 land in slots 0-31, sr0-sr7 in 32-39, the IAOQ and
 * IASQ pairs in 40-43, then SAR/IIR/ISR/IOR in 44-47, and a set of
 * control registers read live with mfctl() in 48-63.  Slots not
 * written here stay zero from the leading memset.
 */
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
{ int i; \
for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
} \
dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
/*
* We should probably use this macro to set a flag somewhere to indicate
* this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
* could set a processor dependent flag in the thread_struct.
*/
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX_32BIT
#include "../../../fs/binfmt_elf.c"
......@@ -15,202 +15,87 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
int split_tlb;
int dcache_stride;
int icache_stride;
struct pdc_cache_info cache_info;
#ifndef __LP64__
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info;
#endif
/*
 * Flush one page's worth of D-cache and I-cache starting at
 * 'address'.  NOTE(review): presumably used to keep instruction
 * fetch coherent after stores into the page — confirm with callers.
 */
void __flush_page_to_ram(unsigned long address)
{
__flush_dcache_range(address, PAGE_SIZE);
__flush_icache_range(address, PAGE_SIZE);
}
void flush_data_cache(void)
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
register unsigned long base = cache_info.dc_base;
register unsigned long count = cache_info.dc_count;
register unsigned long loop = cache_info.dc_loop;
register unsigned long stride = cache_info.dc_stride;
register unsigned long addr;
register long i, j;
for(i=0,addr=base; i<count; i++,addr+=stride)
for(j=0; j<loop; j++)
fdce(addr);
smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
flush_data_cache_local();
}
/*
 * Purge every data-TLB entry for the space currently loaded in sr1,
 * walking the PDC-reported offset range and issuing pdtlbe() the
 * required number of times at each step.
 */
static inline void flush_data_tlb_space(void)
{
	unsigned long entry = cache_info.dt_off_base;
	unsigned long n_entries = cache_info.dt_off_count;
	unsigned long step = cache_info.dt_off_stride;
	unsigned long repeats = cache_info.dt_loop;
	long idx;

	for (idx = 0; idx < n_entries; idx++, entry += step) {
		long rep;

		for (rep = 0; rep < repeats; rep++)
			pdtlbe(entry);
	}
}
#endif
void flush_data_tlb(void)
void
flush_cache_all_local(void)
{
unsigned long base = cache_info.dt_sp_base;
unsigned long count = cache_info.dt_sp_count;
unsigned long stride = cache_info.dt_sp_stride;
unsigned long space;
unsigned long old_sr1;
long i;
old_sr1 = mfsp(1);
for(i=0,space=base; i<count; i++, space+=stride) {
mtsp(space,1);
flush_data_tlb_space();
}
mtsp(old_sr1, 1);
flush_instruction_cache_local();
flush_data_cache_local();
}
/*
 * Purge every instruction-TLB entry for the space currently loaded
 * in sr1, mirroring flush_data_tlb_space() but using pitlbe() and
 * the PDC-reported instruction-TLB geometry.
 */
static inline void flush_instruction_tlb_space(void)
{
	unsigned long entry = cache_info.it_off_base;
	unsigned long n_entries = cache_info.it_off_count;
	unsigned long step = cache_info.it_off_stride;
	unsigned long repeats = cache_info.it_loop;
	long idx;

	for (idx = 0; idx < n_entries; idx++, entry += step) {
		long rep;

		for (rep = 0; rep < repeats; rep++)
			pitlbe(entry);
	}
}
/* flushes EVERYTHING (tlb & cache) */
void flush_instruction_tlb(void)
void
flush_all_caches(void)
{
unsigned long base = cache_info.it_sp_base;
unsigned long count = cache_info.it_sp_count;
unsigned long stride = cache_info.it_sp_stride;
unsigned long space;
unsigned long old_sr1;
unsigned int i;
old_sr1 = mfsp(1);
for(i=0,space=base; i<count; i++, space+=stride) {
mtsp(space,1);
flush_instruction_tlb_space();
}
mtsp(old_sr1, 1);
flush_cache_all();
flush_tlb_all();
}
void __flush_tlb_space(unsigned long space)
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long old_sr1;
old_sr1 = mfsp(1);
mtsp(space, 1);
flush_data_tlb_space();
flush_instruction_tlb_space();
mtsp(old_sr1, 1);
}
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && page->mapping &&
test_bit(PG_dcache_dirty, &page->flags)) {
/*
 * Flush the entire instruction cache by issuing fice() at every
 * address in the PDC-reported range.  sr1 is saved, pointed at
 * space 0 for the duration, and restored afterwards.
 */
void flush_instruction_cache(void)
{
register unsigned long base = cache_info.ic_base;
register unsigned long count = cache_info.ic_count;
register unsigned long loop = cache_info.ic_loop;
register unsigned long stride = cache_info.ic_stride;
register unsigned long addr;
register long i, j;
unsigned long old_sr1;
old_sr1 = mfsp(1);
mtsp(0,1);
/*
* Note: the fice instruction has a 3 bit space field, so one must
* be specified (otherwise you are just using whatever happens to
* be in sr0).
*/
for(i=0,addr=base; i<count; i++,addr+=stride)
for(j=0; j<loop; j++)
fice(addr);
mtsp(old_sr1, 1);
}
/* not yet ... fdc() needs to be implemented in cache.h !
void flush_datacache_range( unsigned int base, unsigned int end )
{
register long offset,offset_add;
offset_add = ( (1<<(cache_info.dc_conf.cc_block-1)) *
cache_info.dc_conf.cc_line ) << 4;
for (offset=base; offset<=end; offset+=offset_add)
fdc(space,offset);
fdc(space,end);
}
*/
/* flushes code and data-cache */
void flush_all_caches(void)
{
flush_instruction_cache();
flush_data_cache();
flush_instruction_tlb();
flush_data_tlb();
asm volatile("sync");
asm volatile("syncdma");
asm volatile("sync");
flush_kernel_dcache_page(page_address(page));
clear_bit(PG_dcache_dirty, &page->flags);
}
}
int get_cache_info(char *buffer)
void
show_cache_info(struct seq_file *m)
{
char *p = buffer;
p += sprintf(p, "I-cache\t\t: %ld KB\n",
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
p += sprintf(p, "D-cache\t\t: %ld KB (%s)%s\n",
seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? " - shared I/D":"")
);
p += sprintf(p, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
);
#ifndef __LP64__
#ifndef CONFIG_PA20
/* BTLB - Block TLB */
if (btlb_info.max_size==0) {
p += sprintf(p, "BTLB\t\t: not supported\n" );
seq_printf(m, "BTLB\t\t: not supported\n" );
} else {
p += sprintf(p,
seq_printf(m,
"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
......@@ -225,29 +110,117 @@ int get_cache_info(char *buffer)
);
}
#endif
return p - buffer;
}
void __init
cache_init(void)
parisc_cache_init(void)
{
if(pdc_cache_info(&cache_info)<0)
panic("cache_init: pdc_cache_info failed");
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
printk("ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
cache_info.ic_size,
cache_info.dc_size,
cache_info.it_size,
sizeof (struct pdc_cache_info) / sizeof (long),
sizeof (struct pdc_cache_cf)
);
printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
cache_info.dc_base,
cache_info.dc_stride,
cache_info.dc_count,
cache_info.dc_loop);
printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
cache_info.dc_conf.cc_alias,
cache_info.dc_conf.cc_block,
cache_info.dc_conf.cc_line,
cache_info.dc_conf.cc_wt,
cache_info.dc_conf.cc_sh,
cache_info.dc_conf.cc_cst,
cache_info.dc_conf.cc_assoc);
printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
cache_info.ic_conf.cc_alias,
cache_info.ic_conf.cc_block,
cache_info.ic_conf.cc_line,
cache_info.ic_conf.cc_wt,
cache_info.ic_conf.cc_sh,
cache_info.ic_conf.cc_cst,
cache_info.ic_conf.cc_assoc);
printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
cache_info.dt_conf.tc_sh,
cache_info.dt_conf.tc_page,
cache_info.dt_conf.tc_cst,
cache_info.dt_conf.tc_aid,
cache_info.dt_conf.tc_pad1);
printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
cache_info.it_conf.tc_sh,
cache_info.it_conf.tc_page,
cache_info.it_conf.tc_cst,
cache_info.it_conf.tc_aid,
cache_info.it_conf.tc_pad1);
#endif
#ifndef __LP64__
split_tlb = 0;
if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
if (cache_info.dt_conf.tc_sh == 2)
printk(KERN_WARNING "Unexpected TLB configuration. "
"Will flush I/D separately (could be optimized).\n");
split_tlb = 1;
}
dcache_stride = ( (1<<(cache_info.dc_conf.cc_block+3)) *
cache_info.dc_conf.cc_line );
icache_stride = ( (1<<(cache_info.ic_conf.cc_block+3)) *
cache_info.ic_conf.cc_line );
#ifndef CONFIG_PA20
if(pdc_btlb_info(&btlb_info)<0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
#endif
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "Only equivalent aliasing supported\n");
#ifndef CONFIG_SMP
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
}
/*
 * Turn off space-register hashing on the boot CPU: pick the
 * CPU-family-specific instruction sequence and hand it to the
 * assembly helper.  pcxl2 has no SR hashing, so nothing to do there.
 */
void disable_sr_hashing(void)
{
	int seq;

	switch (boot_cpu_data.cpu_type) {
	case pcxl2:
		/* pcxl2 doesn't support space register hashing */
		return;
	case pcx:
		/* We shouldn't get here; code in setup.c prevents it */
		BUG();
		return;
	case pcxs:
	case pcxt:
	case pcxt_:
		seq = SRHASH_PCXST;
		break;
	case pcxl:
		seq = SRHASH_PCXL;
		break;
	default:
		/* Currently all PA2.0 machines use the same ins. sequence */
		seq = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(seq);
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -8,17 +8,14 @@
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
*
* Initial Version 04-23-1999 by Helge Deller (helge.deller@ruhr-uni-bochum.de)
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
#include <linux/autoconf.h> /* for CONFIG_SMP */
#include <asm/offset.h>
#include <asm/offsets.h>
#include <asm/psw.h>
#define __ASSEMBLY__
/*********
#include <asm/pdc.h>
*********/
#include <asm/assembly.h>
#include <asm/pgtable.h>
......@@ -36,9 +33,18 @@ __setup_start:
.export __setup_end
__setup_end:
.data
.export boot_args
boot_args:
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
.text
.align 4
.import init_task_union,data
.import init_thread_union,data
.import $global$ /* forward declaration */
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
......@@ -52,43 +58,129 @@ stext:
.callinfo
/* Make sure sr4-sr7 are set to zero for the kernel address space */
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Clear BSS (shouldn't the boot loader do this?) */
/* Initialize startup VM. Just map first 8 MB of memory */
.import _edata,data
.import _end,data
ldil L%PA(_edata),%r3
ldo R%PA(_edata)(%r3),%r3
ldil L%PA(_end),%r4
ldo R%PA(_end)(%r4),%r4
$bss_loop:
cmpb,<<,n %r3,%r4,$bss_loop
stb,ma %r0,1(%r3)
/* Save away the arguments the boot loader passed in (32 bit args) */
ldil L%PA(boot_args),%r1
ldo R%PA(boot_args)(%r1),%r1
stw,ma %arg0,4(%r1)
stw,ma %arg1,4(%r1)
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
/* Initialize startup VM. Just map first 8 MB of memory */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
ldo _PAGE_TABLE(%r1),%r3
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
stw %r3,0xc00(%r4) /* Hardwired 0xc0000000 kernel vaddr start */
#if (__PAGE_OFFSET != 0x10000000UL)
Error! Code below (the next two stw's) needs to be changed
#endif
stw %r3,0x100(%r4) /* Hardwired 0x1... kernel Vaddr start*/
ldo 0x1000(%r3),%r3
stw %r3,0xc04(%r4)
ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0x0 phys addr start */
stw %r3,0x104(%r4)
ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
$pgt_fill_loop:
stwm %r3,4(%r1)
ldo 0x1000(%r3),%r3
bb,>= %r3,8,$pgt_fill_loop
nop
/* Load the return address...er...crash 'n burn */
copy %r0,%r2
/* And the RFI Target address too */
ldil L%start_kernel,%r11
ldo R%start_kernel(%r11),%r11
/* And the initial task pointer */
ldil L%init_thread_union,%r6
ldo R%init_thread_union(%r6),%r6
mtctl %r6,%cr30
/* And the stack pointer too */
ldo THREAD_SZ_ALGN(%r6),%sp
/* And the interrupt stack */
ldil L%interrupt_stack,%r6
ldo R%interrupt_stack(%r6),%r6
mtctl %r6,%cr31
#ifdef CONFIG_SMP
/* Set the smp rendevous address into page zero.
** It would be safer to do this in init_smp_config() but
** it's just way easier to deal with here because
** of 64-bit function ptrs and the address is local to this file.
*/
ldil L%PA(smp_slave_stext),%r10
ldo R%PA(smp_slave_stext)(%r10),%r10
stw %r10,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
/* FALLTHROUGH */
.procend
/*
** Code Common to both Monarch and Slave processors.
** Entry:
** %r11 must contain RFI target address.
** %r25/%r26 args to pass to target function
** %r2 in case rfi target decides it didn't like something
**
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
*/
common_stext:
.proc
.callinfo
#else
/* Clear PDC entry point - we won't use it */
stw %r0,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
mtsp %r0,%sr2
mtsp %r0,%sr3
/* Initialize Protection Registers */
mtctl %r0,%cr8
mtctl %r0,%cr9
mtctl %r0,%cr12
mtctl %r0,%cr13
/* Initialize the global data pointer */
ldil L%$global$,%dp
ldo R%$global$(%dp),%dp
/* And the stack pointer, physical too */
ldil L%init_task_union+TASK_SZ_ALGN,%sp
ldo R%init_task_union+TASK_SZ_ALGN(%sp),%sp
/* we need this to take interruptions directly after the rfi below */
/* (which we need for PA2.0 boxes) */
mtctl %r0, %cr30
/*
* Set up our interrupt table. HPMCs might not work after this!
*
......@@ -96,7 +188,6 @@ $pgt_fill_loop:
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
......@@ -114,30 +205,30 @@ $is_pa20:
$install_iva:
mtctl %r10,%cr14
/* Disable (most) interruptions */
mtsm %r0
/* Disable Q bit so we can load the iia queue */
rsm PSW_SM_Q,%r0
/* kernel PSW:
* - no interruptions except for HPMC and TOC (which are handled by PDC)
* - no interruptions except HPMC and TOC (which are handled by PDC)
* - Q bit set (IODC / PDC interruptions)
* - big-endian
* - virtually mapped
*/
ldil L%KERNEL_PSW,%r10
ldo R%KERNEL_PSW(%r10),%r10
mtctl %r10,%ipsw
/* Set the space pointers for the post-RFI world */
mtctl %r0,%cr17 /* Clear two-level IIA Space Queue */
mtctl %r0,%cr17 /* effectively setting kernel space. */
/* And the return address(es) too */
ldil L%start_parisc,%r10
ldo R%start_parisc(%r10),%r10
mtctl %r10,%cr18
ldo 4(%r10),%r10
mtctl %r10,%cr18
/* Set the space pointers for the post-RFI world
** Clear the two-level IIA Space Queue, effectively setting
** Kernel space.
*/
mtctl %r0,%cr17
mtctl %r0,%cr17
/* Load RFI target into PC queue */
mtctl %r11,%cr18
ldo 4(%r11),%r11
mtctl %r11,%cr18
/* Jump to hyperspace */
rfi
......@@ -145,6 +236,71 @@ $install_iva:
.procend
#ifdef CONFIG_SMP
.import smp_init_current_idle_task,data
.import smp_callin,code
smp_callin_rtn:
.proc
.callinfo
break 1,1 /* Break if returned from start_secondary */
nop
nop
.procend
/***************************************************************************
*
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
.proc
.callinfo
/*
** Initialize Space registers
*/
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
ldil L%PA(smp_init_current_idle_task),%sp
ldo R%PA(smp_init_current_idle_task)(%sp),%sp
ldw 0(%sp),%sp /* load task address */
mtctl %sp,%cr30 /* store in cr30 */
addil L%TASK_SZ_ALGN,%sp /* stack is above task */
ldo R%TASK_SZ_ALGN(%r1),%sp
/* point CPU to kernel page tables */
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
/* Load RFI *return* address in case smp_callin bails */
ldil L%smp_callin_rtn,%r2
ldo R%smp_callin_rtn(%r2),%r2
/* Load RFI target address. */
ldil L%smp_callin,%r11
ldo R%smp_callin(%r11),%r11
/* ok...common code can handle the rest */
b common_stext
nop
.procend
#endif /* CONFIG_SMP */
.data
.align 4
......
This diff is collapsed.
......@@ -43,7 +43,6 @@
.level 1.1
.data
#define __ASSEMBLY__
#include <asm/assembly.h>
#include <asm/pdc.h>
......@@ -262,12 +261,10 @@ os_hpmc_5:
mtsp %r0, %sr6
mtsp %r0, %sr7
tovirt %r30 /* make sp virtual */
tovirt_r1 %r30 /* make sp virtual */
rsm 8,%r0 /* Clear Q bit */
ldi 1,%r1
mtctl %r1,%cr29 /* Set trap code to "1" for HPMC */
mtctl %r0,%cr30 /* Force interruptions to use hpmc stack */
ldi 1,%r8 /* Set trap code to "1" for HPMC */
ldil L%PA(intr_save), %r1
ldo R%PA(intr_save)(%r1), %r1
be 0(%sr7,%r1)
......
......@@ -9,7 +9,7 @@
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
struct mm_struct init_mm = INIT_MM(init_mm);
/*
......@@ -19,11 +19,21 @@ struct mm_struct init_mm = INIT_MM(init_mm);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union task_union init_task_union
__attribute__((section("init_task"), aligned(4096))) = { INIT_TASK(init_task_union.task) };
unsigned char interrupt_stack[ISTACK_SIZE] __attribute__ ((section("init_istack"), aligned(4096)));
union thread_union init_thread_union
__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
unsigned long swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(4096))) = { 0, };
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(4096))) = { {0}, };
#ifdef __LP64__
unsigned long pmd0[PTRS_PER_PMD] __attribute__ ((aligned(4096))) = { 0, };
#endif
unsigned long pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(4096))) = { 0, };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);
This diff is collapsed.
This diff is collapsed.
/*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
* ---------------------------------------------------------------
* This source file will be removed as soon as we have converted
* hp_psaux.c and hp_keyb.c to the input layer !
*
*/
/*
* linux/arch/parisc/kernel/keyboard.c
*
......@@ -7,8 +15,10 @@
* Copyright 2000 Philipp Rumpf
*/
#include <linux/errno.h>
#include <linux/keyboard.h>
#include <asm/keyboard.h>
#include <linux/module.h>
static int def_setkeycode(unsigned int x, unsigned int y)
{
......@@ -43,20 +53,29 @@ static void def_init_hw(void)
static char def_sysrq_xlate[NR_KEYS];
static struct kbd_ops def_kbd_ops = {
setkeycode: def_setkeycode,
getkeycode: def_getkeycode,
translate: def_translate,
unexpected_up: def_unexpected_up,
leds: def_leds,
init_hw: def_init_hw,
sysrq_key: 0xff,
#define DEFAULT_KEYB_OPS \
setkeycode: def_setkeycode, \
getkeycode: def_getkeycode, \
translate: def_translate, \
unexpected_up: def_unexpected_up, \
leds: def_leds, \
init_hw: def_init_hw, \
sysrq_key: 0xff, \
sysrq_xlate: def_sysrq_xlate,
static struct kbd_ops def_kbd_ops = {
DEFAULT_KEYB_OPS
};
struct kbd_ops *kbd_ops = &def_kbd_ops;
void unregister_kbd_ops(void)
{
struct kbd_ops new_kbd_ops = { DEFAULT_KEYB_OPS };
register_kbd_ops(&new_kbd_ops);
}
EXPORT_SYMBOL(unregister_kbd_ops);
void register_kbd_ops(struct kbd_ops *ops)
{
if(ops->setkeycode)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment