Commit c61c48df authored by Linus Torvalds

Merge tag 'xtensa-next-20130508' of git://github.com/czankel/xtensa-linux

Pull xtensa updates from Chris Zankel:
 "Support for the latest MMU architecture that allows for a larger
  accessible memory region, and various bug-fixes"

* tag 'xtensa-next-20130508' of git://github.com/czankel/xtensa-linux:
  xtensa: Switch to asm-generic/linkage.h
  xtensa: fix redboot load address
  xtensa: ISS: fix timer_lock usage in rs_open
  xtensa: disable IRQs while IRQ handler is running
  xtensa: enable lockdep support
  xtensa: fix arch_irqs_disabled_flags implementation
  xtensa: add irq flags trace support
  xtensa: provide custom CALLER_ADDR* implementations
  xtensa: add stacktrace support
  xtensa: clean up spill_registers
  xtensa: don't use a7 in simcalls
  xtensa: don't attempt to use unconfigured timers
  xtensa: provide default platform_pcibios_init implementation
  xtensa: remove KCORE_ELF again
  xtensa: document MMUv3 setup sequence
  xtensa: add MMU v3 support
  xtensa: fix ibreakenable register update
  xtensa: fix oprofile building as module
parents e30f4192 b341d84c
MMUv3 initialization sequence.
The code in the initialize_mmu macro sets up MMUv3 memory mapping
identically to the MMUv2 fixed memory mapping. Depending on the
CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol, this code is
located in one of the following address ranges:

    0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
                            typically ROM)
    0x00000000..0x07FFFFFF (system RAM; this code is actually linked
                            at 0xD0000000..0xD7FFFFFF [cached] or
                            0xD8000000..0xDFFFFFFF [uncached]; in any
                            case it initially runs at an address other
                            than the one it is linked at, so be careful)
The code has the following assumptions:
This code fragment is run only on an MMU v3.
TLBs are in their reset state.
ITLBCFG and DTLBCFG are zero (reset state).
RASID is 0x04030201 (reset state).
PS.RING is zero (reset state).
LITBASE is zero (reset state, PC-relative literals); required to be PIC.
TLB setup proceeds along the following steps.
Legend:
VA = virtual address (two upper nibbles of it);
PA = physical address (two upper nibbles of it);
pc = physical range that contains this code;
After step 2, we jump to a virtual address in 0x40000000..0x5FFFFFFF
that corresponds to the next instruction to execute in this code.
After step 4, we jump to the intended (linked) address of this code.
   Step 0     Step 1      Step 2     Step 3      Step 4     Step 5
 ============  =====    ============  =====    ============  =====
   VA      PA     PA      VA      PA     PA      VA      PA     PA
 ------    --     --    ------    --     --    ------    --     --
 E0..FF -> E0  -> E0    E0..FF -> E0           F0..FF -> F0  -> F0
 C0..DF -> C0  -> C0    C0..DF -> C0           E0..EF -> F0  -> F0
 A0..BF -> A0  -> A0    A0..BF -> A0           D8..DF -> 00  -> 00
 80..9F -> 80  -> 80    80..9F -> 80           D0..D7 -> 00  -> 00
 60..7F -> 60  -> 60    60..7F -> 60
 40..5F -> 40           40..5F -> pc  -> pc    40..5F -> pc
 20..3F -> 20  -> 20    20..3F -> 20
 00..1F -> 00  -> 00    00..1F -> 00
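The bare TLB instructions used in these steps take a single register operand that encodes both the virtual page and the hardware way, while the PTE operand encodes the physical page and its cache attributes. A minimal C restatement of that encoding, for illustration only (the helper names are invented; the masks and way numbers follow the initialize_mmu code shown further below):

#include <stdint.h>

/* Entry operand for witlb/wdtlb/iitlb/idtlb: virtual page | way index.
 * Example: tlb_entry(0xd0000000, ~0x07ffffffu, 5) == 0xd0000005,
 * the way-5 (128 MB page) entry written in step 4. */
static uint32_t tlb_entry(uint32_t vaddr, uint32_t page_mask, unsigned way)
{
	return (vaddr & page_mask) | way;
}

/* PTE operand: physical page | cache attribute bits (CA_BYPASS or
 * CA_WRITEBACK in the code below). */
static uint32_t tlb_pte(uint32_t paddr, uint32_t page_mask, uint32_t ca_bits)
{
	return (paddr & page_mask) | ca_bits;
}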
config FRAME_POINTER
def_bool n
config ZONE_DMA
def_bool y
config XTENSA
def_bool y
select ARCH_WANT_FRAME_POINTERS
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
@@ -49,6 +47,15 @@ config HZ
source "init/Kconfig"
source "kernel/Kconfig.freezer"
config LOCKDEP_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config MMU
def_bool n
@@ -100,6 +107,35 @@ config MATH_EMULATION
help
Can we use information from the configuration file?
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
default y
help
Earlier versions initialized the MMU in the exception vector
before jumping to _startup in head.S. That had the advantage that
it was possible to place a software breakpoint at 'reset' and
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0xC0000000).

This unfortunately doesn't work for U-Boot and likely also won't
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.

So now the MMU is initialized in head.S, but it's necessary to
use hardware breakpoints (the gdb 'hbreak' command) to break at
_startup. xt-gdb can't place a software breakpoint in the
0xD0000000 region prior to mapping the MMU, and after mapping,
even if the area of low memory was mapped, gdb wouldn't remove
the breakpoint on hitting it because the PC wouldn't match.
Since hardware breakpoints are recommended for Linux
configurations, it seems reasonable to just assume they exist
and leave this older mechanism for unfortunate souls that choose
not to follow Tensilica's recommendation.

Selecting this will cause U-Boot to set the kernel load and entry
address to 0x00003000 instead of the mapped standard of 0xD0003000.
If in doubt, say Y.
endmenu
config XTENSA_CALIBRATE_CCOUNT
@@ -249,21 +285,6 @@ endmenu
menu "Executable file formats"
# only elf supported
config KCORE_ELF
def_bool y
depends on PROC_FS
help
If you enabled support for /proc file system then the file
/proc/kcore will contain the kernel core image in ELF format. This
can be used in gdb:
$ cd /usr/src/linux ; gdb vmlinux /proc/kcore
This is especially useful if you have compiled the kernel with the
"-g" option to preserve debugging information. It is mainly used
for examining kernel data structures on the live kernel.
source "fs/Kconfig.binfmt"
endmenu
@@ -12,6 +12,7 @@ endif
export OBJCOPY_ARGS
export CPPFLAGS_boot.lds += -P -C
export KBUILD_AFLAGS += -mtext-section-literals
boot-y := bootstrap.o
#include <variant/core.h>
/*
* linux/arch/xtensa/boot/boot-elf/boot.lds.S
*
* Copyright (C) 2008 - 2013 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com>
* Pete Delaney <piet@tensilica.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/vectors.h>
OUTPUT_ARCH(xtensa)
ENTRY(_ResetVector)
SECTIONS
{
.start 0xD0000000 : { *(.start) }
.text 0xD0000000:
{
__reloc_start = . ;
_text_start = . ;
*(.literal .text.literal .text)
_text_end = . ;
}
.rodata ALIGN(0x04):
{
*(.rodata)
*(.rodata1)
}
.data ALIGN(0x04):
.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
{
*(.data)
*(.data1)
*(.sdata)
*(.sdata2)
*(.got.plt)
*(.got)
*(.dynamic)
*(.ResetVector.text)
}
__reloc_end = . ;
. = ALIGN(0x10);
__image_load = . ;
.image 0xd0001000:
.image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
{
_image_start = .;
*(image)
@@ -43,7 +31,6 @@ SECTIONS
_image_end = . ;
}
.bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
{
__bss_start = .;
@@ -53,14 +40,15 @@ SECTIONS
*(.bss)
__bss_end = .;
}
_end = .;
_param_start = .;
.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
/*
* This is a remapped copy of the Reset Vector Code.
* It keeps gdb in sync with the PC after switching
* to the temporary mapping used while setting up
* the V2 MMU mappings for Linux.
*/
.ResetVector.remapped_text 0x46000000 (INFO):
{
*(.ResetVector.text)
*(.ResetVector.remapped_text)
}
PROVIDE (end = .);
}
/*
* arch/xtensa/boot/boot-elf/bootstrap.S
*
* Low-level exception handling
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 - 2013 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com>
* Piet Delaney <piet@tensilica.com>
*/
#include <asm/bootparam.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
#include <linux/linkage.h>
/* ResetVector
*/
.section .ResetVector.text, "ax"
.global _ResetVector
.global reset
_ResetVector:
_j reset
_j _SetupMMU
.begin no-absolute-literals
.literal_position
.align 4
RomInitAddr:
.word 0xd0001000
#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
.word 0x00003000
#else
.word 0xd0003000
#endif
RomBootParam:
.word _bootparam
_bootparam:
.short BP_TAG_FIRST
.short 4
.long BP_VERSION
.short BP_TAG_LAST
.short 0
.long 0
.align 4
_SetupMMU:
movi a0, 0
wsr a0, windowbase
rsync
movi a0, 1
wsr a0, windowstart
rsync
movi a0, 0x1F
wsr a0, ps
rsync
Offset = _SetupMMU - _ResetVector
#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#endif
.end no-absolute-literals
rsil a0, XCHAL_DEBUGLEVEL-1
rsync
reset:
l32r a0, RomInitAddr
l32r a2, RomBootParam
@@ -21,13 +80,25 @@ reset:
jx a0
.align 4
.section .bootstrap.data, "aw"
.globl _bootparam
_bootparam:
.short BP_TAG_FIRST
.short 4
.long BP_VERSION
.short BP_TAG_LAST
.short 0
.long 0
.section .ResetVector.remapped_text, "x"
.global _RemappedResetVector
/* Do org before literals */
.org 0
_RemappedResetVector:
.begin no-absolute-literals
.literal_position
_j _RemappedSetupMMU
/* Position Remapped code at the same location as the original code */
. = _RemappedResetVector + Offset
_RemappedSetupMMU:
#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#endif
.end no-absolute-literals
@@ -33,7 +33,7 @@ SECTIONS
. = ALIGN(0x10);
__image_load = . ;
.image 0xd0001000: AT(__image_load)
.image 0xd0003000: AT(__image_load)
{
_image_start = .;
*(image)
@@ -4,7 +4,11 @@
# for more details.
#
UIMAGE_LOADADDR = 0xd0001000
ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
UIMAGE_LOADADDR = 0x00003000
else
UIMAGE_LOADADDR = 0xd0003000
endif
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
@@ -15,6 +15,7 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += percpu.h
/* empty */
/*
* arch/xtensa/include/asm/ftrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Tensilica Inc.
*/
#ifndef _XTENSA_FTRACE_H
#define _XTENSA_FTRACE_H
#include <asm/processor.h>
#define HAVE_ARCH_CALLER_ADDR
#define CALLER_ADDR0 ({ unsigned long a0, a1; \
__asm__ __volatile__ ( \
"mov %0, a0\n" \
"mov %1, a1\n" \
: "=r"(a0), "=r"(a1) : : ); \
MAKE_PC_FROM_RA(a0, a1); })
#ifdef CONFIG_FRAME_POINTER
extern unsigned long return_address(unsigned level);
#define CALLER_ADDR1 return_address(1)
#define CALLER_ADDR2 return_address(2)
#define CALLER_ADDR3 return_address(3)
#else
#define CALLER_ADDR1 (0)
#define CALLER_ADDR2 (0)
#define CALLER_ADDR3 (0)
#endif
#endif /* _XTENSA_FTRACE_H */
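For reference, CALLER_ADDR0 relies on MAKE_PC_FROM_RA to turn a windowed return address back into a real PC: a windowed call stores the caller's window increment in the top two bits of a0, so those bits are replaced with the top bits of another address in the same region, here the stack pointer. A sketch of the usual xtensa definition (check asm/processor.h for the authoritative one):

static inline unsigned long make_pc_from_ra(unsigned long ra, unsigned long sp)
{
	/* low 30 bits of the return address, top 2 bits of the stack pointer */
	return (ra & 0x3fffffff) | (sp & 0xc0000000);
}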
@@ -23,6 +23,9 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
#include <asm/pgtable.h>
#include <asm/vectors.h>
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
@@ -48,6 +51,110 @@
* (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
*/
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
* Have MMU v3
*/
#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif
movi a1, 0
_call0 1f
_j 2f
.align 4
1: movi a2, 0x10000000
movi a3, 0x18000000
add a2, a2, a0
9: bgeu a2, a3, 9b /* PC is out of the expected range */
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
movi a2, 0x40000006
idtlb a2
iitlb a2
isync
/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
* and jump to the new mapping.
*/
#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
srli a3, a0, 27
slli a3, a3, 27
addi a3, a3, CA_BYPASS
addi a7, a2, -1
wdtlb a3, a7
witlb a3, a7
isync
slli a4, a0, 5
srli a4, a4, 5
addi a5, a2, -6
add a4, a4, a5
jx a4
/* Step 3: unmap everything other than current area.
* Start at 0x60000000, wrap around, and end with 0x20000000
*/
2: movi a4, 0x20000000
add a5, a2, a4
3: idtlb a5
iitlb a5
add a5, a5, a4
bne a5, a2, 3b
/* Step 4: Setup MMU with the old V2 mappings. */
movi a6, 0x01000000
wsr a6, ITLBCFG
wsr a6, DTLBCFG
isync
movi a5, 0xd0000005
movi a4, CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
movi a5, 0xd8000005
movi a4, CA_BYPASS
wdtlb a4, a5
witlb a4, a5
movi a5, 0xe0000006
movi a4, 0xf0000000 + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
movi a5, 0xf0000006
movi a4, 0xf0000000 + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
isync
/* Jump to self, using MMU v2 mappings. */
movi a4, 1f
jx a4
1:
movi a2, VECBASE_RESET_VADDR
wsr a2, vecbase
/* Step 5: remove temporary mapping. */
idtlb a7
iitlb a7
isync
movi a0, 0
wsr a0, ptevaddr
rsync
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
.endm
#endif /*__ASSEMBLY__*/
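The srli/slli pairs in step 2 are plain bit masking: shifting right then left by 27 aligns the physical PC down to a 128 MB boundary, and shifting left then right by 5 keeps the offset within that 128 MB region. A hedged C restatement of the jump-target computation (illustration only; phys_pc stands for the value _call0 left in a0, and 0x40000000 is a2 - 6):

#include <stdint.h>

static uint32_t step2_jump_target(uint32_t phys_pc)
{
	uint32_t offset = phys_pc & 0x07ffffff;	/* slli 5 then srli 5 */
	return 0x40000000 + offset;		/* addi a5, a2, -6; add; jx */
}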
@@ -47,7 +47,10 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return (flags & 0xf) != 0;
#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
#endif
return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
}
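The old test, (flags & 0xf) != 0, looked only at PS.INTLEVEL and ignored PS.EXCM, so a PS value saved with EXCM set but INTLEVEL zero was misreported as interrupts-enabled. The new test counts IRQs as disabled when PS.EXCM is set or PS.INTLEVEL >= LOCKLEVEL; the #error above guarantees 1 << PS_EXCM_BIT >= LOCKLEVEL, so a set EXCM bit alone pushes the masked value over the threshold. A small userspace self-test of that logic (the constants are illustrative stand-ins, not the authoritative asm/processor.h values):

#include <assert.h>

#define PS_INTLEVEL_MASK	0xf	/* assumed value, for illustration */
#define PS_EXCM_BIT		4	/* assumed value, for illustration */
#define LOCKLEVEL		1	/* assumed value, for illustration */

static int irqs_disabled_flags(unsigned long flags)
{
	return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
}

int main(void)
{
	assert(!irqs_disabled_flags(0));		/* INTLEVEL 0, EXCM clear */
	assert(irqs_disabled_flags(LOCKLEVEL));		/* INTLEVEL raised */
	assert(irqs_disabled_flags(1 << PS_EXCM_BIT));	/* EXCM alone */
	return 0;
}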
static inline bool arch_irqs_disabled(void)
/*
* include/asm-xtensa/linkage.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_LINKAGE_H
#define _XTENSA_LINKAGE_H
/* Nothing to do here ... */
#endif /* _XTENSA_LINKAGE_H */
/*
* arch/xtensa/include/asm/stacktrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_STACKTRACE_H
#define _XTENSA_STACKTRACE_H
#include <linux/sched.h>
struct stackframe {
unsigned long pc;
unsigned long sp;
};
static __always_inline unsigned long *stack_pointer(struct task_struct *task)
{
unsigned long *sp;
if (!task || task == current)
__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
else
sp = (unsigned long *)task->thread.sp;
return sp;
}
void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data),
void *data);
#endif /* _XTENSA_STACKTRACE_H */
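A usage sketch of this callback interface (the helper here is hypothetical, not part of the commit): walk the current task's stack and count frames; a nonzero return from the callback stops the walk, which is how the trace code below bails out when its buffer fills.

#include <asm/stacktrace.h>

static int count_frame_cb(struct stackframe *frame, void *data)
{
	++*(int *)data;
	return 0;	/* keep walking */
}

static int count_stack_frames(void)
{
	int n = 0;

	walk_stackframe(stack_pointer(NULL), count_frame_cb, &n);
	return n;
}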
@@ -19,13 +19,16 @@
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
#if XCHAL_NUM_TIMERS > 0 && \
INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
#elif XCHAL_NUM_TIMERS > 1 && \
INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
#elif XCHAL_NUM_TIMERS > 2 && \
INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
@@ -22,10 +22,9 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
static inline void spill_registers(void)
{
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
"movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
"mov a12, a0\n\t"
"rsr a13, sar\n\t"
"xsr a14, ps\n\t"
@@ -35,7 +34,7 @@ static inline void spill_registers(void)
"mov a0, a12\n\t"
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
: : "a" (&a0), "a" (&ps)
: :
#if defined(CONFIG_FRAME_POINTER)
: "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
#else
/*
* arch/xtensa/include/asm/vectors.h
*
* Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
* Memory Addresses from "Virtual == Physical" to their previous V2 MMU
* mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 - 2012 Tensilica Inc.
*
* Pete Delaney <piet@tensilica.com>
* Marc Gauthier <marc@tensilica.com>
*/
#ifndef _XTENSA_VECTORS_H
#define _XTENSA_VECTORS_H
#include <variant/core.h>
#if defined(CONFIG_MMU)
/* Will Become VECBASE */
#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
/* Image Virtual Start Address */
#define KERNELOFFSET 0xD0003000
#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
#define PHYSICAL_MEMORY_ADDRESS 0x00000000
#define LOAD_MEMORY_ADDRESS 0x00003000
#else
/* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
#define PHYSICAL_MEMORY_ADDRESS 0xD0000000
#define LOAD_MEMORY_ADDRESS 0xD0003000
#endif
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
/* VECBASE */
#define VIRTUAL_MEMORY_ADDRESS 0x00002000
/* Location of the start of the kernel text, _start */
#define KERNELOFFSET 0x00003000
#define PHYSICAL_MEMORY_ADDRESS 0x00000000
/* Loaded just above possibly live vectors */
#define LOAD_MEMORY_ADDRESS 0x00003000
#endif /* CONFIG_MMU */
#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset)
/* Used to set VECBASE register */
#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
VECBASE_RESET_VADDR)
#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
VECBASE_RESET_VADDR)
#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
#if XCHAL_HAVE_VECBASE
#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
#undef XCHAL_NMI_VECTOR_VADDR
#define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
#undef XCHAL_INTLEVEL7_VECTOR_VADDR
#define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
/*
* These XCHAL_* #defines from variant/core.h
* are not valid to use with V3 MMU. Non-XCHAL
* constants are defined above and should be used.
*/
#undef XCHAL_VECBASE_RESET_VADDR
#undef XCHAL_RESET_VECTOR0_VADDR
#undef XCHAL_USER_VECTOR_VADDR
#undef XCHAL_KERNEL_VECTOR_VADDR
#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
#undef XCHAL_WINDOW_VECTORS_VADDR
#undef XCHAL_INTLEVEL2_VECTOR_VADDR
#undef XCHAL_INTLEVEL3_VECTOR_VADDR
#undef XCHAL_INTLEVEL4_VECTOR_VADDR
#undef XCHAL_INTLEVEL5_VECTOR_VADDR
#undef XCHAL_INTLEVEL6_VECTOR_VADDR
#undef XCHAL_DEBUG_VECTOR_VADDR
#undef XCHAL_NMI_VECTOR_VADDR
#undef XCHAL_INTLEVEL7_VECTOR_VADDR
#else
#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
#endif
#endif /* _XTENSA_VECTORS_H */
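As a worked example of these macros: with CONFIG_MMU on an MMU v3 core, VIRTUAL_MEMORY_ADDRESS is 0xD0000000 and PHYSICAL_MEMORY_ADDRESS is 0x00000000, so a hypothetical user-vector offset of 0x340 from the variant header would yield USER_VECTOR_VADDR = XC_VADDR(0x340) = 0xD0000340 and a corresponding physical address of XC_PADDR(0x340) = 0x00000340 (the offset value is made up for illustration).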
@@ -4,14 +4,16 @@
extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
pci-dma.o
obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
vectors.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
AFLAGS_head.o += -mtext-section-literals
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis
@@ -354,16 +354,16 @@ common_exception:
* so we can allow exceptions and interrupts (*) again.
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
*
* (*) We only allow interrupts of higher priority than current IRQ
* (*) We only allow interrupts if they were previously enabled and
* we're not handling an IRQ
*/
rsr a3, ps
addi a0, a0, -4
movi a2, 1
addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
movi a2, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL
movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority
moveqz a3, a2, a0 # a3 = IRQ level iff interrupt
moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, exccause
@@ -389,6 +389,22 @@ common_exception:
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
#ifdef CONFIG_TRACE_IRQFLAGS
l32i a4, a1, PT_DEPC
/* Double exception means we came here with an exception
* while PS.EXCM was set, i.e. interrupts disabled.
*/
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
l32i a4, a1, PT_EXCCAUSE
bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
/* If we came here because of an interrupt, interrupts were
* enabled and we've just disabled them.
*/
movi a4, trace_hardirqs_off
callx4 a4
1:
#endif
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
@@ -407,11 +423,29 @@ common_exception:
.global common_exception_return
common_exception_return:
#ifdef CONFIG_TRACE_IRQFLAGS
l32i a4, a1, PT_DEPC
/* Double exception means we came here with an exception
* while PS.EXCM was set, i.e. interrupts disabled.
*/
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
l32i a4, a1, PT_EXCCAUSE
bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
/* If we came here because of an interrupt, interrupts were
* enabled and we'll re-enable them on return.
*/
movi a4, trace_hardirqs_on
callx4 a4
1:
#endif
/* Jump if we are returning from kernel exceptions. */
1: l32i a3, a1, PT_PS
_bbci.l a3, PS_UM_BIT, 4f
rsil a2, 0
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
* and have to restore WB and WS, extra states, and all registers
@@ -652,51 +686,19 @@ common_exception_exit:
l32i a0, a1, PT_DEPC
l32i a3, a1, PT_AREG3
_bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
wsr a0, depc
l32i a2, a1, PT_AREG2
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfde
_bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1:
/* Restore a0...a3 and return */
rsr a0, ps
extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
movi a0, 2f
slli a2, a2, 4
add a0, a2, a0
l32i a2, a1, PT_AREG2
jx a0
.macro irq_exit_level level
.align 16
.if XCHAL_EXCM_LEVEL >= \level
l32i a0, a1, PT_PC
wsr a0, epc\level
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfi \level
.endif
.endm
rfe
.align 16
2:
1: wsr a0, depc
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfe
.align 16
/* no rfi for level-1 irq, handled by rfe above */
nop
irq_exit_level 2
irq_exit_level 3
irq_exit_level 4
irq_exit_level 5
irq_exit_level 6
rfde
ENDPROC(kernel_exception)
@@ -48,17 +48,36 @@
*/
__HEAD
.begin no-absolute-literals
ENTRY(_start)
_j 2f
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, excsave1
_j _SetupMMU
.align 4
.literal_position
.Lstartup:
.word _startup
.align 4
1: .word _startup
2: l32r a0, 1b
.global _SetupMMU
_SetupMMU:
Offset = _SetupMMU - _start
#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#endif
.end no-absolute-literals
l32r a0, .Lstartup
jx a0
ENDPROC(_start)
.section .init.text, "ax"
__INIT
.literal_position
ENTRY(_startup)
@@ -67,10 +86,6 @@ ENTRY(_startup)
movi a0, LOCKLEVEL
wsr a0, ps
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, excsave1
/* Start with a fresh windowbase and windowstart. */
movi a1, 1
@@ -86,7 +101,9 @@ ENTRY(_startup)
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
#if XCHAL_NUM_IBREAK > 0
wsr a0, ibreakenable
#endif
wsr a0, icount
movi a1, 15
wsr a0, icountlevel
@@ -156,8 +173,6 @@ ENTRY(_startup)
isync
initialize_mmu
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
@@ -205,6 +220,10 @@ ENTRY(_startup)
___flush_dcache_all a2 a3
#endif
memw
isync
___invalidate_icache_all a2 a3
isync
/* Setup stack and enable window exceptions (keep irqs disabled) */
@@ -36,6 +36,7 @@ _F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
_F(void, pcibios_init, (void), { });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
/*
* arch/xtensa/kernel/stacktrace.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data),
void *data)
{
unsigned long a0, a1;
unsigned long sp_end;
a1 = (unsigned long)sp;
sp_end = ALIGN(a1, THREAD_SIZE);
spill_registers();
while (a1 < sp_end) {
struct stackframe frame;
sp = (unsigned long *)a1;
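/* In the windowed ABI the caller's a0 (return address) and a1
 * (stack pointer) are spilled to the base save area, the four
 * words just below the callee's stack pointer.
 */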
a0 = *(sp - 4);
a1 = *(sp - 3);
if (a1 <= (unsigned long)sp)
break;
frame.pc = MAKE_PC_FROM_RA(a0, a1);
frame.sp = a1;
if (fn(&frame, data))
return;
}
}
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
unsigned skip;
};
static int stack_trace_cb(struct stackframe *frame, void *data)
{
struct stack_trace_data *trace_data = data;
struct stack_trace *trace = trace_data->trace;
if (trace_data->skip) {
--trace_data->skip;
return 0;
}
if (!kernel_text_address(frame->pc))
return 0;
trace->entries[trace->nr_entries++] = frame->pc;
return trace->nr_entries >= trace->max_entries;
}
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
struct stack_trace_data trace_data = {
.trace = trace,
.skip = trace->skip,
};
walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
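For completeness, a caller-side sketch using the generic stacktrace API of this era (standard kernel interface, not part of this diff): capture up to 16 return addresses from the current task.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);
	/* entries[0 .. trace.nr_entries - 1] now hold kernel text PCs */
}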
#ifdef CONFIG_FRAME_POINTER
struct return_addr_data {
unsigned long addr;
unsigned skip;
};
static int return_address_cb(struct stackframe *frame, void *data)
{
struct return_addr_data *r = data;
if (r->skip) {
--r->skip;
return 0;
}
if (!kernel_text_address(frame->pc))
return 0;
r->addr = frame->pc;
return 1;
}
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
.skip = level + 1,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;
}
EXPORT_SYMBOL(return_address);
#endif
@@ -11,7 +11,7 @@
*
* Essentially rewritten for the Xtensa architecture port.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2013 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
@@ -195,7 +196,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
/*
* IRQ handler.
* PS.INTLEVEL is the current IRQ priority level.
*/
extern void do_IRQ(int, struct pt_regs *);
@@ -212,18 +212,21 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK,
};
unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
return;
for (;;) {
unsigned intread = get_sr(interrupt);
unsigned intenable = get_sr(intenable);
unsigned int_at_level = intread & intenable &
int_level_mask[level];
unsigned int_at_level = intread & intenable;
unsigned level;
for (level = LOCKLEVEL; level > 0; --level) {
if (int_at_level & int_level_mask[level]) {
int_at_level &= int_level_mask[level];
break;
}
}
if (!int_at_level)
if (level == 0)
return;
/*
@@ -404,53 +407,25 @@ void show_regs(struct pt_regs * regs)
regs->syscall);
}
static __always_inline unsigned long *stack_pointer(struct task_struct *task)
static int show_trace_cb(struct stackframe *frame, void *data)
{
unsigned long *sp;
if (!task || task == current)
__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
else
sp = (unsigned long *)task->thread.sp;
return sp;
if (kernel_text_address(frame->pc)) {
printk(" [<%08lx>] ", frame->pc);
print_symbol("%s\n", frame->pc);
}
return 0;
}
void show_trace(struct task_struct *task, unsigned long *sp)
{
unsigned long a0, a1, pc;
unsigned long sp_start, sp_end;
if (sp)
a1 = (unsigned long)sp;
else
a1 = (unsigned long)stack_pointer(task);
sp_start = a1 & ~(THREAD_SIZE-1);
sp_end = sp_start + THREAD_SIZE;
if (!sp)
sp = stack_pointer(task);
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
spill_registers();
while (a1 > sp_start && a1 < sp_end) {
sp = (unsigned long*)a1;
a0 = *(sp - 4);
a1 = *(sp - 3);
if (a1 <= (unsigned long) sp)
break;
pc = MAKE_PC_FROM_RA(a0, a1);
if (kernel_text_address(pc)) {
printk(" [<%08lx>] ", pc);
print_symbol("%s\n", pc);
}
}
walk_stackframe(sp, show_trace_cb, NULL);
printk("\n");
}
@@ -50,6 +50,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>
#define WINDOW_VECTORS_SIZE 0x180
@@ -220,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
xsr a0, depc # get DEPC, save a0
movi a3, XCHAL_WINDOW_VECTORS_VADDR
movi a3, WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
addi a3, a3, WINDOW_VECTORS_SIZE
_bgeu a0, a3, .Lfixup
@@ -385,9 +386,12 @@ ENDPROC(_DebugInterruptVector)
.if XCHAL_EXCM_LEVEL >= \level
.section .Level\level\()InterruptVector.text, "ax"
ENTRY(_Level\level\()InterruptVector)
wsr a0, epc1
wsr a0, excsave2
rsr a0, epc\level
xsr a0, epc1
wsr a0, epc1
movi a0, EXCCAUSE_LEVEL1_INTERRUPT
wsr a0, exccause
rsr a0, eps\level
# branch to user or kernel vector
j _SimulateUserKernelVectorException
.endif
@@ -439,10 +443,8 @@ ENDPROC(_WindowOverflow4)
*/
.align 4
_SimulateUserKernelVectorException:
wsr a0, excsave2
movi a0, 4 # LEVEL1_INTERRUPT cause
wsr a0, exccause
rsr a0, ps
addi a0, a0, (1 << PS_EXCM_BIT)
wsr a0, ps
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
rsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>
#include <variant/core.h>
#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
@@ -30,7 +31,7 @@ jiffies = jiffies_64;
#endif
#ifndef KERNELOFFSET
#define KERNELOFFSET 0xd0001000
#define KERNELOFFSET 0xd0003000
#endif
/* Note: In the following macros, it would be nice to specify only the
@@ -185,16 +186,16 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
XCHAL_WINDOW_VECTORS_VADDR, 4,
WINDOW_VECTORS_VADDR, 4,
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
XCHAL_DEBUG_VECTOR_VADDR - 4,
DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
XCHAL_DEBUG_VECTOR_VADDR,
DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
#undef LAST
@@ -202,7 +203,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
XCHAL_INTLEVEL2_VECTOR_VADDR,
INTLEVEL2_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
@@ -210,7 +211,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 3
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
XCHAL_INTLEVEL3_VECTOR_VADDR,
INTLEVEL3_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
@@ -218,7 +219,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 4
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
XCHAL_INTLEVEL4_VECTOR_VADDR,
INTLEVEL4_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
@@ -226,7 +227,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 5
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
XCHAL_INTLEVEL5_VECTOR_VADDR,
INTLEVEL5_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
@@ -234,39 +235,39 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
XCHAL_INTLEVEL6_VECTOR_VADDR,
INTLEVEL6_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
XCHAL_KERNEL_VECTOR_VADDR - 4,
KERNEL_VECTOR_VADDR - 4,
SIZEOF(LAST), LAST)
#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
XCHAL_KERNEL_VECTOR_VADDR,
KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
XCHAL_USER_VECTOR_VADDR - 4,
USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
XCHAL_USER_VECTOR_VADDR,
USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
DOUBLEEXC_VECTOR_VADDR - 16,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
XCHAL_DOUBLEEXC_VECTOR_VADDR,
DOUBLEEXC_VECTOR_VADDR,
32,
.DoubleExceptionVector.literal)
@@ -284,11 +285,26 @@ SECTIONS
. = ALIGN(0x10);
.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
.ResetVector.text RESET_VECTOR_VADDR :
{
*(.ResetVector.text)
}
/*
* This is a remapped copy of the Secondary Reset Vector Code.
* It keeps gdb in sync with the PC after switching
* to the temporary mapping used while setting up
* the V2 MMU mappings for Linux.
*
* Only debug information about this section is put in the kernel image.
*/
.SecondaryResetVector.remapped_text 0x46000000 (INFO):
{
*(.SecondaryResetVector.remapped_text)
}
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
@@ -119,3 +119,8 @@ EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
extern long common_exception_return;
extern long _spill_registers;
EXPORT_SYMBOL(common_exception_return);
EXPORT_SYMBOL(_spill_registers);
@@ -24,15 +24,19 @@ void __init paging_init(void)
*/
void __init init_mmu(void)
{
/* Writing zeros to the <t>TLBCFG special registers ensure
* that valid values exist in the register. For existing
* PGSZID<w> fields, zero selects the first element of the
* page-size array. For nonexistent PGSZID<w> fields, zero is
* the best value to write. Also, when changing PGSZID<w>
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
/*
* Writing zeros to the instruction and data TLBCFG special
* registers ensure that valid values exist in the register.
*
* For existing PGSZID<w> fields, zero selects the first element
* of the page-size array. For nonexistent PGSZID<w> fields,
* zero is the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
flush_tlb_all();
/* Set rasid register to a known value. */
@@ -132,9 +132,7 @@ static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
if (kernel_text_address(pc))
oprofile_add_trace(pc);
oprofile_add_trace(pc);
if (pc == (unsigned long) &common_exception_return) {
regs = (struct pt_regs *)a1;
if (user_mode(regs)) {
@@ -56,13 +56,13 @@ static void rs_poll(unsigned long);
static int rs_open(struct tty_struct *tty, struct file * filp)
{
tty->port = &serial_port;
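/* rs_poll() runs from the timer softirq, so rs_open() must disable
 * bottom halves while taking timer_lock (hence the _bh variant below).
 */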
spin_lock(&timer_lock);
spin_lock_bh(&timer_lock);
if (tty->count == 1) {
setup_timer(&serial_timer, rs_poll,
(unsigned long)&serial_port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
spin_unlock(&timer_lock);
spin_unlock_bh(&timer_lock);
return 0;
}
@@ -99,14 +99,13 @@ static int rs_write(struct tty_struct * tty,
static void rs_poll(unsigned long priv)
{
struct tty_port *port = (struct tty_port *)priv;
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
int i = 0;
unsigned char c;
spin_lock(&timer_lock);
while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
__simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
while (simc_poll(0)) {
simc_read(0, &c, 1);
tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
@@ -244,8 +243,7 @@ static void iss_console_write(struct console *co, const char *s, unsigned count)
int len = strlen(s);
if (s != 0 && *s != 0)
__simc (SYS_write, 1, (unsigned long)s,
count < len ? count : len,0,0);
simc_write(1, s, count < len ? count : len);
}
static struct tty_driver* iss_console_device(struct console *c, int *index)
@@ -59,56 +59,58 @@
static int errno;
static inline int __simc(int a, int b, int c, int d, int e, int f)
static inline int __simc(int a, int b, int c, int d)
{
int ret;
register int a1 asm("a2") = a;
register int b1 asm("a3") = b;
register int c1 asm("a4") = c;
register int d1 asm("a5") = d;
register int e1 asm("a6") = e;
register int f1 asm("a7") = f;
__asm__ __volatile__ (
"simcall\n"
"mov %0, a2\n"
"mov %1, a3\n"
: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
: "r"(c1), "r"(d1), "r"(e1), "r"(f1)
: "r"(c1), "r"(d1)
: "memory");
return ret;
}
static inline int simc_open(const char *file, int flags, int mode)
{
return __simc(SYS_open, (int) file, flags, mode, 0, 0);
return __simc(SYS_open, (int) file, flags, mode);
}
static inline int simc_close(int fd)
{
return __simc(SYS_close, fd, 0, 0, 0, 0);
return __simc(SYS_close, fd, 0, 0);
}
static inline int simc_ioctl(int fd, int request, void *arg)
{
return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
return __simc(SYS_ioctl, fd, request, (int) arg);
}
static inline int simc_read(int fd, void *buf, size_t count)
{
return __simc(SYS_read, fd, (int) buf, count, 0, 0);
return __simc(SYS_read, fd, (int) buf, count);
}
static inline int simc_write(int fd, const void *buf, size_t count)
{
return __simc(SYS_write, fd, (int) buf, count, 0, 0);
return __simc(SYS_write, fd, (int) buf, count);
}
static inline int simc_poll(int fd)
{
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
0, 0);
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
}
static inline int simc_lseek(int fd, uint32_t off, int whence)
{
return __simc(SYS_lseek, fd, off, whence);
}
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
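A usage sketch of the wrappers, mirroring what rs_poll() above now does: drain pending input from the simulator's stdin one byte at a time while simc_poll() reports fd 0 readable (the helper name is invented for illustration).

static void drain_simc_input(void)
{
	unsigned char c;

	while (simc_poll(0))
		if (simc_read(0, &c, 1) != 1)
			break;
}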
@@ -38,12 +38,6 @@ void __init platform_init(bp_tag_t* bootparam)
}
#ifdef CONFIG_PCI
void platform_pcibios_init(void)
{
}
#endif
void platform_halt(void)
{
pr_info(" ** Called platform_halt() **\n");
@@ -64,7 +58,9 @@ void platform_restart(void)
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"
@@ -85,7 +85,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
while (nbytes > 0) {
unsigned long io;
__simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
simc_lseek(dev->fd, offset, SEEK_SET);
if (write)
io = simc_write(dev->fd, buffer, nbytes);
else
@@ -176,7 +176,7 @@ static int simdisk_attach(struct simdisk *dev, const char *filename)
err = -ENODEV;
goto out;
}
dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
dev->size = simc_lseek(dev->fd, 0, SEEK_END);
set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
dev->filename = filename;
pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
@@ -69,7 +69,9 @@ void platform_restart(void)
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"
@@ -60,7 +60,9 @@ void platform_restart(void)
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"