Commit 9b83d851 authored by Linus Torvalds

Merge tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux

Pull Xtensa patches from Chris Zankel:
 "The major changes are adding support for SMP for Xtensa, fixing and
  cleaning up the ISS (simulator) network driver, and better support for
  device trees"

* tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux: (40 commits)
  xtensa: implement ndelay
  xtensa: clean up udelay
  xtensa: enable HAVE_PERF_EVENTS
  xtensa: remap io area defined in device tree
  xtensa: support default device tree buses
  xtensa: initialize device tree clock sources
  xtensa: xtfpga: fix definitions of platform devices
  xtensa: standardize devicetree cpu compatible strings
  xtensa: avoid duplicate of IO range definitions
  xtensa: fix ATOMCTL register documentation
  xtensa: Enable irqs after cpu is set online
  xtensa: ISS: raise network polling rate to 10 times/sec
  xtensa: remove unused XTENSA_ISS_NETWORK Kconfig parameter
  xtensa: ISS: avoid simple_strtoul usage
  xtensa: Switch to sched_clock_register()
  xtensa: implement CPU hotplug
  xtensa: add SMP support
  xtensa: add MX irqchip
  xtensa: clear timer IRQ unconditionally in its handler
  xtensa: clean up do_interrupt/do_IRQ
  ...
parents 2d08cd0e 9ed82c68
@@ -40,5 +40,5 @@ See Section 4.3.12.4 of ISA; Bits:
 ---------  ------------------  ------------------  ----------------
 0          Exception           Exception           Exception
 1          RCW Transaction     RCW Transaction     RCW Transaction
-2          Internal Operation  Exception           Reserved
+2          Internal Operation  Internal Operation  Reserved
 3          Reserved            Reserved            Reserved
@@ -44,3 +44,21 @@ After step 4, we jump to intended (linked) address of this code.
 40..5F -> 40  40..5F -> pc  -> pc  40..5F -> pc
 20..3F -> 20  -> 20               20..3F -> 20
 00..1F -> 00  -> 00               00..1F -> 00
The default location of IO peripherals is above 0xf0000000. This may be changed
using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, §6.5
for details on the syntax and semantics of simple-bus nodes. The following
limitations apply (a short sketch follows the list):
1. Only top level simple-bus nodes are considered
2. Only one (first) simple-bus node is considered
3. Empty "ranges" properties are not supported
4. Only the first triplet in the "ranges" property is considered
5. The parent-bus-address value is rounded down to the nearest 256MB boundary
6. The IO area covers the entire 256MB segment of parent-bus-address; the
"ranges" triplet length field is ignored
@@ -9,7 +9,6 @@ config XTENSA
 	select GENERIC_CLOCKEVENTS
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_SHOW
-	select GENERIC_CPU_DEVICES
 	select GENERIC_SCHED_CLOCK
 	select MODULES_USE_ELF_RELA
 	select GENERIC_PCI_IOMAP
@@ -19,6 +18,8 @@ config XTENSA
 	select IRQ_DOMAIN
 	select HAVE_OPROFILE
 	select HAVE_FUNCTION_TRACER
+	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_PERF_EVENTS
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems. These processors are both
@@ -67,6 +68,9 @@ config VARIANT_IRQ_SWITCH
 config HAVE_XTENSA_GPIO32
 	def_bool n

+config MAY_HAVE_SMP
+	def_bool n
+
 menu "Processor type and features"

 choice
@@ -110,6 +114,48 @@ config XTENSA_UNALIGNED_USER
 source "kernel/Kconfig.preempt"

+config HAVE_SMP
+	bool "System Supports SMP (MX)"
+	depends on MAY_HAVE_SMP
+	select XTENSA_MX
+	help
+	  This option is used to indicate that the system-on-a-chip (SOC)
+	  supports multiprocessing. Multiprocessor support is implemented
+	  above the CPU core definition and currently needs to be selected
+	  manually.
+
+	  Multiprocessor support is implemented with external cache and
+	  interrupt controllers.
+
+	  The MX interrupt distributor adds Interprocessor Interrupts
+	  and causes the IRQ numbers to be increased by 4 for devices
+	  like the OpenCores Ethernet driver and the serial interface.
+
+	  You still have to select "Enable SMP" to enable SMP on this SOC.
+
+config SMP
+	bool "Enable Symmetric multi-processing support"
+	depends on HAVE_SMP
+	select USE_GENERIC_SMP_HELPERS
+	select GENERIC_SMP_IDLE_THREAD
+	help
+	  Enables SMP software; allows more than one CPU/core
+	  to be activated during startup.
+
+config NR_CPUS
+	depends on SMP
+	int "Maximum number of CPUs (2-32)"
+	range 2 32
+	default "4"
+
+config HOTPLUG_CPU
+	bool "Enable CPU hotplug support"
+	depends on SMP
+	help
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+
+	  Say N if you want to disable CPU hotplug.
+
 config MATH_EMULATION
 	bool "Math emulation"
 	help
@@ -156,9 +202,6 @@ config XTENSA_CALIBRATE_CCOUNT
 config SERIAL_CONSOLE
 	def_bool n

-config XTENSA_ISS_NETWORK
-	def_bool n
-
 menu "Bus options"

 config PCI
@@ -185,7 +228,6 @@ config XTENSA_PLATFORM_ISS
 	depends on TTY
 	select XTENSA_CALIBRATE_CCOUNT
 	select SERIAL_CONSOLE
-	select XTENSA_ISS_NETWORK
 	help
 	  ISS is an acronym for Tensilica's Instruction Set Simulator.
......
@@ -3,7 +3,7 @@
 /include/ "xtfpga-flash-4m.dtsi"

 / {
-	compatible = "xtensa,lx60";
+	compatible = "cdns,xtensa-lx60";
 	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x04000000>;
......
@@ -3,7 +3,7 @@
 /include/ "xtfpga-flash-16m.dtsi"

 / {
-	compatible = "xtensa,ml605";
+	compatible = "cdns,xtensa-ml605";
 	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x08000000>;
......
 / {
-	compatible = "xtensa,xtfpga";
+	compatible = "cdns,xtensa-xtfpga";
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&pic>;
@@ -17,7 +17,7 @@ cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		cpu@0 {
-			compatible = "xtensa,cpu";
+			compatible = "cdns,xtensa-cpu";
 			reg = <0>;
 			/* Filled in by platform_setup from FPGA register
 			 * clock-frequency = <100000000>;
@@ -26,7 +26,7 @@ cpu@0 {
 	};

 	pic: pic {
-		compatible = "xtensa,pic";
+		compatible = "cdns,xtensa-pic";
 		/* one cell: internal irq number,
 		 * two cells: second cell == 0: internal irq number
 		 *            second cell == 1: external irq number
......
@@ -8,7 +8,6 @@ generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
 generic-y += fcntl.h
-generic-y += futex.h
 generic-y += hardirq.h
 generic-y += ioctl.h
 generic-y += irq_regs.h
......
@@ -13,10 +13,6 @@
 #define rmb() barrier()
 #define wmb() mb()

-#ifdef CONFIG_SMP
-#error smp_* not defined
-#endif
-
 #include <asm-generic/barrier.h>

 #endif /* _XTENSA_SYSTEM_H */
@@ -22,12 +22,8 @@
 #include <asm/processor.h>
 #include <asm/byteorder.h>

-#ifdef CONFIG_SMP
-# error SMP not supported on this architecture
-#endif
-
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()

 #include <asm-generic/bitops/non-atomic.h>
......
 /*
- * include/asm-xtensa/cacheflush.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * (C) 2001 - 2007 Tensilica Inc.
+ * (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_CACHEFLUSH_H
 #define _XTENSA_CACHEFLUSH_H

-#ifdef __KERNEL__
-
 #include <linux/mm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long);
 extern void __invalidate_icache_range(unsigned long, unsigned long);
 extern void __invalidate_dcache_range(unsigned long, unsigned long);

 #if XCHAL_DCACHE_IS_WRITEBACK
 extern void __flush_invalidate_dcache_all(void);
 extern void __flush_dcache_page(unsigned long);
@@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
  * (see also Documentation/cachetlb.txt)
  */

-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
+
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+		      unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif

-#define flush_cache_all() \
+#define local_flush_cache_all() \
 	do { \
 		__flush_invalidate_dcache_all(); \
 		__invalidate_icache_all(); \
@@ -103,9 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page*);

-extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*,
-			     unsigned long, unsigned long);
+void local_flush_cache_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+		unsigned long address, unsigned long pfn);

 #else

@@ -119,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page) do { } while (0)

-#define flush_cache_page(vma,addr,pfn) do { } while (0)
-#define flush_cache_range(vma,start,end) do { } while (0)
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)

 #endif

 /* Ensure consistency between data and instruction cache. */
-#define flush_icache_range(start,end) \
+#define local_flush_icache_range(start, end) \
 	do { \
 		__flush_dcache_range(start, (end) - (start)); \
 		__invalidate_icache_range(start,(end) - (start)); \
@@ -253,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
 	}
 }

-#endif /* __KERNEL__ */
 #endif /* _XTENSA_CACHEFLUSH_H */
@@ -19,23 +19,57 @@ extern unsigned long loops_per_jiffy;

 static inline void __delay(unsigned long loops)
 {
-	/* 2 cycles per loop. */
-	__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
-			      : "=r" (loops) : "0" (loops));
+	if (__builtin_constant_p(loops) && loops < 2)
+		__asm__ __volatile__ ("nop");
+	else if (loops >= 2)
+		/* 2 cycles per loop. */
+		__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+				      : "+r" (loops));
 }

-/* For SMP/NUMA systems, change boot_cpu_data to something like
- * local_cpu_data->... where local_cpu_data points to the current
- * cpu. */
+/* Undefined function to get compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);

-static __inline__ void udelay (unsigned long usecs)
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
+static inline void __udelay(unsigned long usecs)
 {
 	unsigned long start = get_ccount();
-	unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ));
+	unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;

 	/* Note: all variables are unsigned (can wrap around)! */
 	while (((unsigned long)get_ccount()) - start < cycles)
-		;
+		cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+	if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+		__bad_udelay();
+	else
+		__udelay(usec);
+}
+
+static inline void __ndelay(unsigned long nsec)
+{
+	/*
+	 * Inner shift makes sure multiplication doesn't overflow
+	 * for legitimate nsec values
+	 */
+	unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+
+	__delay(cycles);
+}
+
+#define ndelay(n) ndelay(n)
+static inline void ndelay(unsigned long nsec)
+{
+	if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+		__bad_ndelay();
+	else
+		__ndelay(nsec);
 }

 #endif
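
As a sanity check on the fixed-point conversion in __udelay() (a userspace
sketch, not part of the patch): (usecs * (ccount_freq >> 15)) >> 5 computes
usecs * ccount_freq / 2^20, with 2^20 = 1048576 standing in for 10^6, so the
result comes out a few percent below the exact cycle count:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ccount_freq = 100000000UL; /* 100 MHz, as on xtfpga */
        unsigned long usecs = 100;

        unsigned long exact = usecs * (ccount_freq / 1000000UL);
        unsigned long approx = (usecs * (ccount_freq >> 15)) >> 5;

        /* exact = 10000 cycles, approx = 9534 cycles (~4.7% less) */
        printf("exact=%lu approx=%lu\n", exact, approx);
        return 0;
    }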
@@ -18,7 +18,7 @@
 	__asm__ __volatile__ ( \
 		"mov %0, a0\n" \
 		"mov %1, a1\n" \
-		: "=r"(a0), "=r"(a1) : : ); \
+		: "=r"(a0), "=r"(a1)); \
 	MAKE_PC_FROM_RA(a0, a1); })

 #ifdef CONFIG_FRAME_POINTER
 extern unsigned long return_address(unsigned level);
......
/*
 * Atomic futex routines
 *
 * Based on the PowerPC implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2013 TangoTec Ltd.
 *
 * Baruch Siach <baruch@tkos.co.il>
 */

#ifndef _ASM_XTENSA_FUTEX_H
#define _ASM_XTENSA_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
	__asm__ __volatile( \
	"1:	l32i	%0, %2, 0\n" \
	insn "\n" \
	"	wsr	%0, scompare1\n" \
	"2:	s32c1i	%1, %2, 0\n" \
	"	bne	%1, %0, 1b\n" \
	"	movi	%1, 0\n" \
	"3:\n" \
	"	.section .fixup,\"ax\"\n" \
	"	.align 4\n" \
	"4:	.long	3b\n" \
	"5:	l32r	%0, 4b\n" \
	"	movi	%1, %3\n" \
	"	jx	%0\n" \
	"	.previous\n" \
	"	.section __ex_table,\"a\"\n" \
	"	.long 1b,5b,2b,5b\n" \
	"	.previous\n" \
	: "=&r" (oldval), "=&r" (ret) \
	: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
	: "memory")

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

#if !XCHAL_HAVE_S32C1I
	return -ENOSYS;
#endif

	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
				~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();

	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
	case FUTEX_OP_CMP_NE: return (oldval != cmparg);
	case FUTEX_OP_CMP_LT: return (oldval < cmparg);
	case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT: return (oldval > cmparg);
	}

	return -ENOSYS;
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 prev;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

#if !XCHAL_HAVE_S32C1I
	return -ENOSYS;
#endif

	__asm__ __volatile__ (
	"	# futex_atomic_cmpxchg_inatomic\n"
	"1:	l32i	%1, %3, 0\n"
	"	mov	%0, %5\n"
	"	wsr	%1, scompare1\n"
	"2:	s32c1i	%0, %3, 0\n"
	"3:\n"
	"	.section .fixup,\"ax\"\n"
	"	.align 4\n"
	"4:	.long	3b\n"
	"5:	l32r	%1, 4b\n"
	"	movi	%0, %6\n"
	"	jx	%1\n"
	"	.previous\n"
	"	.section __ex_table,\"a\"\n"
	"	.long 1b,5b,2b,5b\n"
	"	.previous\n"
	: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
	: "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
	: "memory");

	*uval = prev;
	return ret;
}

#endif /* __KERNEL__ */
#endif /* _ASM_XTENSA_FUTEX_H */
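
To see how the encoded_op fields unpack in futex_atomic_op_inuser() above,
here is a standalone sketch using the same shifts; the field layout follows
FUTEX_OP() in linux/futex.h (op in bits 31..28, cmp in 27..24, oparg in
23..12, cmparg in 11..0):

    #include <stdio.h>

    int main(void)
    {
        /* FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) */
        int encoded_op = (1 << 28) | (0 << 24) | (1 << 12) | 0;

        int op     = (encoded_op >> 28) & 7;   /* 1 = FUTEX_OP_ADD */
        int cmp    = (encoded_op >> 24) & 15;  /* 0 = FUTEX_OP_CMP_EQ */
        int oparg  = (encoded_op << 8) >> 20;  /* 1, sign-extended */
        int cmparg = (encoded_op << 20) >> 20; /* 0, sign-extended */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        return 0;
    }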
@@ -26,6 +26,9 @@
 #include <asm/pgtable.h>
 #include <asm/vectors.h>

+#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
 #ifdef __ASSEMBLY__

 #define XTENSA_HWVERSION_RC_2009_0 230000
@@ -80,8 +83,6 @@
 	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
 	 * and jump to the new mapping.
 	 */
-#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
-#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)

 	srli	a3, a0, 27
 	slli	a3, a3, 27
@@ -123,13 +124,13 @@
 	wdtlb	a4, a5
 	witlb	a4, a5

-	movi	a5, 0xe0000006
-	movi	a4, 0xf0000000 + CA_WRITEBACK
+	movi	a5, XCHAL_KIO_CACHED_VADDR + 6
+	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
 	wdtlb	a4, a5
 	witlb	a4, a5

-	movi	a5, 0xf0000006
-	movi	a4, 0xf0000000 + CA_BYPASS
+	movi	a5, XCHAL_KIO_BYPASS_VADDR + 6
+	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
 	wdtlb	a4, a5
 	witlb	a4, a5
......
@@ -14,20 +14,26 @@
 #ifdef __KERNEL__
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <asm/vectors.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>

 #include <linux/types.h>

-#define XCHAL_KIO_CACHED_VADDR	0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR	0xf0000000
-#define XCHAL_KIO_PADDR		0xf0000000
-#define XCHAL_KIO_SIZE		0x10000000
-
 #define IOADDR(x)		(XCHAL_KIO_BYPASS_VADDR + (x))
 #define IO_SPACE_LIMIT ~0

 #ifdef CONFIG_MMU

+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+	return xtensa_kio_paddr;
+}
+#endif
+
 /*
  * Return the virtual address for the specified bus memory.
  * Note that we currently don't support any address outside the KIO segment.
......
@@ -43,5 +43,14 @@ static __inline__ int irq_canonicalize(int irq)
 }

 struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+		unsigned long int_irq, unsigned long ext_irq,
+		unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);

 #endif /* _XTENSA_IRQ_H */
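
xtensa_get_ext_irq_no() (its body appears in arch/xtensa/kernel/irq.c further
down) maps a hardware IRQ back to its external interrupt number by counting
the external-interrupt bits below it. A sketch with a made-up mask (real
masks come from variant/core.h):

    #include <stdio.h>

    int main(void)
    {
        unsigned ext_mask = 0x000000f0; /* pretend hw irqs 4..7 are external */
        unsigned irq = 6;
        /* same expression the kernel passes to hweight32() */
        unsigned below = ext_mask & ((1u << irq) - 1);

        printf("hw irq %u is external irq %u\n", irq,
               (unsigned)__builtin_popcount(below)); /* -> 2 */
        return 0;
    }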
 /*
- * include/asm-xtensa/mmu.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_MMU_H
@@ -15,8 +13,10 @@
 #include <asm-generic/mmu.h>
 #else

-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+	unsigned long asid[NR_CPUS];
+	unsigned int cpu;
+} mm_context_t;

 #endif /* CONFIG_MMU */
 #endif /* _XTENSA_MMU_H */
 /*
- * include/asm-xtensa/mmu_context.h
- *
  * Switch an MMU context.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_MMU_CONTEXT_H
@@ -20,22 +18,25 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>

-#include <variant/core.h>
+#include <asm/vectors.h>

 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>

 #if (XCHAL_HAVE_TLBS != 1)
 # error "Linux must have an MMU!"
 #endif

-extern unsigned long asid_cache;
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

 /*
  * NO_CONTEXT is the invalid ASID value that we don't ever assign to
- * any user or kernel context.
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
  *
  *   0 invalid
  *   1 kernel
@@ -49,6 +50,12 @@ extern unsigned long asid_cache;
 #define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
 #define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))

+#ifdef CONFIG_MMU
+void init_mmu(void);
+#else
+static inline void init_mmu(void) { }
+#endif
+
 static inline void set_rasid_register (unsigned long val)
 {
 	__asm__ __volatile__ (" wsr %0, rasid\n\t"
@@ -62,64 +69,77 @@ static inline unsigned long get_rasid_register (void)
 	return tmp;
 }

-static inline void
-__get_new_mmu_context(struct mm_struct *mm)
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	extern void flush_tlb_all(void);
-	if (! (++asid_cache & ASID_MASK) ) {
-		flush_tlb_all(); /* start new asid cycle */
-		asid_cache += ASID_USER_FIRST;
+	unsigned long asid = cpu_asid_cache(cpu);
+	if ((++asid & ASID_MASK) == 0) {
+		/*
+		 * Start new asid cycle; continue counting with next
+		 * incarnation bits; skipping over 0, 1, 2, 3.
+		 */
+		local_flush_tlb_all();
+		asid += ASID_USER_FIRST;
 	}
-	mm->context = asid_cache;
+	cpu_asid_cache(cpu) = asid;
+	mm->context.asid[cpu] = asid;
+	mm->context.cpu = cpu;
 }

-static inline void
-__load_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	set_rasid_register(ASID_INSERT(mm->context));
+	/*
+	 * Check if our ASID is of an older version and thus invalid.
+	 */
+	if (mm) {
+		unsigned long asid = mm->context.asid[cpu];
+
+		if (asid == NO_CONTEXT ||
+		    ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+			get_new_mmu_context(mm, cpu);
+	}
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+	get_mmu_context(mm, cpu);
+	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 	invalidate_page_directory();
 }

 /*
  * Initialize the context related info for a new mm_struct
- * instance.
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
  */

-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,
+		struct mm_struct *mm)
 {
-	mm->context = NO_CONTEXT;
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		mm->context.asid[cpu] = NO_CONTEXT;
+	}
+	mm->context.cpu = -1;
 	return 0;
 }

-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-	/* Unconditionally get a new ASID. */
-	__get_new_mmu_context(next);
-	__load_mmu_context(next);
-}
-
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	unsigned long asid = asid_cache;
-
-	/* Check if our ASID is of an older version and thus invalid */
-
-	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
-		__get_new_mmu_context(next);
-
-	__load_mmu_context(next);
+	unsigned int cpu = smp_processor_id();
+	int migrated = next->context.cpu != cpu;
+	/* Flush the icache if we migrated to a new core. */
+	if (migrated) {
+		__invalidate_icache_all();
+		next->context.cpu = cpu;
+	}
+	if (migrated || prev != next)
+		activate_context(next, cpu);
 }

-#define deactivate_mm(tsk, mm)	do { } while(0)
+#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm)	do { } while (0)

 /*
  * Destroy context related info for an mm_struct that is about
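
A quick illustration of the per-cpu ASID generation check in get_mmu_context()
above, assuming XCHAL_MMU_ASID_BITS == 8 so ASID_MASK == 0xff (a userspace
sketch):

    #include <stdio.h>

    #define ASID_MASK 0xff

    int main(void)
    {
        unsigned long cpu_asid_cache = 0x105; /* this cpu: generation 1 */
        unsigned long mm_asid = 0x0fe;        /* mm last ran in generation 0 */

        /* high bits differ -> ASID predates the last local_flush_tlb_all() */
        if ((mm_asid ^ cpu_asid_cache) & ~ASID_MASK)
            printf("ASID 0x%lx is stale, get_new_mmu_context() needed\n",
                   mm_asid);
        return 0;
    }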
......
/*
* Xtensa MX interrupt distributor
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MXREGS_H
#define _XTENSA_MXREGS_H
/*
* RER/WER at, as Read/write external register
* at: value
* as: address
*
* Address Value
* 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
* 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
* 0180 0...0m..m Clear enable specified by mask (m)
* 0184 0...0m..m Set enable specified by mask (m)
* 0190 0...0x..x 8-bit IPI partition register
* VVVVVVVVVVPPPPUUUUUUUUUUUUUUUUUU
* V (10-bit) Release/Version
* P ( 4-bit) Number of cores - 1
* U (18-bit) ID
* 01a0 i.......i 32-bit ConfigID
* 0200 0...0m..m RunStall core 'n'
* 0220 c Cache coherency enabled
*/
#define MIROUT(irq) (0x000 + (irq))
#define MIPICAUSE(cpu) (0x100 + (cpu))
#define MIPISET(cause) (0x140 + (cause))
#define MIENG 0x180
#define MIENGSET 0x184
#define MIASG 0x188 /* Read Global Assert Register */
#define MIASGSET 0x18c /* Set Global Assert Register */
#define MIPIPART 0x190
#define SYSCFGID 0x1a0
#define MPSCORE 0x200
#define CCON 0x220
#endif /* _XTENSA_MXREGS_H */
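
For orientation, a hypothetical snippet driving the MX registers above through
the set_er()/get_er() helpers added to processor.h later in this series
(offsets per the map above; the SYSCFGID core-count field is the one
smp_init_cpus() reads):

    #include <linux/printk.h>
    #include <asm/mxregs.h>
    #include <asm/processor.h>

    static void mx_demo(void)
    {
        unsigned long cfg;

        set_er(1 << 1, MIROUT(3));  /* route hw IRQ 3 to processor 1 */
        set_er(1 << 1, MIPISET(0)); /* raise IPI cause 0 on processor 1 */

        cfg = get_er(SYSCFGID);
        /* P field: number of cores - 1 */
        pr_info("MX cores: %lu\n", ((cfg >> 18) & 0xf) + 1);
    }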
#ifndef __ASM_XTENSA_PERF_EVENT_H
#define __ASM_XTENSA_PERF_EVENT_H
#endif /* __ASM_XTENSA_PERF_EVENT_H */
@@ -191,5 +191,25 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
 #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })

+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#if XCHAL_HAVE_EXTERN_REGS
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+	asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+	register unsigned long value;
+
+	asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+
+	return value;
+}
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
 #endif	/* __ASSEMBLY__ */
 #endif	/* _XTENSA_PROCESSOR_H */
@@ -59,9 +59,17 @@ struct pt_regs {
 	(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+					       (regs)->areg[1]))

 # ifndef CONFIG_SMP
 #  define profile_pc(regs) instruction_pointer(regs)
+# else
+#  define profile_pc(regs)					\
+	({							\
+		in_lock_functions(instruction_pointer(regs)) ?	\
+		return_pointer(regs) : instruction_pointer(regs); \
+	})
 # endif

 #define user_stack_pointer(regs) ((regs)->areg[1])
......
 /*
- * include/asm-xtensa/smp.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_SMP_H
 #define _XTENSA_SMP_H

-extern struct xtensa_cpuinfo boot_cpu_data;
-
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-
-struct xtensa_cpuinfo {
-	unsigned long	*pgd_cache;
-	unsigned long	*pte_cache;
-	unsigned long	pgtable_cache_sz;
-};
-
-#define cpu_logical_map(cpu)	(cpu)
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
+#define cpu_logical_map(cpu)	(cpu)
+
+struct start_info {
+	unsigned long stack;
+};
+extern struct start_info start_info;
+
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */

 #endif /* _XTENSA_SMP_H */
@@ -28,13 +28,13 @@
  *  1  somebody owns the spinlock
  */

-#define __raw_spin_is_locked(x) ((x)->slock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -51,7 +51,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

 /* Returns 1 if the lock is obtained, 0 otherwise. */

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -67,7 +67,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp == 0 ? 1 : 0;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -96,9 +96,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *  0x80000000  one writer owns the rwlock, no other writers, no readers
  */

-#define __raw_write_can_lock(x)  ((x)->lock == 0)
+#define arch_write_can_lock(x)  ((x)->lock == 0)

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
@@ -116,7 +116,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)

 /* Returns 1 if the lock is obtained, 0 otherwise. */

-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
@@ -133,7 +133,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return tmp == 0 ? 1 : 0;
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
@@ -145,7 +145,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 			: "memory");
 }

-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 	unsigned long result;
@@ -164,7 +164,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)

 /* Returns 1 if the lock is obtained, 0 otherwise. */

-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long result;
 	unsigned long tmp;
@@ -184,7 +184,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return result == 0;
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp1, tmp2;
@@ -199,4 +199,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 			: "memory");
 }

+#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
+
 #endif	/* _XTENSA_SPINLOCK_H */
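
The rwlock encoding described above (bit 31 = writer, low bits = reader
count) can be sanity-checked with a trivial sketch (illustrative only, not
the kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int lock = 0;

        lock |= 0x80000000; /* one writer owns the rwlock */
        printf("write-locked: 0x%08x\n", lock);
        lock = 0;           /* writer unlocks */

        lock += 2;          /* two readers hold the lock */
        /* arch_write_can_lock(x) is ((x)->lock == 0) */
        printf("2 readers: 0x%08x, writer may lock: %s\n",
               lock, lock == 0 ? "yes" : "no");
        return 0;
    }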
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

typedef struct {
	volatile unsigned int slock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }

#endif
 /*
- * include/asm-xtensa/timex.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_TIMEX_H
 #define _XTENSA_TIMEX_H

-#ifdef __KERNEL__
-
 #include <asm/processor.h>
 #include <linux/stringify.h>
@@ -39,14 +35,9 @@ extern unsigned long ccount_freq;

 typedef unsigned long long cycles_t;

-/*
- * Only used for SMP.
- */
-extern cycles_t cacheflush_time;
-
 #define get_cycles()	(0)

+void local_timer_setup(unsigned cpu);
+
 /*
  * Register access.
@@ -81,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare)
 	WSR_CCOMPARE(LINUX_TIMER, ccompare);
 }

-#endif	/* __KERNEL__ */
 #endif	/* _XTENSA_TIMEX_H */
 /*
- * include/asm-xtensa/tlbflush.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  */

 #ifndef _XTENSA_TLBFLUSH_H
 #define _XTENSA_TLBFLUSH_H

-#ifdef __KERNEL__
-
 #include <linux/stringify.h>
 #include <asm/processor.h>
@@ -34,12 +30,37 @@
  *  - flush_tlb_range(mm, start, end) flushes a range of pages
  */

-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct*);
-extern void flush_tlb_page(struct vm_area_struct*,unsigned long);
-extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+		unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+		unsigned long);
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+		unsigned long end)
+{
+	flush_tlb_all();
+}
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all()			   local_flush_tlb_all()
+#define flush_tlb_mm(mm)		   local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
+								 end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()

-#define flush_tlb_kernel_range(start,end) flush_tlb_all()
+#endif /* CONFIG_SMP */

 /* TLB operations. */
@@ -187,5 +208,4 @@ static inline unsigned long read_itlb_translation (int way)
 }

 #endif	/* __ASSEMBLY__ */
-#endif	/* __KERNEL__ */
 #endif	/* _XTENSA_TLBFLUSH_H */
@@ -19,6 +19,7 @@
  */
 extern void * __init trap_set_handler(int cause, void *handler);
 extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void secondary_trap_init(void);

 static inline void spill_registers(void)
 {
......
@@ -20,6 +20,17 @@

 #include <variant/core.h>

+#define XCHAL_KIO_CACHED_VADDR		0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR		0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR		0xf0000000
+#define XCHAL_KIO_SIZE			0x10000000
+
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#define XCHAL_KIO_PADDR			xtensa_get_kio_paddr()
+#else
+#define XCHAL_KIO_PADDR			XCHAL_KIO_DEFAULT_PADDR
+#endif
+
 #if defined(CONFIG_MMU)

 /* Will Become VECBASE */
@@ -30,11 +41,9 @@
 #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
   /* MMU v3  - XCHAL_HAVE_PTP_MMU == 1 */
-  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
   #define LOAD_MEMORY_ADDRESS		0x00003000
 #else
   /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
-  #define PHYSICAL_MEMORY_ADDRESS	0xD0000000
   #define LOAD_MEMORY_ADDRESS		0xD0003000
 #endif
@@ -46,7 +55,6 @@
   /* Location of the start of the kernel text, _start */
   #define KERNELOFFSET			0x00003000
-  #define PHYSICAL_MEMORY_ADDRESS	0x00000000

   /* Loaded just above possibly live vectors */
   #define LOAD_MEMORY_ADDRESS		0x00003000
@@ -54,7 +62,6 @@
 #endif /* CONFIG_MMU */

 #define XC_VADDR(offset)		(VIRTUAL_MEMORY_ADDRESS + offset)
-#define XC_PADDR(offset)		(PHYSICAL_MEMORY_ADDRESS + offset)

 /* Used to set VECBASE register */
 #define VECBASE_RESET_VADDR		VIRTUAL_MEMORY_ADDRESS
@@ -67,7 +74,7 @@
 						VECBASE_RESET_VADDR)
 #define RESET_VECTOR1_VADDR		XC_VADDR(RESET_VECTOR1_VECOFS)

-#if XCHAL_HAVE_VECBASE
+#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE

 #define USER_VECTOR_VADDR		XC_VADDR(XCHAL_USER_VECOFS)
 #define KERNEL_VECTOR_VADDR		XC_VADDR(XCHAL_KERNEL_VECOFS)
@@ -81,11 +88,9 @@
 #define DEBUG_VECTOR_VADDR		XC_VADDR(XCHAL_DEBUG_VECOFS)

-#undef  XCHAL_NMI_VECTOR_VADDR
-#define XCHAL_NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)
+#define NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)

-#undef  XCHAL_INTLEVEL7_VECTOR_VADDR
-#define XCHAL_INTLEVEL7_VECTOR_VADDR	XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL7_VECOFS)

 /*
  * These XCHAL_* #defines from variant/core.h
......
@@ -12,6 +12,7 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o

 AFLAGS_head.o += -mtext-section-literals
......
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/cacheasm.h>
 #include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>

 #include <linux/init.h>
 #include <linux/linkage.h>
@@ -54,7 +55,7 @@ ENTRY(_start)

 	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
 	wsr	a2, excsave1
-	_j	_SetupMMU
+	_j	_SetupOCD

 	.align	4
 	.literal_position
@@ -62,6 +63,23 @@ ENTRY(_start)
 	.word	_startup

 	.align	4
+_SetupOCD:
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
 	.global _SetupMMU
 _SetupMMU:
 	Offset = _SetupMMU - _start
@@ -85,24 +103,11 @@ _SetupMMU:

 ENDPROC(_start)

-	__INIT
+	__REF
 	.literal_position

 ENTRY(_startup)

-	/* Disable interrupts and exceptions. */
-
-	movi	a0, LOCKLEVEL
-	wsr	a0, ps
-
-	/* Start with a fresh windowbase and windowstart. */
-
-	movi	a1, 1
-	movi	a0, 0
-	wsr	a1, windowstart
-	wsr	a0, windowbase
-	rsync
-
 	/* Set a0 to 0 for the remaining initialization. */

 	movi	a0, 0
@@ -154,17 +159,6 @@ ENTRY(_startup)
 	wsr	a0, cpenable
 #endif

-	/* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
-	 *
-	 * Note: PS.EXCM must be cleared before using any loop
-	 *	 instructions; otherwise, they are silently disabled, and
-	 *	 at most one iteration of the loop is executed.
-	 */
-
-	movi	a1, LOCKLEVEL
-	wsr	a1, ps
-	rsync
-
 	/*  Initialize the caches.
 	 *  a2, a3 are just working registers (clobbered).
 	 */
@@ -182,6 +176,37 @@ ENTRY(_startup)

 	isync

+#ifdef CONFIG_HAVE_SMP
+	movi	a2, CCON	# MX External Register to Configure Cache
+	movi	a3, 1
+	wer	a3, a2
+#endif
+
+	/* Setup stack and enable window exceptions (keep irqs disabled) */
+
+	movi	a1, start_info
+	l32i	a1, a1, 0
+
+	movi	a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+					# WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+	wsr	a2, ps			# (enable reg-windows; progmode stack)
+	rsync
+
+	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+	movi	a2, debug_exception
+	wsr	a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+
+#ifdef CONFIG_SMP
+	/*
+	 * Notice that we assume with SMP that cores have PRID
+	 * supported by the cores.
+	 */
+	rsr	a2, prid
+	bnez	a2, .Lboot_secondary
+#endif /* CONFIG_SMP */
+
 	/* Unpack data sections
 	 *
 	 * The linker script used to build the Linux kernel image
@@ -234,24 +259,7 @@ ENTRY(_startup)

 	___invalidate_icache_all a2 a3
 	isync

-	/* Setup stack and enable window exceptions (keep irqs disabled) */
-
-	movi	a1, init_thread_union
-	addi	a1, a1, KERNEL_STACK_SIZE
-
-	movi	a2, (1 << PS_WOE_BIT) | LOCKLEVEL
-					# WOE=1, INTLEVEL=LOCKLEVEL, UM=0
-	wsr	a2, ps			# (enable reg-windows; progmode stack)
-	rsync
-
-	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
-
-	movi	a2, debug_exception
-	wsr	a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
-
-	/* Set up EXCSAVE[1] to point to the exc_table. */
-
-	movi	a6, exc_table
+	movi	a6, 0
 	xsr	a6, excsave1

 	/* init_arch kick-starts the linux kernel */
@@ -265,8 +273,93 @@ ENTRY(_startup)
 should_never_return:
 	j	should_never_return

+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+	movi	a2, cpu_start_ccount
+1:
+	l32i	a3, a2, 0
+	beqi	a3, 0, 1b
+	movi	a3, 0
+	s32i	a3, a2, 0
+	memw
+1:
+	l32i	a3, a2, 0
+	beqi	a3, 0, 1b
+	wsr	a3, ccount
+	movi	a3, 0
+	s32i	a3, a2, 0
+	memw
+
+	movi	a6, 0
+	wsr	a6, excsave1
+
+	movi	a4, secondary_start_kernel
+	callx4	a4
+	j	should_never_return
+
+#endif /* CONFIG_SMP */
+
 ENDPROC(_startup)

+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+	___flush_invalidate_dcache_all a2 a3
+#else
+	___invalidate_dcache_all a2 a3
+#endif
+	memw
+	movi	a2, CCON	# MX External Register to Configure Cache
+	movi	a3, 0
+	wer	a3, a2
+	extw
+
+	rsr	a0, prid
+	neg	a2, a0
+	movi	a3, cpu_start_id
+	s32i	a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+	dhwbi	a3, 0
+#endif
+1:
+	l32i	a2, a3, 0
+	dhi	a3, 0
+	bne	a2, a0, 1b
+
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+	j	_startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+	.section ".data.init.refok"
+	.align	4
+ENTRY(start_info)
+	.long	init_thread_union + KERNEL_STACK_SIZE
+
 /*
  * BSS section
  */
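
The cpu_start_ccount handshake in .Lboot_secondary can be pictured with a
two-thread simulation (a sketch inferred from the assembly above; the real
boot-cpu side lives in arch/xtensa/kernel/smp.c, and the names here are
illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong cpu_start_ccount;

    static void *secondary(void *arg)
    {
        unsigned long v;

        while (!(v = atomic_load(&cpu_start_ccount)))
            ;                               /* round 1: wait for boot cpu */
        atomic_store(&cpu_start_ccount, 0); /* ack */
        while (!(v = atomic_load(&cpu_start_ccount)))
            ;                               /* round 2: receive CCOUNT */
        printf("secondary: wsr %lu, ccount\n", v);
        atomic_store(&cpu_start_ccount, 0); /* ack again */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, secondary, NULL);
        atomic_store(&cpu_start_ccount, 1);     /* wake the secondary */
        while (atomic_load(&cpu_start_ccount))
            ;
        atomic_store(&cpu_start_ccount, 12345); /* publish CCOUNT value */
        while (atomic_load(&cpu_start_ccount))
            ;
        pthread_join(t, NULL);
        return 0;
    }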
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Xtensa built-in interrupt controller and some generic functions copied * Xtensa built-in interrupt controller and some generic functions copied
* from i386. * from i386.
* *
* Copyright (C) 2002 - 2006 Tensilica, Inc. * Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
* *
* *
...@@ -18,36 +18,27 @@ ...@@ -18,36 +18,27 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/irqchip.h>
#include <linux/irqchip/xtensa-mx.h>
#include <linux/irqchip/xtensa-pic.h>
#include <linux/irqdomain.h> #include <linux/irqdomain.h>
#include <linux/of.h> #include <linux/of.h>
#include <asm/mxregs.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/platform.h> #include <asm/platform.h>
static unsigned int cached_irq_mask;
atomic_t irq_err_count; atomic_t irq_err_count;
static struct irq_domain *root_domain;
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{ {
struct pt_regs *old_regs = set_irq_regs(regs); int irq = irq_find_mapping(NULL, hwirq);
int irq = irq_find_mapping(root_domain, hwirq);
if (hwirq >= NR_IRQS) { if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n", printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, hwirq); __func__, hwirq);
} }
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW #ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */ /* Debugging check for stack overflow: is there less than 1KB free? */
{ {
...@@ -62,95 +53,69 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) ...@@ -62,95 +53,69 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
} }
#endif #endif
generic_handle_irq(irq); generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
} }
int arch_show_interrupts(struct seq_file *p, int prec) int arch_show_interrupts(struct seq_file *p, int prec)
{ {
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0; return 0;
} }
static void xtensa_irq_mask(struct irq_data *d) int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
{ unsigned long int_irq, unsigned long ext_irq,
cached_irq_mask &= ~(1 << d->hwirq); unsigned long *out_hwirq, unsigned int *out_type)
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->hwirq;
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->hwirq, intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{ {
set_sr(1 << d->hwirq, intset); if (WARN_ON(intsize < 1 || intsize > 2))
return 1; return -EINVAL;
if (intsize == 2 && intspec[1] == 1) {
int_irq = xtensa_map_ext_irq(ext_irq);
if (int_irq < XCHAL_NUM_INTERRUPTS)
*out_hwirq = int_irq;
else
return -EINVAL;
} else {
*out_hwirq = int_irq;
}
*out_type = IRQ_TYPE_NONE;
return 0;
} }
static struct irq_chip xtensa_irq_chip = { int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
.name = "xtensa",
.irq_enable = xtensa_irq_enable,
.irq_disable = xtensa_irq_disable,
.irq_mask = xtensa_irq_mask,
.irq_unmask = xtensa_irq_unmask,
.irq_ack = xtensa_irq_ack,
.irq_retrigger = xtensa_irq_retrigger,
};
static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw) irq_hw_number_t hw)
{ {
struct irq_chip *irq_chip = d->host_data;
u32 mask = 1 << hw; u32 mask = 1 << hw;
if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) { if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
handle_simple_irq, "level"); handle_simple_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) { } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
handle_edge_irq, "edge"); handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL); irq_clear_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) { } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
handle_level_irq, "level"); handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_TIMER) { } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
handle_edge_irq, "edge"); handle_percpu_irq, "timer");
irq_clear_status_flags(irq, IRQ_LEVEL); irq_clear_status_flags(irq, IRQ_LEVEL);
} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
/* XCHAL_INTTYPE_MASK_NMI */ /* XCHAL_INTTYPE_MASK_NMI */
irq_set_chip_and_handler_name(irq, irq_chip,
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_level_irq, "level"); handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
} }
return 0; return 0;
} }
static unsigned map_ext_irq(unsigned ext_irq) unsigned xtensa_map_ext_irq(unsigned ext_irq)
{ {
unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE | unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL; XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
...@@ -163,55 +128,77 @@ static unsigned map_ext_irq(unsigned ext_irq) ...@@ -163,55 +128,77 @@ static unsigned map_ext_irq(unsigned ext_irq)
return XCHAL_NUM_INTERRUPTS; return XCHAL_NUM_INTERRUPTS;
} }
/* unsigned xtensa_get_ext_irq_no(unsigned irq)
* Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. The first cell value maps directly to the hwirq number.
 * The second cell, if present, specifies whether the hwirq number is external (1) or
* internal (0).
*/
int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{ {
if (WARN_ON(intsize < 1 || intsize > 2)) unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
return -EINVAL; XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
if (intsize == 2 && intspec[1] == 1) { ((1u << irq) - 1);
unsigned int_irq = map_ext_irq(intspec[0]); return hweight32(mask);
if (int_irq < XCHAL_NUM_INTERRUPTS)
*out_hwirq = int_irq;
else
return -EINVAL;
} else {
*out_hwirq = intspec[0];
}
*out_type = IRQ_TYPE_NONE;
return 0;
} }
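To make the two binding forms concrete, here is a hedged sketch (the interrupt numbers are hypothetical, not taken from any real board file) of what the translation above produces:

/* interrupts = <3>;      one cell: *out_hwirq = 3, used as-is
 * interrupts = <3 0>;    two cells, internal: *out_hwirq = 3
 * interrupts = <3 1>;    two cells, external: *out_hwirq =
 *                        xtensa_map_ext_irq(3), i.e. the internal
 *                        interrupt line the core's configuration
 *                        wires external IRQ 3 to
 */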
static const struct irq_domain_ops xtensa_irq_domain_ops = {
.xlate = xtensa_irq_domain_xlate,
.map = xtensa_irq_map,
};
void __init init_IRQ(void) void __init init_IRQ(void)
{ {
struct device_node *intc = NULL;
cached_irq_mask = 0;
set_sr(~0, intclear);
#ifdef CONFIG_OF #ifdef CONFIG_OF
/* The interrupt controller device node is mandatory */ irqchip_init();
intc = of_find_compatible_node(NULL, NULL, "xtensa,pic"); #else
BUG_ON(!intc); #ifdef CONFIG_HAVE_SMP
xtensa_mx_init_legacy(NULL);
root_domain = irq_domain_add_linear(intc, NR_IRQS,
&xtensa_irq_domain_ops, NULL);
#else #else
root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0, xtensa_pic_init_legacy(NULL);
&xtensa_irq_domain_ops, NULL); #endif
#endif #endif
irq_set_default_host(root_domain);
#ifdef CONFIG_SMP
ipi_init();
#endif
variant_init_irq(); variant_init_irq();
} }
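In other words: with CONFIG_OF the interrupt controller is now probed from the device tree through irqchip_init(), while non-OF kernels fall back to initializing the MX irqchip (SMP configurations) or the legacy core PIC directly.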
#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned long flags;
raw_spin_lock_irqsave(&desc->lock, flags);
if (chip->irq_set_affinity)
chip->irq_set_affinity(data, cpumask_of(cpu), false);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/*
* The CPU has been marked offline. Migrate IRQs off this CPU. If
* the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*/
void migrate_irqs(void)
{
unsigned int i, cpu = smp_processor_id();
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned int newcpu;
if (irqd_is_per_cpu(data))
continue;
if (!cpumask_test_cpu(cpu, data->affinity))
continue;
newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, cpu);
cpumask_setall(data->affinity);
newcpu = cpumask_any_and(data->affinity,
cpu_online_mask);
}
route_irq(data, i, newcpu);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Xtensa Secondary Processors startup code.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com>
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
* Pete Delaney <piet@tensilica.com>
*/
#include <linux/linkage.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
#include <asm/mxregs.h>
#include <asm/regs.h>
.section .SecondaryResetVector.text, "ax"
ENTRY(_SecondaryResetVector)
_j _SetupOCD
.begin no-absolute-literals
.literal_position
_SetupOCD:
/*
* Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
* Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
* xt-gdb to single step via DEBUG exceptions received directly
 * by OCD.
*/
movi a1, 1
movi a0, 0
wsr a1, windowstart
wsr a0, windowbase
rsync
movi a1, LOCKLEVEL
wsr a1, ps
rsync
_SetupMMU:
Offset = _SetupMMU - _SecondaryResetVector
#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#endif
/*
* Start Secondary Processors with NULL pointer to boot params.
*/
movi a2, 0 # a2 == NULL
movi a3, _startup
jx a3
.end no-absolute-literals
.section .SecondaryResetVector.remapped_text, "ax"
.global _RemappedSecondaryResetVector
.org 0 # Need to do org before literals
_RemappedSecondaryResetVector:
.begin no-absolute-literals
.literal_position
_j _RemappedSetupMMU
. = _RemappedSecondaryResetVector + Offset
_RemappedSetupMMU:
#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#endif
.end no-absolute-literals
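The Offset equate computed above guarantees that _RemappedSetupMMU sits at the same displacement from _RemappedSecondaryResetVector as _SetupMMU does from _SecondaryResetVector, so the initial jump works identically whether the vector executes from its linked or its remapped address.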
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <linux/screen_info.h> #include <linux/screen_info.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h> #include <linux/of_fdt.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
...@@ -37,6 +39,7 @@ ...@@ -37,6 +39,7 @@
#endif #endif
#include <asm/bootparam.h> #include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/timex.h> #include <asm/timex.h>
...@@ -45,6 +48,7 @@ ...@@ -45,6 +48,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/param.h> #include <asm/param.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/smp.h>
#include <platform/hardware.h> #include <platform/hardware.h>
...@@ -85,12 +89,6 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; ...@@ -85,12 +89,6 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
sysmem_info_t __initdata sysmem; sysmem_info_t __initdata sysmem;
#ifdef CONFIG_MMU
extern void init_mmu(void);
#else
static inline void init_mmu(void) { }
#endif
extern int mem_reserve(unsigned long, unsigned long, int); extern int mem_reserve(unsigned long, unsigned long, int);
extern void bootmem_init(void); extern void bootmem_init(void);
extern void zones_init(void); extern void zones_init(void);
...@@ -214,6 +212,42 @@ static int __init parse_bootparam(const bp_tag_t* tag) ...@@ -214,6 +212,42 @@ static int __init parse_bootparam(const bp_tag_t* tag)
#ifdef CONFIG_OF #ifdef CONFIG_OF
bool __initdata dt_memory_scan = false; bool __initdata dt_memory_scan = false;
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
EXPORT_SYMBOL(xtensa_kio_paddr);
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
int depth, void *data)
{
const __be32 *ranges;
unsigned long len;
if (depth > 1)
return 0;
if (!of_flat_dt_is_compatible(node, "simple-bus"))
return 0;
ranges = of_get_flat_dt_prop(node, "ranges", &len);
if (!ranges)
return 1;
if (len == 0)
return 1;
xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
/* round down to nearest 256MB boundary */
xtensa_kio_paddr &= 0xf0000000;
return 1;
}
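A hedged example of what this scan does, with a made-up simple-bus node (the values are illustrative only):

/* ranges = <0x00000000 0xfd000000 0x10000000>;
 * of_read_ulong(ranges + 1, 1) reads the second ranges cell, the
 * parent-bus-address 0xfd000000, and the rounding below yields
 * xtensa_kio_paddr = 0xf0000000.
 */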
#else
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
int depth, void *data)
{
return 1;
}
#endif
void __init early_init_dt_add_memory_arch(u64 base, u64 size) void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{ {
if (!dt_memory_scan) if (!dt_memory_scan)
...@@ -234,6 +268,7 @@ void __init early_init_devtree(void *params) ...@@ -234,6 +268,7 @@ void __init early_init_devtree(void *params)
dt_memory_scan = true; dt_memory_scan = true;
early_init_dt_scan(params); early_init_dt_scan(params);
of_scan_flat_dt(xtensa_dt_io_area, NULL);
if (!command_line[0]) if (!command_line[0])
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
...@@ -241,7 +276,7 @@ void __init early_init_devtree(void *params) ...@@ -241,7 +276,7 @@ void __init early_init_devtree(void *params)
static int __init xtensa_device_probe(void) static int __init xtensa_device_probe(void)
{ {
of_platform_populate(NULL, NULL, NULL, NULL); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0; return 0;
} }
...@@ -354,7 +389,8 @@ static inline int probed_compare_swap(int *v, int cmp, int set) ...@@ -354,7 +389,8 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
/* Handle probed exception */ /* Handle probed exception */
void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) static void __init do_probed_exception(struct pt_regs *regs,
unsigned long exccause)
{ {
if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
regs->pc += 3; /* skip the s32c1i instruction */ regs->pc += 3; /* skip the s32c1i instruction */
...@@ -366,7 +402,7 @@ void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) ...@@ -366,7 +402,7 @@ void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause)
/* Simple test of S32C1I (soc bringup assist) */ /* Simple test of S32C1I (soc bringup assist) */
void __init check_s32c1i(void) static int __init check_s32c1i(void)
{ {
int n, cause1, cause2; int n, cause1, cause2;
void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
...@@ -421,24 +457,21 @@ void __init check_s32c1i(void) ...@@ -421,24 +457,21 @@ void __init check_s32c1i(void)
trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
return 0;
} }
#else /* XCHAL_HAVE_S32C1I */ #else /* XCHAL_HAVE_S32C1I */
/* This condition should not occur with a commercially deployed processor. /* This condition should not occur with a commercially deployed processor.
 Display reminder for early engineering test or demo chips / FPGA bitstreams */ Display reminder for early engineering test or demo chips / FPGA bitstreams */
void __init check_s32c1i(void) static int __init check_s32c1i(void)
{ {
pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
return 0;
} }
#endif /* XCHAL_HAVE_S32C1I */ #endif /* XCHAL_HAVE_S32C1I */
#else /* CONFIG_S32C1I_SELFTEST */ early_initcall(check_s32c1i);
void __init check_s32c1i(void)
{
}
#endif /* CONFIG_S32C1I_SELFTEST */ #endif /* CONFIG_S32C1I_SELFTEST */
...@@ -447,8 +480,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -447,8 +480,6 @@ void __init setup_arch(char **cmdline_p)
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line; *cmdline_p = command_line;
check_s32c1i();
/* Reserve some memory regions */ /* Reserve some memory regions */
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
...@@ -505,6 +536,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -505,6 +536,10 @@ void __init setup_arch(char **cmdline_p)
platform_setup(cmdline_p); platform_setup(cmdline_p);
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
paging_init(); paging_init();
zones_init(); zones_init();
...@@ -521,6 +556,22 @@ void __init setup_arch(char **cmdline_p) ...@@ -521,6 +556,22 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
} }
static DEFINE_PER_CPU(struct cpu, cpu_data);
static int __init topology_init(void)
{
int i;
for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_data, i);
cpu->hotpluggable = !!i;
register_cpu(cpu, i);
}
return 0;
}
subsys_initcall(topology_init);
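Note that cpu->hotpluggable = !!i marks every CPU except the boot CPU as hotpluggable, so only secondary CPUs may be taken offline via sysfs; this matches the hotplug code below, which assumes CPU 0 stays up (platform_cpu_kill() cross-calls to CPU 0).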
void machine_restart(char * cmd) void machine_restart(char * cmd)
{ {
platform_restart(); platform_restart();
...@@ -546,21 +597,27 @@ void machine_power_off(void) ...@@ -546,21 +597,27 @@ void machine_power_off(void)
static int static int
c_show(struct seq_file *f, void *slot) c_show(struct seq_file *f, void *slot)
{ {
char buf[NR_CPUS * 5];
cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
/* high-level stuff */ /* high-level stuff */
seq_printf(f,"processor\t: 0\n" seq_printf(f, "CPU count\t: %u\n"
"vendor_id\t: Tensilica\n" "CPU list\t: %s\n"
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "vendor_id\t: Tensilica\n"
"core ID\t\t: " XCHAL_CORE_ID "\n" "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"build ID\t: 0x%x\n" "core ID\t\t: " XCHAL_CORE_ID "\n"
"byte order\t: %s\n" "build ID\t: 0x%x\n"
"cpu MHz\t\t: %lu.%02lu\n" "byte order\t: %s\n"
"bogomips\t: %lu.%02lu\n", "cpu MHz\t\t: %lu.%02lu\n"
XCHAL_BUILD_UNIQUE_ID, "bogomips\t: %lu.%02lu\n",
XCHAL_HAVE_BE ? "big" : "little", num_online_cpus(),
ccount_freq/1000000, buf,
(ccount_freq/10000) % 100, XCHAL_BUILD_UNIQUE_ID,
loops_per_jiffy/(500000/HZ), XCHAL_HAVE_BE ? "big" : "little",
(loops_per_jiffy/(5000/HZ)) % 100); ccount_freq/1000000,
(ccount_freq/10000) % 100,
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
seq_printf(f,"flags\t\t: " seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI #if XCHAL_HAVE_NMI
...@@ -672,7 +729,7 @@ c_show(struct seq_file *f, void *slot) ...@@ -672,7 +729,7 @@ c_show(struct seq_file *f, void *slot)
static void * static void *
c_start(struct seq_file *f, loff_t *pos) c_start(struct seq_file *f, loff_t *pos)
{ {
return (void *) ((*pos == 0) ? (void *)1 : NULL); return (*pos == 0) ? (void *)1 : NULL;
} }
static void * static void *
...@@ -688,10 +745,10 @@ c_stop(struct seq_file *f, void *v) ...@@ -688,10 +745,10 @@ c_stop(struct seq_file *f, void *v)
const struct seq_operations cpuinfo_op = const struct seq_operations cpuinfo_op =
{ {
start: c_start, .start = c_start,
next: c_next, .next = c_next,
stop: c_stop, .stop = c_stop,
show: c_show .show = c_show,
}; };
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
/*
* Xtensa SMP support functions.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 - 2013 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
# error "The S32C1I option is required for SMP."
# endif
#endif
static void system_invalidate_dcache_range(unsigned long start,
unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
unsigned long size);
/* IPI (Inter Process Interrupt) */
#define IPI_IRQ 0
static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
.handler = ipi_interrupt,
.flags = IRQF_PERCPU,
.name = "ipi",
};
void ipi_init(void)
{
unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
setup_irq(irq, &ipi_irqaction);
}
static inline unsigned int get_core_count(void)
{
/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
unsigned int syscfgid = get_er(SYSCFGID);
return ((syscfgid >> 18) & 0xf) + 1;
}
static inline int get_core_id(void)
{
/* The low bits of SYSCFGID contain the core id (mask 0x3fff, bits 0..13). */
unsigned int core_id = get_er(SYSCFGID);
return core_id & 0x3fff;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
for (i = 0; i < max_cpus; ++i)
set_cpu_present(i, true);
}
void __init smp_init_cpus(void)
{
unsigned i;
unsigned int ncpus = get_core_count();
unsigned int core_id = get_core_id();
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
BUG_ON(cpu != 0);
cpu_asid_cache(cpu) = ASID_USER_FIRST;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);
void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
init_mmu();
#ifdef CONFIG_DEBUG_KERNEL
if (boot_secondary_processors == 0) {
pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
__func__, boot_secondary_processors, cpu);
for (;;)
__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
}
pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
__func__, boot_secondary_processors, cpu);
#endif
/* Init EXCSAVE1 */
secondary_trap_init();
/* All kernel threads share the same mm context. */
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
preempt_disable();
trace_hardirqs_off();
calibrate_delay();
notify_cpu_starting(cpu);
secondary_init_irq();
local_timer_setup(cpu);
set_cpu_online(cpu, true);
local_irq_enable();
complete(&cpu_running);
cpu_startup_entry(CPUHP_ONLINE);
}
static void mx_cpu_start(void *p)
{
unsigned cpu = (unsigned)p;
unsigned long run_stall_mask = get_er(MPSCORE);
set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
__func__, cpu, run_stall_mask, get_er(MPSCORE));
}
static void mx_cpu_stop(void *p)
{
unsigned cpu = (unsigned)p;
unsigned long run_stall_mask = get_er(MPSCORE);
set_er(run_stall_mask | (1u << cpu), MPSCORE);
pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
__func__, cpu, run_stall_mask, get_er(MPSCORE));
}
#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned long ccount;
int i;
#ifdef CONFIG_HOTPLUG_CPU
cpu_start_id = cpu;
system_flush_invalidate_dcache_range(
(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
for (i = 0; i < 2; ++i) {
do
ccount = get_ccount();
while (!ccount);
cpu_start_ccount = ccount;
while (time_before(jiffies, timeout)) {
mb();
if (!cpu_start_ccount)
break;
}
if (cpu_start_ccount) {
smp_call_function_single(0, mx_cpu_stop,
(void *)cpu, 1);
cpu_start_ccount = 0;
return -EIO;
}
}
return 0;
}
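The other half of this handshake lives in the secondary startup path rather than in this hunk: the assumption encoded by the polling loop is that the freshly released core clears cpu_start_ccount once it is actually executing, and the two iterations presumably guard against a stale clear left over from an earlier boot attempt.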
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret = 0;
if (cpu_asid_cache(cpu) == 0)
cpu_asid_cache(cpu) = ASID_USER_FIRST;
start_info.stack = (unsigned long)task_pt_regs(idle);
wmb();
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
msecs_to_jiffies(1000));
if (!cpu_online(cpu))
ret = -EIO;
}
if (ret)
pr_err("CPU %u failed to boot\n", cpu);
return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* __cpu_disable runs on the processor to be shutdown.
*/
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
/*
* OK - migrate IRQs away from this CPU
*/
migrate_irqs();
/*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*/
local_flush_cache_all();
local_flush_tlb_all();
invalidate_page_directory();
clear_tasks_mm_cpumask(cpu);
return 0;
}
static void platform_cpu_kill(unsigned int cpu)
{
smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
if (cpu_start_id == -cpu) {
platform_cpu_kill(cpu);
return;
}
}
pr_err("CPU%u: unable to kill\n", cpu);
}
void arch_cpu_idle_dead(void)
{
cpu_die();
}
/*
* Called from the idle thread for the CPU which has been shutdown.
*
* Note that we disable IRQs here, but do not re-enable them
* before returning to the caller. This is also the behaviour
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
void __ref cpu_die(void)
{
idle_task_exit();
local_irq_disable();
__asm__ __volatile__(
" movi a2, cpu_restart\n"
" jx a2\n");
}
#endif /* CONFIG_HOTPLUG_CPU */
enum ipi_msg_type {
IPI_RESCHEDULE = 0,
IPI_CALL_FUNC,
IPI_CPU_STOP,
IPI_MAX
};
static const struct {
const char *short_text;
const char *long_text;
} ipi_text[] = {
{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
{ .short_text = "CAL", .long_text = "Function call interrupts" },
{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};
struct ipi_data {
unsigned long ipi_count[IPI_MAX];
};
static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static void send_ipi_message(const struct cpumask *callmask,
enum ipi_msg_type msg_id)
{
int index;
unsigned long mask = 0;
for_each_cpu(index, callmask)
if (index != smp_processor_id())
mask |= 1 << index;
set_er(mask, MIPISET(msg_id));
}
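A worked example of the register protocol (the CPU numbers are hypothetical), tying this sender to the ipi_interrupt() receiver below:

/* smp_send_reschedule(2) called on CPU 0:
 *   mask = 1 << 2;
 *   set_er(mask, MIPISET(IPI_RESCHEDULE));   raise message 0 at core 2
 * CPU 2 then reads get_er(MIPICAUSE(2)), sees bit IPI_RESCHEDULE set,
 * writes that bit back to MIPICAUSE(2) to acknowledge it, and calls
 * scheduler_ipi().
 */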
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
void smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
send_ipi_message(&targets, IPI_CPU_STOP);
}
static void ipi_cpu_stop(unsigned int cpu)
{
set_cpu_online(cpu, false);
machine_halt();
}
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
unsigned int msg;
unsigned i;
msg = get_er(MIPICAUSE(cpu));
for (i = 0; i < IPI_MAX; i++)
if (msg & (1 << i)) {
set_er(1 << i, MIPICAUSE(cpu));
++ipi->ipi_count[i];
}
if (msg & (1 << IPI_RESCHEDULE))
scheduler_ipi();
if (msg & (1 << IPI_CALL_FUNC))
generic_smp_call_function_interrupt();
if (msg & (1 << IPI_CPU_STOP))
ipi_cpu_stop(cpu);
return IRQ_HANDLED;
}
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu;
unsigned i;
for (i = 0; i < IPI_MAX; ++i) {
seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
for_each_online_cpu(cpu)
seq_printf(p, " %10lu",
per_cpu(ipi_data, cpu).ipi_count[i]);
seq_printf(p, " %s\n", ipi_text[i].long_text);
}
}
int setup_profiling_timer(unsigned int multiplier)
{
pr_debug("setup_profiling_timer %d\n", multiplier);
return 0;
}
/* TLB flush functions */
struct flush_data {
struct vm_area_struct *vma;
unsigned long addr1;
unsigned long addr2;
};
static void ipi_flush_tlb_all(void *arg)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
static void ipi_flush_tlb_mm(void *arg)
{
local_flush_tlb_mm(arg);
}
void flush_tlb_mm(struct mm_struct *mm)
{
on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}
static void ipi_flush_tlb_page(void *arg)
{
struct flush_data *fd = arg;
local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
struct flush_data fd = {
.vma = vma,
.addr1 = addr,
};
on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}
static void ipi_flush_tlb_range(void *arg)
{
struct flush_data *fd = arg;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct flush_data fd = {
.vma = vma,
.addr1 = start,
.addr2 = end,
};
on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
/* Cache flush functions */
static void ipi_flush_cache_all(void *arg)
{
local_flush_cache_all();
}
void flush_cache_all(void)
{
on_each_cpu(ipi_flush_cache_all, NULL, 1);
}
static void ipi_flush_cache_page(void *arg)
{
struct flush_data *fd = arg;
local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}
void flush_cache_page(struct vm_area_struct *vma,
unsigned long address, unsigned long pfn)
{
struct flush_data fd = {
.vma = vma,
.addr1 = address,
.addr2 = pfn,
};
on_each_cpu(ipi_flush_cache_page, &fd, 1);
}
static void ipi_flush_cache_range(void *arg)
{
struct flush_data *fd = arg;
local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct flush_data fd = {
.vma = vma,
.addr1 = start,
.addr2 = end,
};
on_each_cpu(ipi_flush_cache_range, &fd, 1);
}
static void ipi_flush_icache_range(void *arg)
{
struct flush_data *fd = arg;
local_flush_icache_range(fd->addr1, fd->addr2);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_data fd = {
.addr1 = start,
.addr2 = end,
};
on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
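A usage note: the cross-call here matters because, as an architectural assumption not stated in this hunk, Xtensa cores do not snoop each other's instruction caches, so for example code patching must be followed by a system-wide flush (dst, insns, and len below are placeholders):

/* after copying instructions to dst on one CPU: */
memcpy(dst, insns, len);
flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
/* every online core runs local_flush_icache_range(), so none keeps
 * executing stale instructions */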
/* ------------------------------------------------------------------------- */
static void ipi_invalidate_dcache_range(void *arg)
{
struct flush_data *fd = arg;
__invalidate_dcache_range(fd->addr1, fd->addr2);
}
static void system_invalidate_dcache_range(unsigned long start,
unsigned long size)
{
struct flush_data fd = {
.addr1 = start,
.addr2 = size,
};
on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}
static void ipi_flush_invalidate_dcache_range(void *arg)
{
struct flush_data *fd = arg;
__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}
static void system_flush_invalidate_dcache_range(unsigned long start,
unsigned long size)
{
struct flush_data fd = {
.addr1 = start,
.addr2 = size,
};
on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}
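These two helpers exist because (again an assumption, implied by their use around cpu_start_id above) the data caches of the cores are not kept coherent by hardware during bring-up, so any variable shared with a core whose caches are still in their reset state must be explicitly flushed or invalidated on every CPU.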
...@@ -36,7 +36,7 @@ static cycle_t ccount_read(struct clocksource *cs) ...@@ -36,7 +36,7 @@ static cycle_t ccount_read(struct clocksource *cs)
return (cycle_t)get_ccount(); return (cycle_t)get_ccount();
} }
static u32 notrace ccount_sched_clock_read(void) static u64 notrace ccount_sched_clock_read(void)
{ {
return get_ccount(); return get_ccount();
} }
...@@ -46,24 +46,19 @@ static struct clocksource ccount_clocksource = { ...@@ -46,24 +46,19 @@ static struct clocksource ccount_clocksource = {
.rating = 200, .rating = 200,
.read = ccount_read, .read = ccount_read,
.mask = CLOCKSOURCE_MASK(32), .mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
}; };
static int ccount_timer_set_next_event(unsigned long delta, static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev); struct clock_event_device *dev);
static void ccount_timer_set_mode(enum clock_event_mode mode, static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt); struct clock_event_device *evt);
static struct ccount_timer_t { struct ccount_timer {
struct clock_event_device evt; struct clock_event_device evt;
int irq_enabled; int irq_enabled;
} ccount_timer = { char name[24];
.evt = {
.name = "ccount_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 300,
.set_next_event = ccount_timer_set_next_event,
.set_mode = ccount_timer_set_mode,
},
}; };
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
static int ccount_timer_set_next_event(unsigned long delta, static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev) struct clock_event_device *dev)
...@@ -84,8 +79,8 @@ static int ccount_timer_set_next_event(unsigned long delta, ...@@ -84,8 +79,8 @@ static int ccount_timer_set_next_event(unsigned long delta,
static void ccount_timer_set_mode(enum clock_event_mode mode, static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt) struct clock_event_device *evt)
{ {
struct ccount_timer_t *timer = struct ccount_timer *timer =
container_of(evt, struct ccount_timer_t, evt); container_of(evt, struct ccount_timer, evt);
/* /*
* There is no way to disable the timer interrupt at the device level, * There is no way to disable the timer interrupt at the device level,
...@@ -117,9 +112,28 @@ static struct irqaction timer_irqaction = { ...@@ -117,9 +112,28 @@ static struct irqaction timer_irqaction = {
.handler = timer_interrupt, .handler = timer_interrupt,
.flags = IRQF_TIMER, .flags = IRQF_TIMER,
.name = "timer", .name = "timer",
.dev_id = &ccount_timer,
}; };
void local_timer_setup(unsigned cpu)
{
struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
struct clock_event_device *clockevent = &timer->evt;
timer->irq_enabled = 1;
clockevent->name = timer->name;
snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent->rating = 300;
clockevent->set_next_event = ccount_timer_set_next_event;
clockevent->set_mode = ccount_timer_set_mode;
clockevent->cpumask = cpumask_of(cpu);
clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
if (WARN(!clockevent->irq, "error: can't map timer irq"))
return;
clockevents_config_and_register(clockevent, ccount_freq,
0xf, 0xffffffff);
}
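Each CPU thus registers its own one-shot clockevent, named ccount_clockevent_0, ccount_clockevent_1, and so on; secondary CPUs reach this through local_timer_setup(cpu) in secondary_start_kernel() above, while CPU 0 is handled from time_init() below.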
void __init time_init(void) void __init time_init(void)
{ {
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
...@@ -131,28 +145,21 @@ void __init time_init(void) ...@@ -131,28 +145,21 @@ void __init time_init(void)
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif #endif
clocksource_register_hz(&ccount_clocksource, ccount_freq); clocksource_register_hz(&ccount_clocksource, ccount_freq);
local_timer_setup(0);
ccount_timer.evt.cpumask = cpumask_of(0); setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT); sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
if (WARN(!ccount_timer.evt.irq, "error: can't map timer irq")) clocksource_of_init();
return;
clockevents_config_and_register(&ccount_timer.evt, ccount_freq, 0xf,
0xffffffff);
setup_irq(ccount_timer.evt.irq, &timer_irqaction);
ccount_timer.irq_enabled = 1;
setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq);
} }
/* /*
* The timer interrupt is called HZ times per second. * The timer interrupt is called HZ times per second.
*/ */
irqreturn_t timer_interrupt (int irq, void *dev_id) irqreturn_t timer_interrupt(int irq, void *dev_id)
{ {
struct ccount_timer_t *timer = dev_id; struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
struct clock_event_device *evt = &timer->evt;
set_linux_timer(get_linux_timer());
evt->event_handler(evt); evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */ /* Allow platform to do something useful (Wdog). */
......
...@@ -157,7 +157,7 @@ COPROCESSOR(7), ...@@ -157,7 +157,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers. * 2. it is a temporary memory buffer for the exception handlers.
*/ */
unsigned long exc_table[EXC_TABLE_SIZE/4]; DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
void die(const char*, struct pt_regs*, long); void die(const char*, struct pt_regs*, long);
...@@ -212,6 +212,9 @@ void do_interrupt(struct pt_regs *regs) ...@@ -212,6 +212,9 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK, XCHAL_INTLEVEL7_MASK,
}; };
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
for (;;) { for (;;) {
unsigned intread = get_sr(interrupt); unsigned intread = get_sr(interrupt);
...@@ -227,21 +230,13 @@ void do_interrupt(struct pt_regs *regs) ...@@ -227,21 +230,13 @@ void do_interrupt(struct pt_regs *regs)
} }
if (level == 0) if (level == 0)
return; break;
/* do_IRQ(__ffs(int_at_level), regs);
* Clear the interrupt before processing, in case it's
* edge-triggered or software-generated
*/
while (int_at_level) {
unsigned i = __ffs(int_at_level);
unsigned mask = 1 << i;
int_at_level ^= mask;
set_sr(mask, intclear);
do_IRQ(i, regs);
}
} }
irq_exit();
set_irq_regs(old_regs);
} }
/* /*
...@@ -318,17 +313,31 @@ do_debug(struct pt_regs *regs) ...@@ -318,17 +313,31 @@ do_debug(struct pt_regs *regs)
} }
static void set_handler(int idx, void *handler)
{
unsigned int cpu;
for_each_possible_cpu(cpu)
per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
}
/* Set exception C handler - for temporary use when probing exceptions */ /* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler) void * __init trap_set_handler(int cause, void *handler)
{ {
unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause]; void *previous = (void *)per_cpu(exc_table, 0)[
void *previous = (void *)*entry; EXC_TABLE_DEFAULT / 4 + cause];
*entry = (unsigned long)handler; set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
return previous; return previous;
} }
static void trap_init_excsave(void)
{
unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
}
/* /*
* Initialize dispatch tables. * Initialize dispatch tables.
* *
...@@ -342,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler) ...@@ -342,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler)
* See vectors.S for more details. * See vectors.S for more details.
*/ */
#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
void __init trap_init(void) void __init trap_init(void)
{ {
int i; int i;
...@@ -373,10 +380,15 @@ void __init trap_init(void) ...@@ -373,10 +380,15 @@ void __init trap_init(void)
} }
/* Initialize EXCSAVE_1 to hold the address of the exception table. */ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
trap_init_excsave();
}
i = (unsigned long)exc_table; #ifdef CONFIG_SMP
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i)); void secondary_trap_init(void)
{
trap_init_excsave();
} }
#endif
/* /*
* This function dumps the current valid window frame and other base registers. * This function dumps the current valid window frame and other base registers.
......
...@@ -165,6 +165,13 @@ SECTIONS ...@@ -165,6 +165,13 @@ SECTIONS
.DoubleExceptionVector.text); .DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text, RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text); .DebugInterruptVector.text);
#if defined(CONFIG_SMP)
RELOCATE_ENTRY(_SecondaryResetVector_literal,
.SecondaryResetVector.literal);
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
__boot_reloc_table_end = ABSOLUTE(.) ; __boot_reloc_table_end = ABSOLUTE(.) ;
...@@ -272,6 +279,25 @@ SECTIONS ...@@ -272,6 +279,25 @@ SECTIONS
.DoubleExceptionVector.literal) .DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
#if defined(CONFIG_SMP)
SECTION_VECTOR (_SecondaryResetVector_literal,
.SecondaryResetVector.literal,
RESET_VECTOR1_VADDR - 4,
SIZEOF(.DoubleExceptionVector.text),
.DoubleExceptionVector.text)
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
RESET_VECTOR1_VADDR,
4,
.SecondaryResetVector.literal)
. = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
#endif
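Layout-wise this places the secondary reset vector's literal pool in the four bytes immediately below RESET_VECTOR1_VADDR and the vector code itself at RESET_VECTOR1_VADDR, mirroring the literal-before-text arrangement used for the other vectors; the final assignment advances the location counter past the new section.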
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
__init_end = .; __init_end = .;
......
...@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page) ...@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
* For now, flush the whole cache. FIXME?? * For now, flush the whole cache. FIXME??
*/ */
void flush_cache_range(struct vm_area_struct* vma, void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
__flush_invalidate_dcache_all(); __flush_invalidate_dcache_all();
...@@ -132,7 +132,7 @@ void flush_cache_range(struct vm_area_struct* vma, ...@@ -132,7 +132,7 @@ void flush_cache_range(struct vm_area_struct* vma,
* alias versions of the cache flush functions. * alias versions of the cache flush functions.
*/ */
void flush_cache_page(struct vm_area_struct* vma, unsigned long address, void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn) unsigned long pfn)
{ {
/* Note that we have to use the 'alias' address to avoid multi-hit */ /* Note that we have to use the 'alias' address to avoid multi-hit */
...@@ -159,8 +159,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) ...@@ -159,8 +159,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
/* Invalidate old entry in TLBs */ /* Invalidate old entry in TLBs */
invalidate_itlb_mapping(addr); flush_tlb_page(vma, addr);
invalidate_dtlb_mapping(addr);
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
unsigned long asid_cache = ASID_USER_FIRST; DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int); void bad_page_fault(struct pt_regs*, unsigned long, int);
#undef DEBUG_PAGE_FAULT #undef DEBUG_PAGE_FAULT
......
...@@ -140,7 +140,7 @@ ENTRY(clear_user_page) ...@@ -140,7 +140,7 @@ ENTRY(clear_user_page)
/* Setup a temporary DTLB with the color of the VPN */ /* Setup a temporary DTLB with the color of the VPN */
movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
movi a5, TLBTEMP_BASE_1 # virt movi a5, TLBTEMP_BASE_1 # virt
add a6, a2, a4 # ppn add a6, a2, a4 # ppn
add a2, a5, a3 # add 'color' add a2, a5, a3 # add 'color'
...@@ -194,7 +194,7 @@ ENTRY(copy_user_page) ...@@ -194,7 +194,7 @@ ENTRY(copy_user_page)
or a9, a9, a8 or a9, a9, a8
slli a4, a4, PAGE_SHIFT slli a4, a4, PAGE_SHIFT
s32i a9, a5, PAGE_FLAGS s32i a9, a5, PAGE_FLAGS
movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
beqz a6, 1f beqz a6, 1f
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>
void __init paging_init(void) void __init paging_init(void)
{ {
...@@ -22,7 +24,7 @@ void __init paging_init(void) ...@@ -22,7 +24,7 @@ void __init paging_init(void)
/* /*
* Flush the mmu and reset associated register to default values. * Flush the mmu and reset associated register to default values.
*/ */
void __init init_mmu(void) void init_mmu(void)
{ {
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) #if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
/* /*
...@@ -37,7 +39,21 @@ void __init init_mmu(void) ...@@ -37,7 +39,21 @@ void __init init_mmu(void)
set_itlbcfg_register(0); set_itlbcfg_register(0);
set_dtlbcfg_register(0); set_dtlbcfg_register(0);
#endif #endif
flush_tlb_all(); #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
XCHAL_KIO_CACHED_VADDR + 6);
write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
XCHAL_KIO_CACHED_VADDR + 6);
write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6);
write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6);
#endif
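The second operand of these write_{d,i}tlb_entry() calls encodes both the virtual address and the TLB way in a single value; the + 6 selects way 6, which (by assumption from the spanning-way MMU setup code, not spelled out in this hunk) holds the fixed 512MB KIO mappings being retargeted to the possibly relocated xtensa_kio_paddr.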
local_flush_tlb_all();
/* Set rasid register to a known value. */ /* Set rasid register to a known value. */
......
...@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void) ...@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void)
} }
void flush_tlb_all (void) void local_flush_tlb_all(void)
{ {
__flush_itlb_all(); __flush_itlb_all();
__flush_dtlb_all(); __flush_dtlb_all();
...@@ -60,19 +60,23 @@ void flush_tlb_all (void) ...@@ -60,19 +60,23 @@ void flush_tlb_all (void)
* a new context will be assigned to it. * a new context will be assigned to it.
*/ */
void flush_tlb_mm(struct mm_struct *mm) void local_flush_tlb_mm(struct mm_struct *mm)
{ {
int cpu = smp_processor_id();
if (mm == current->active_mm) { if (mm == current->active_mm) {
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
__get_new_mmu_context(mm); mm->context.asid[cpu] = NO_CONTEXT;
__load_mmu_context(mm); activate_context(mm, cpu);
local_irq_restore(flags); local_irq_restore(flags);
} else {
mm->context.asid[cpu] = NO_CONTEXT;
mm->context.cpu = -1;
} }
else
mm->context = 0;
} }
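With SMP, each mm now tracks one ASID per CPU in mm->context.asid[]. Writing NO_CONTEXT and, for the active mm, immediately calling activate_context() forces a fresh ASID to be allocated on this CPU (activate_context()'s allocation behaviour is an assumption here; it lives in mmu_context.h rather than in this hunk). For an inactive mm it is enough to invalidate the per-CPU ASID and the cached owner CPU.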
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES #if _ITLB_ENTRIES > _DTLB_ENTRIES
...@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm) ...@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm)
# define _TLB_ENTRIES _DTLB_ENTRIES # define _TLB_ENTRIES _DTLB_ENTRIES
#endif #endif
void flush_tlb_range (struct vm_area_struct *vma, void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
int cpu = smp_processor_id();
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long flags; unsigned long flags;
if (mm->context == NO_CONTEXT) if (mm->context.asid[cpu] == NO_CONTEXT)
return; return;
#if 0 #if 0
printk("[tlbrange<%02lx,%08lx,%08lx>]\n", printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
(unsigned long)mm->context, start, end); (unsigned long)mm->context.asid[cpu], start, end);
#endif #endif
local_irq_save(flags); local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register(); int oldpid = get_rasid_register();
set_rasid_register (ASID_INSERT(mm->context));
set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
start &= PAGE_MASK; start &= PAGE_MASK;
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
while(start < end) { while(start < end) {
...@@ -114,24 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma, ...@@ -114,24 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
set_rasid_register(oldpid); set_rasid_register(oldpid);
} else { } else {
flush_tlb_mm(mm); local_flush_tlb_mm(mm);
} }
local_irq_restore(flags); local_irq_restore(flags);
} }
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{ {
int cpu = smp_processor_id();
struct mm_struct* mm = vma->vm_mm; struct mm_struct* mm = vma->vm_mm;
unsigned long flags; unsigned long flags;
int oldpid; int oldpid;
if(mm->context == NO_CONTEXT) if (mm->context.asid[cpu] == NO_CONTEXT)
return; return;
local_irq_save(flags); local_irq_save(flags);
oldpid = get_rasid_register(); oldpid = get_rasid_register();
set_rasid_register(ASID_INSERT(mm->context)); set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page); invalidate_itlb_mapping(page);
......
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
#define DRIVER_NAME "iss-netdev" #define DRIVER_NAME "iss-netdev"
#define ETH_MAX_PACKET 1500 #define ETH_MAX_PACKET 1500
#define ETH_HEADER_OTHER 14 #define ETH_HEADER_OTHER 14
#define ISS_NET_TIMER_VALUE (2 * HZ) #define ISS_NET_TIMER_VALUE (HZ / 10)
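At the default HZ this shortens the ISS network poll period from two seconds (2 * HZ jiffies) to one tenth of a second (HZ / 10 jiffies), i.e. ten polls per second.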
static DEFINE_SPINLOCK(opened_lock); static DEFINE_SPINLOCK(opened_lock);
...@@ -56,8 +56,6 @@ static LIST_HEAD(devices); ...@@ -56,8 +56,6 @@ static LIST_HEAD(devices);
struct tuntap_info { struct tuntap_info {
char dev_name[IFNAMSIZ]; char dev_name[IFNAMSIZ];
int fixed_config;
unsigned char gw[ETH_ALEN];
int fd; int fd;
}; };
...@@ -67,7 +65,6 @@ struct tuntap_info { ...@@ -67,7 +65,6 @@ struct tuntap_info {
/* This structure contains our private information for the driver. */ /* This structure contains our private information for the driver. */
struct iss_net_private { struct iss_net_private {
struct list_head device_list; struct list_head device_list;
struct list_head opened_list; struct list_head opened_list;
...@@ -83,9 +80,6 @@ struct iss_net_private { ...@@ -83,9 +80,6 @@ struct iss_net_private {
int index; int index;
int mtu; int mtu;
unsigned char mac[ETH_ALEN];
int have_mac;
struct { struct {
union { union {
struct tuntap_info tuntap; struct tuntap_info tuntap;
...@@ -118,68 +112,48 @@ static char *split_if_spec(char *str, ...) ...@@ -118,68 +112,48 @@ static char *split_if_spec(char *str, ...)
*arg = str; *arg = str;
if (end == NULL) if (end == NULL)
return NULL; return NULL;
*end ++ = '\0'; *end++ = '\0';
str = end; str = end;
} }
va_end(ap); va_end(ap);
return str; return str;
} }
/* Set Ethernet address of the specified device. */
#if 0 static void setup_etheraddr(struct net_device *dev, char *str)
/* Adjust SKB. */
struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{ {
if ((skb != NULL) && (skb_tailroom(skb) < extra)) { unsigned char *addr = dev->dev_addr;
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
dev_kfree_skb(skb);
skb = skb2;
}
if (skb != NULL)
skb_put(skb, extra);
return skb;
}
#endif
/* Return the IP address as a string for a given device. */ if (str == NULL)
goto random;
static void dev_ip_addr(void *d, char *buf, char *bin_buf) if (!mac_pton(str, addr)) {
{ pr_err("%s: failed to parse '%s' as an ethernet address\n",
struct net_device *dev = d; dev->name, str);
struct in_device *ip = dev->ip_ptr; goto random;
struct in_ifaddr *in;
__be32 addr;
if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) {
printk(KERN_WARNING "Device not assigned an IP address!\n");
return;
} }
if (is_multicast_ether_addr(addr)) {
addr = in->ifa_address; pr_err("%s: attempt to assign a multicast ethernet address\n",
sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff, dev->name);
(addr >> 16) & 0xff, addr >> 24); goto random;
if (bin_buf) {
bin_buf[0] = addr & 0xff;
bin_buf[1] = (addr >> 8) & 0xff;
bin_buf[2] = (addr >> 16) & 0xff;
bin_buf[3] = addr >> 24;
} }
if (!is_valid_ether_addr(addr)) {
pr_err("%s: attempt to assign an invalid ethernet address\n",
dev->name);
goto random;
}
if (!is_local_ether_addr(addr))
pr_warn("%s: assigning a globally valid ethernet address\n",
dev->name);
return;
random:
pr_info("%s: choosing a random ethernet address\n",
dev->name);
eth_hw_addr_random(dev);
} }
/* Set Ethernet address of the specified device. */
static void inline set_ether_mac(void *d, unsigned char *addr)
{
struct net_device *dev = d;
memcpy(dev->dev_addr, addr, ETH_ALEN);
}
/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */ /* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
static int tuntap_open(struct iss_net_private *lp) static int tuntap_open(struct iss_net_private *lp)
...@@ -189,24 +163,21 @@ static int tuntap_open(struct iss_net_private *lp) ...@@ -189,24 +163,21 @@ static int tuntap_open(struct iss_net_private *lp)
int err = -EINVAL; int err = -EINVAL;
int fd; int fd;
/* We currently only support a fixed configuration. */ fd = simc_open("/dev/net/tun", 02, 0); /* O_RDWR */
if (fd < 0) {
if (!lp->tp.info.tuntap.fixed_config) pr_err("%s: failed to open /dev/net/tun, returned %d (errno = %d)\n",
return -EINVAL; lp->dev->name, fd, errno);
if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */
printk("Failed to open /dev/net/tun, returned %d "
"(errno = %d)\n", fd, errno);
return fd; return fd;
} }
memset(&ifr, 0, sizeof ifr); memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI; ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name); strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) { err = simc_ioctl(fd, TUNSETIFF, &ifr);
printk("Failed to set interface, returned %d " if (err < 0) {
"(errno = %d)\n", err, errno); pr_err("%s: failed to set interface %s, returned %d (errno = %d)\n",
lp->dev->name, dev_name, err, errno);
simc_close(fd); simc_close(fd);
return err; return err;
} }
...@@ -217,27 +188,17 @@ static int tuntap_open(struct iss_net_private *lp) ...@@ -217,27 +188,17 @@ static int tuntap_open(struct iss_net_private *lp)
static void tuntap_close(struct iss_net_private *lp) static void tuntap_close(struct iss_net_private *lp)
{ {
#if 0
if (lp->tp.info.tuntap.fixed_config)
iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name);
#endif
simc_close(lp->tp.info.tuntap.fd); simc_close(lp->tp.info.tuntap.fd);
lp->tp.info.tuntap.fd = -1; lp->tp.info.tuntap.fd = -1;
} }
static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb) static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb)
{ {
#if 0
*skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
if (*skb == NULL)
return -ENOMEM;
#endif
return simc_read(lp->tp.info.tuntap.fd, return simc_read(lp->tp.info.tuntap.fd,
(*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER); (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
} }
static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb) static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
{ {
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len); return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
} }
...@@ -253,45 +214,45 @@ static int tuntap_poll(struct iss_net_private *lp) ...@@ -253,45 +214,45 @@ static int tuntap_poll(struct iss_net_private *lp)
} }
/* /*
* Currently only a device name is supported. * ethX=tuntap,[mac address],device name
* ethX=tuntap[,[mac address][,[device name]]]
*/ */
static int tuntap_probe(struct iss_net_private *lp, int index, char *init) static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
{ {
const int len = strlen(TRANSPORT_TUNTAP_NAME); struct net_device *dev = lp->dev;
char *dev_name = NULL, *mac_str = NULL, *rem = NULL; char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
/* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */ /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
if (strncmp(init, TRANSPORT_TUNTAP_NAME, len)) if (strncmp(init, TRANSPORT_TUNTAP_NAME,
sizeof(TRANSPORT_TUNTAP_NAME) - 1))
return 0; return 0;
if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') { init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) { if (*init == ',') {
printk("Extra garbage on specification : '%s'\n", rem); rem = split_if_spec(init + 1, &mac_str, &dev_name);
if (rem != NULL) {
pr_err("%s: extra garbage on specification : '%s'\n",
dev->name, rem);
return 0; return 0;
} }
} else if (*init != '\0') { } else if (*init != '\0') {
printk("Invalid argument: %s. Skipping device!\n", init); pr_err("%s: invalid argument: %s. Skipping device!\n",
dev->name, init);
return 0; return 0;
} }
if (dev_name) { if (!dev_name) {
strncpy(lp->tp.info.tuntap.dev_name, dev_name, pr_err("%s: missing tuntap device name\n", dev->name);
sizeof lp->tp.info.tuntap.dev_name); return 0;
lp->tp.info.tuntap.fixed_config = 1; }
} else
strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME);
strlcpy(lp->tp.info.tuntap.dev_name, dev_name,
sizeof(lp->tp.info.tuntap.dev_name));
#if 0 setup_etheraddr(dev, mac_str);
if (setup_etheraddr(mac_str, lp->mac))
lp->have_mac = 1;
#endif
lp->mtu = TRANSPORT_TUNTAP_MTU;
//lp->info.tuntap.gate_addr = gate_addr; lp->mtu = TRANSPORT_TUNTAP_MTU;
lp->tp.info.tuntap.fd = -1; lp->tp.info.tuntap.fd = -1;
...@@ -302,13 +263,6 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) ...@@ -302,13 +263,6 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
lp->tp.protocol = tuntap_protocol; lp->tp.protocol = tuntap_protocol;
lp->tp.poll = tuntap_poll; lp->tp.poll = tuntap_poll;
printk("TUN/TAP backend - ");
#if 0
if (lp->host.gate_addr != NULL)
printk("IP = %s", lp->host.gate_addr);
#endif
printk("\n");
return 1; return 1;
} }
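A hypothetical boot parameter exercising this parser (the values are made up for illustration):

/* eth0=tuntap,52:54:00:12:34:56,tap0
 *   dev_name -> "tap0" (copied into lp->tp.info.tuntap.dev_name)
 *   mac_str  -> "52:54:00:12:34:56", parsed by setup_etheraddr()
 * A bare eth0=tuntap now fails the probe with "missing tuntap device
 * name" instead of silently defaulting the device name.
 */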
...@@ -327,7 +281,8 @@ static int iss_net_rx(struct net_device *dev) ...@@ -327,7 +281,8 @@ static int iss_net_rx(struct net_device *dev)
/* Try to allocate memory, if it fails, try again next round. */ /* Try to allocate memory, if it fails, try again next round. */
if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) { skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
if (skb == NULL) {
lp->stats.rx_dropped++; lp->stats.rx_dropped++;
return 0; return 0;
} }
...@@ -347,7 +302,6 @@ static int iss_net_rx(struct net_device *dev) ...@@ -347,7 +302,6 @@ static int iss_net_rx(struct net_device *dev)
lp->stats.rx_bytes += skb->len; lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++; lp->stats.rx_packets++;
// netif_rx(skb);
netif_rx_ni(skb); netif_rx_ni(skb);
return pkt_len; return pkt_len;
} }
...@@ -378,11 +332,11 @@ static int iss_net_poll(void) ...@@ -378,11 +332,11 @@ static int iss_net_poll(void)
spin_unlock(&lp->lock); spin_unlock(&lp->lock);
if (err < 0) { if (err < 0) {
printk(KERN_ERR "Device '%s' read returned %d, " pr_err("Device '%s' read returned %d, shutting it down\n",
"shutting it down\n", lp->dev->name, err); lp->dev->name, err);
dev_close(lp->dev); dev_close(lp->dev);
} else { } else {
// FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
} }
} }
...@@ -393,14 +347,11 @@ static int iss_net_poll(void) ...@@ -393,14 +347,11 @@ static int iss_net_poll(void)
static void iss_net_timer(unsigned long priv) static void iss_net_timer(unsigned long priv)
{ {
struct iss_net_private* lp = (struct iss_net_private*) priv; struct iss_net_private *lp = (struct iss_net_private *)priv;
spin_lock(&lp->lock); spin_lock(&lp->lock);
iss_net_poll(); iss_net_poll();
mod_timer(&lp->timer, jiffies + lp->timer_val); mod_timer(&lp->timer, jiffies + lp->timer_val);
spin_unlock(&lp->lock); spin_unlock(&lp->lock);
} }
...@@ -408,19 +359,14 @@ static void iss_net_timer(unsigned long priv) ...@@ -408,19 +359,14 @@ static void iss_net_timer(unsigned long priv)
static int iss_net_open(struct net_device *dev) static int iss_net_open(struct net_device *dev)
{ {
struct iss_net_private *lp = netdev_priv(dev); struct iss_net_private *lp = netdev_priv(dev);
char addr[sizeof "255.255.255.255\0"];
int err; int err;
spin_lock(&lp->lock); spin_lock(&lp->lock);
if ((err = lp->tp.open(lp)) < 0) err = lp->tp.open(lp);
if (err < 0)
goto out; goto out;
if (!lp->have_mac) {
dev_ip_addr(dev, addr, &lp->mac[2]);
set_ether_mac(dev, lp->mac);
}
netif_start_queue(dev); netif_start_queue(dev);
/* clear buffer - it can happen that the host side of the interface /* clear buffer - it can happen that the host side of the interface
...@@ -448,7 +394,6 @@ static int iss_net_open(struct net_device *dev) ...@@ -448,7 +394,6 @@ static int iss_net_open(struct net_device *dev)
static int iss_net_close(struct net_device *dev) static int iss_net_close(struct net_device *dev)
{ {
struct iss_net_private *lp = netdev_priv(dev); struct iss_net_private *lp = netdev_priv(dev);
printk("iss_net_close!\n");
netif_stop_queue(dev); netif_stop_queue(dev);
spin_lock(&lp->lock); spin_lock(&lp->lock);
...@@ -490,7 +435,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -490,7 +435,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else { } else {
netif_start_queue(dev); netif_start_queue(dev);
printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len); pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
} }
spin_unlock_irqrestore(&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
...@@ -508,56 +453,27 @@ static struct net_device_stats *iss_net_get_stats(struct net_device *dev) ...@@ -508,56 +453,27 @@ static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
static void iss_net_set_multicast_list(struct net_device *dev) static void iss_net_set_multicast_list(struct net_device *dev)
{ {
#if 0
if (dev->flags & IFF_PROMISC)
return;
else if (!netdev_mc_empty(dev))
dev->flags |= IFF_ALLMULTI;
else
dev->flags &= ~IFF_ALLMULTI;
#endif
} }
static void iss_net_tx_timeout(struct net_device *dev) static void iss_net_tx_timeout(struct net_device *dev)
{ {
#if 0
dev->trans_start = jiffies;
netif_wake_queue(dev);
#endif
} }
static int iss_net_set_mac(struct net_device *dev, void *addr) static int iss_net_set_mac(struct net_device *dev, void *addr)
{ {
#if 0
struct iss_net_private *lp = netdev_priv(dev); struct iss_net_private *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr; struct sockaddr *hwaddr = addr;
if (!is_valid_ether_addr(hwaddr->sa_data))
return -EADDRNOTAVAIL;
spin_lock(&lp->lock); spin_lock(&lp->lock);
memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
spin_unlock(&lp->lock); spin_unlock(&lp->lock);
#endif
return 0; return 0;
} }
static int iss_net_change_mtu(struct net_device *dev, int new_mtu) static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{ {
#if 0
struct iss_net_private *lp = netdev_priv(dev);
int err = 0;
spin_lock(&lp->lock);
// FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user);
if (new_mtu < 0)
err = new_mtu;
else
dev->mtu = new_mtu;
spin_unlock(&lp->lock);
return err;
#endif
return -EINVAL; return -EINVAL;
} }
...@@ -582,7 +498,6 @@ static const struct net_device_ops iss_netdev_ops = { ...@@ -582,7 +498,6 @@ static const struct net_device_ops iss_netdev_ops = {
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = iss_net_change_mtu, .ndo_change_mtu = iss_net_change_mtu,
.ndo_set_mac_address = iss_net_set_mac, .ndo_set_mac_address = iss_net_set_mac,
//.ndo_do_ioctl = iss_net_ioctl,
.ndo_tx_timeout = iss_net_tx_timeout, .ndo_tx_timeout = iss_net_tx_timeout,
.ndo_set_rx_mode = iss_net_set_multicast_list, .ndo_set_rx_mode = iss_net_set_multicast_list,
}; };
...@@ -593,24 +508,29 @@ static int iss_net_configure(int index, char *init) ...@@ -593,24 +508,29 @@ static int iss_net_configure(int index, char *init)
struct iss_net_private *lp; struct iss_net_private *lp;
int err; int err;
if ((dev = alloc_etherdev(sizeof *lp)) == NULL) { dev = alloc_etherdev(sizeof(*lp));
printk(KERN_ERR "eth_configure: failed to allocate device\n"); if (dev == NULL) {
pr_err("eth_configure: failed to allocate device\n");
return 1; return 1;
} }
/* Initialize private element. */ /* Initialize private element. */
lp = netdev_priv(dev); lp = netdev_priv(dev);
*lp = ((struct iss_net_private) { *lp = (struct iss_net_private) {
.device_list = LIST_HEAD_INIT(lp->device_list), .device_list = LIST_HEAD_INIT(lp->device_list),
.opened_list = LIST_HEAD_INIT(lp->opened_list), .opened_list = LIST_HEAD_INIT(lp->opened_list),
.lock = __SPIN_LOCK_UNLOCKED(lp.lock), .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
.dev = dev, .dev = dev,
.index = index, .index = index,
//.fd = -1, };
.mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
.have_mac = 0, /*
}); * If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
* and fail.
*/
snprintf(dev->name, sizeof(dev->name), "eth%d", index);
/* /*
* Try all transport protocols. * Try all transport protocols.
...@@ -618,14 +538,12 @@ static int iss_net_configure(int index, char *init) ...@@ -618,14 +538,12 @@ static int iss_net_configure(int index, char *init)
*/ */
if (!tuntap_probe(lp, index, init)) { if (!tuntap_probe(lp, index, init)) {
printk("Invalid arguments. Skipping device!\n"); pr_err("%s: invalid arguments. Skipping device!\n",
dev->name);
goto errout; goto errout;
} }
printk(KERN_INFO "Netdevice %d ", index); pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr);
if (lp->have_mac)
printk("(%pM) ", lp->mac);
printk(": ");
/* sysfs register */ /* sysfs register */
...@@ -641,14 +559,7 @@ static int iss_net_configure(int index, char *init) ...@@ -641,14 +559,7 @@ static int iss_net_configure(int index, char *init)
lp->pdev.id = index; lp->pdev.id = index;
lp->pdev.name = DRIVER_NAME; lp->pdev.name = DRIVER_NAME;
platform_device_register(&lp->pdev); platform_device_register(&lp->pdev);
SET_NETDEV_DEV(dev,&lp->pdev.dev); SET_NETDEV_DEV(dev, &lp->pdev.dev);
/*
* If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
* and fail.
*/
snprintf(dev->name, sizeof dev->name, "eth%d", index);
dev->netdev_ops = &iss_netdev_ops; dev->netdev_ops = &iss_netdev_ops;
dev->mtu = lp->mtu; dev->mtu = lp->mtu;
...@@ -660,7 +571,7 @@ static int iss_net_configure(int index, char *init) ...@@ -660,7 +571,7 @@ static int iss_net_configure(int index, char *init)
rtnl_unlock(); rtnl_unlock();
if (err) { if (err) {
printk("Error registering net device!\n"); pr_err("%s: error registering net device!\n", dev->name);
/* XXX: should we call ->remove() here? */ /* XXX: should we call ->remove() here? */
free_netdev(dev); free_netdev(dev);
return 1; return 1;
...@@ -669,16 +580,11 @@ static int iss_net_configure(int index, char *init) ...@@ -669,16 +580,11 @@ static int iss_net_configure(int index, char *init)
init_timer(&lp->tl); init_timer(&lp->tl);
lp->tl.function = iss_net_user_timer_expire; lp->tl.function = iss_net_user_timer_expire;
#if 0
if (lp->have_mac)
set_ether_mac(dev, lp->mac);
#endif
return 0; return 0;
errout: errout:
// FIXME: unregister; free, etc.. /* FIXME: unregister; free, etc.. */
return -EIO; return -EIO;
} }
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
...@@ -706,21 +612,22 @@ static int __init iss_net_setup(char *str) ...@@ -706,21 +612,22 @@ static int __init iss_net_setup(char *str)
struct iss_net_init *new; struct iss_net_init *new;
struct list_head *ele; struct list_head *ele;
char *end; char *end;
int n; int rc;
unsigned n;
n = simple_strtoul(str, &end, 0); end = strchr(str, '=');
if (end == str) { if (!end) {
printk(ERR "Failed to parse '%s'\n", str); printk(ERR "Expected '=' after device number\n");
return 1;
}
if (n < 0) {
printk(ERR "Device %d is negative\n", n);
return 1; return 1;
} }
if (*(str = end) != '=') { *end = 0;
printk(ERR "Expected '=' after device number\n"); rc = kstrtouint(str, 0, &n);
*end = '=';
if (rc < 0) {
printk(ERR "Failed to parse '%s'\n", str);
return 1; return 1;
} }
str = end;
spin_lock(&devices_lock); spin_lock(&devices_lock);
...@@ -733,13 +640,13 @@ static int __init iss_net_setup(char *str) ...@@ -733,13 +640,13 @@ static int __init iss_net_setup(char *str)
spin_unlock(&devices_lock); spin_unlock(&devices_lock);
if (device && device->index == n) { if (device && device->index == n) {
printk(ERR "Device %d already configured\n", n); printk(ERR "Device %u already configured\n", n);
return 1; return 1;
} }
new = alloc_bootmem(sizeof(*new)); new = alloc_bootmem(sizeof(*new));
if (new == NULL) { if (new == NULL) {
printk("Alloc_bootmem failed\n"); printk(ERR "Alloc_bootmem failed\n");
return 1; return 1;
} }
...@@ -753,7 +660,7 @@ static int __init iss_net_setup(char *str) ...@@ -753,7 +660,7 @@ static int __init iss_net_setup(char *str)
#undef ERR #undef ERR
__setup("eth=", iss_net_setup); __setup("eth", iss_net_setup);
/* /*
* Initialize all ISS Ethernet devices previously registered in iss_net_setup. * Initialize all ISS Ethernet devices previously registered in iss_net_setup.
......
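
The iss_net_setup() rework above replaces the open-coded simple_strtoul() sequence with a strict parse: find the '=', temporarily NUL-terminate the device number, run kstrtouint() on it, then restore the byte. Below is a minimal userspace sketch of the same flow, assuming an illustrative parse_eth_option() helper and using strtoul() plus an end-pointer check in place of the kernel's kstrtouint():

/* Userspace sketch of the parsing now done in iss_net_setup().
 * parse_eth_option() is an illustrative name, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_eth_option(char *str, unsigned int *index, char **rest)
{
	char *end = strchr(str, '=');
	char *numend;
	unsigned long n;

	if (!end)
		return -1;		/* no '=' after the device number */

	*end = 0;			/* terminate the number in place... */
	n = strtoul(str, &numend, 0);
	*end = '=';			/* ...and restore the original string */

	if (numend == str || numend != end)
		return -1;		/* empty number or trailing junk */

	*index = (unsigned int)n;
	*rest = end;			/* transport args start at the '=' */
	return 0;
}

int main(void)
{
	/* __setup("eth", ...) strips the "eth" prefix, so the handler
	 * sees e.g. "0=tuntap,,tap0" for a boot arg like eth0=tuntap,,tap0 */
	char arg[] = "0=tuntap,,tap0";
	unsigned int index;
	char *rest;

	if (parse_eth_option(arg, &index, &rest) == 0)
		printf("device %u, args '%s'\n", index, rest);
	return 0;
}

Unlike simple_strtoul(), this rejects malformed numbers outright instead of silently parsing a prefix, which is the point of the kernel-side switch to kstrtouint().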
@@ -15,10 +15,6 @@
 #ifndef __XTENSA_XTAVNET_HARDWARE_H
 #define __XTENSA_XTAVNET_HARDWARE_H
-/* By default NO_IRQ is defined to 0 in Linux, but we use the
-   interrupt 0 for UART... */
-#define NO_IRQ -1
 /* Memory configuration. */
 #define PLATFORM_DEFAULT_MEM_START 0x00000000
@@ -30,7 +26,7 @@
 /* Default assignment of LX60 devices to external interrupts. */
-#ifdef CONFIG_ARCH_HAS_SMP
+#ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
 #else
......
@@ -168,7 +168,7 @@ void __init platform_calibrate_ccount(void)
 	long clk_freq = 0;
 #ifdef CONFIG_OF
 	struct device_node *cpu =
-		of_find_compatible_node(NULL, NULL, "xtensa,cpu");
+		of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
 	if (cpu) {
 		u32 freq;
 		update_clock_frequency(cpu);
@@ -194,7 +194,7 @@ void __init platform_calibrate_ccount(void)
 * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
 */
-static struct resource ethoc_res[] __initdata = {
+static struct resource ethoc_res[] = {
 	[0] = { /* register space */
 		.start = OETH_REGS_PADDR,
 		.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
@@ -212,7 +212,7 @@ static struct resource ethoc_res[] __initdata = {
 	},
 };
-static struct ethoc_platform_data ethoc_pdata __initdata = {
+static struct ethoc_platform_data ethoc_pdata = {
 	/*
 	 * The MAC address for these boards is 00:50:c2:13:6f:xx.
 	 * The last byte (here as zero) is read from the DIP switches on the
@@ -222,7 +222,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
 	.phy_id = -1,
 };
-static struct platform_device ethoc_device __initdata = {
+static struct platform_device ethoc_device = {
 	.name = "ethoc",
 	.id = -1,
 	.num_resources = ARRAY_SIZE(ethoc_res),
@@ -236,13 +236,13 @@ static struct platform_device ethoc_device __initdata = {
 * UART
 */
-static struct resource serial_resource __initdata = {
+static struct resource serial_resource = {
 	.start = DUART16552_PADDR,
 	.end = DUART16552_PADDR + 0x1f,
 	.flags = IORESOURCE_MEM,
 };
-static struct plat_serial8250_port serial_platform_data[] __initdata = {
+static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase = DUART16552_PADDR,
 		.irq = DUART16552_INTNUM,
@@ -255,7 +255,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
 	{ },
 };
-static struct platform_device xtavnet_uart __initdata = {
+static struct platform_device xtavnet_uart = {
 	.name = "serial8250",
 	.id = PLAT8250_DEV_PLATFORM,
 	.dev = {
......
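
The xtfpga hunks above drop __initdata from the Ethernet and UART platform devices and their resources. platform_device_register() stores pointers to these objects for the lifetime of the system, while __initdata memory is discarded once boot finishes, so the annotation left the driver core holding dangling pointers. A userspace sketch of that lifetime bug, with illustrative names (the toy device_register() below is a stand-in registry, not the kernel API):

/* Model of the __initdata bug fixed above: the registry keeps only a
 * pointer, so freeing the object after "init" leaves it dangling. */
#include <stdio.h>
#include <stdlib.h>

struct device { const char *name; };

static struct device *registry[8];
static int nr_registered;

static void device_register(struct device *dev)
{
	registry[nr_registered++] = dev;	/* stores the pointer, no copy */
}

int main(void)
{
	struct device *dev = malloc(sizeof(*dev));	/* "init-time" memory */

	dev->name = "ethoc";
	device_register(dev);
	free(dev);	/* like the kernel discarding .init.data after boot */
	/* registry[0] now dangles; any later dereference is undefined
	 * behavior, which is why the platform devices above must not
	 * live in init memory. */
	printf("registered %d device(s)\n", nr_registered);
	return 0;
}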
 #ifndef _XTENSA_S6000_IRQ_H
 #define _XTENSA_S6000_IRQ_H
-#define NO_IRQ (-1)
 #define VARIANT_NR_IRQS 8 /* GPIO interrupts */
 extern void variant_irq_enable(unsigned int irq);
......
@@ -65,3 +65,7 @@ config VERSATILE_FPGA_IRQ_NR
 	int
 	default 4
 	depends on VERSATILE_FPGA_IRQ
+
+config XTENSA_MX
+	bool
+	select IRQ_DOMAIN
@@ -23,3 +23,5 @@ obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
 obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
 obj-$(CONFIG_ARCH_VT8500)		+= irq-vt8500.o
 obj-$(CONFIG_TB10X_IRQC)		+= irq-tb10x.o
+obj-$(CONFIG_XTENSA)			+= irq-xtensa-pic.o
+obj-$(CONFIG_XTENSA_MX)			+= irq-xtensa-mx.o
/*
* Xtensa MX interrupt distributor
*
* Copyright (C) 2002 - 2013 Tensilica, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/mxregs.h>
#include "irqchip.h"
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
if (hw < HW_IRQ_IPI_COUNT) {
struct irq_chip *irq_chip = d->host_data;
irq_set_chip_and_handler_name(irq, irq_chip,
handle_percpu_irq, "ipi");
irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
return xtensa_irq_map(d, irq, hw);
}
/*
* Device Tree IRQ specifier translation function which works with one or
* two cell bindings. First cell value maps directly to the hwirq number.
* Second cell if present specifies whether hwirq number is external (1) or
* internal (0).
*/
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
return xtensa_irq_domain_xlate(intspec, intsize,
intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
out_hwirq, out_type);
}
static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
.xlate = xtensa_mx_irq_domain_xlate,
.map = xtensa_mx_irq_map,
};
void secondary_init_irq(void)
{
__this_cpu_write(cached_irq_mask,
XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}
static void xtensa_mx_irq_mask(struct irq_data *d)
{
unsigned int mask = 1u << d->hwirq;
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
HW_IRQ_MX_BASE), MIENG);
} else {
mask = __this_cpu_read(cached_irq_mask) & ~mask;
__this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable);
}
}
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
unsigned int mask = 1u << d->hwirq;
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
HW_IRQ_MX_BASE), MIENGSET);
} else {
mask |= __this_cpu_read(cached_irq_mask);
__this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable);
}
}
static void xtensa_mx_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->hwirq);
xtensa_mx_irq_unmask(d);
}
static void xtensa_mx_irq_disable(struct irq_data *d)
{
xtensa_mx_irq_mask(d);
variant_irq_disable(d->hwirq);
}
static void xtensa_mx_irq_ack(struct irq_data *d)
{
set_sr(1 << d->hwirq, intclear);
}
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
set_sr(1 << d->hwirq, intset);
return 1;
}
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force)
{
unsigned mask = 1u << cpumask_any(dest);
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
return 0;
}
static struct irq_chip xtensa_mx_irq_chip = {
.name = "xtensa-mx",
.irq_enable = xtensa_mx_irq_enable,
.irq_disable = xtensa_mx_irq_disable,
.irq_mask = xtensa_mx_irq_mask,
.irq_unmask = xtensa_mx_irq_unmask,
.irq_ack = xtensa_mx_irq_ack,
.irq_retrigger = xtensa_mx_irq_retrigger,
.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
secondary_init_irq();
return 0;
}
static int __init xtensa_mx_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
secondary_init_irq();
return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
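
The shared helper xtensa_irq_domain_xlate() used above is added elsewhere in this series, so only its call signature is visible here: the MX driver passes intspec[0] as the internal hwirq candidate and intspec[0] + HW_IRQ_EXTERN_BASE as the external one. The following userspace sketch models the one-or-two-cell translation the comment describes; how the helper selects between the two candidates is an assumption based on that comment:

/* Sketch of the MX one-or-two-cell interrupt specifier translation.
 * HW_IRQ_EXTERN_BASE matches the value used in the driver above. */
#include <stdio.h>

#define HW_IRQ_EXTERN_BASE 3

static int xlate(const unsigned int *intspec, unsigned int intsize,
		 unsigned long *out_hwirq)
{
	if (intsize < 1 || intsize > 2)
		return -1;
	/* one cell, or second cell == 0: the number is the hwirq itself */
	if (intsize == 1 || intspec[1] == 0)
		*out_hwirq = intspec[0];
	else	/* second cell == 1: external IRQ, offset past the MX IPIs */
		*out_hwirq = intspec[0] + HW_IRQ_EXTERN_BASE;
	return 0;
}

int main(void)
{
	unsigned int ext[] = { 4, 1 };
	unsigned long hwirq;

	xlate(ext, 2, &hwirq);
	printf("external 4 -> hwirq %lu\n", hwirq);	/* prints 7 */
	return 0;
}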
/*
* Xtensa built-in interrupt controller
*
* Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Chris Zankel <chris@zankel.net>
* Kevin Chea
*/
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/of.h>
#include "irqchip.h"
unsigned int cached_irq_mask;
/*
* Device Tree IRQ specifier translation function which works with one or
* two cell bindings. First cell value maps directly to the hwirq number.
* Second cell if present specifies whether hwirq number is external (1) or
* internal (0).
*/
static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
return xtensa_irq_domain_xlate(intspec, intsize,
intspec[0], intspec[0],
out_hwirq, out_type);
}
static const struct irq_domain_ops xtensa_irq_domain_ops = {
.xlate = xtensa_pic_irq_domain_xlate,
.map = xtensa_irq_map,
};
static void xtensa_irq_mask(struct irq_data *d)
{
cached_irq_mask &= ~(1 << d->hwirq);
set_sr(cached_irq_mask, intenable);
}
static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->hwirq;
set_sr(cached_irq_mask, intenable);
}
static void xtensa_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->hwirq, intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{
set_sr(1 << d->hwirq, intset);
return 1;
}
static struct irq_chip xtensa_irq_chip = {
.name = "xtensa",
.irq_enable = xtensa_irq_enable,
.irq_disable = xtensa_irq_disable,
.irq_mask = xtensa_irq_mask,
.irq_unmask = xtensa_irq_unmask,
.irq_ack = xtensa_irq_ack,
.irq_retrigger = xtensa_irq_retrigger,
};
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
}
static int __init xtensa_pic_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
&xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
}
IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
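
Both irqchips shadow the INTENABLE special register, since mask/unmask are read-modify-write operations on state that only this code touches: the MX driver keeps a per-CPU copy (each core has its own INTENABLE), while the built-in PIC above uses a single global. A standalone sketch of that shadow-register pattern, with set_sr_intenable() standing in for set_sr(..., intenable):

/* Minimal userspace model of the cached_irq_mask pattern used above. */
#include <stdio.h>

static unsigned int cached_irq_mask;	/* shadow of the enable register */
static unsigned int intenable_reg;	/* stands in for the INTENABLE SR */

static void set_sr_intenable(unsigned int v) { intenable_reg = v; }

static void irq_mask(unsigned int hwirq)
{
	cached_irq_mask &= ~(1u << hwirq);
	set_sr_intenable(cached_irq_mask);
}

static void irq_unmask(unsigned int hwirq)
{
	cached_irq_mask |= 1u << hwirq;
	set_sr_intenable(cached_irq_mask);
}

int main(void)
{
	irq_unmask(3);
	irq_unmask(5);
	irq_mask(3);
	printf("INTENABLE = %#x\n", intenable_reg);	/* prints 0x20 */
	return 0;
}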
/*
* Xtensa MX interrupt distributor
*
* Copyright (C) 2002 - 2013 Tensilica, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __LINUX_IRQCHIP_XTENSA_MX_H
#define __LINUX_IRQCHIP_XTENSA_MX_H
struct device_node;
int xtensa_mx_init_legacy(struct device_node *interrupt_parent);
#endif /* __LINUX_IRQCHIP_XTENSA_MX_H */
/*
* Xtensa built-in interrupt controller
*
* Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __LINUX_IRQCHIP_XTENSA_PIC_H
#define __LINUX_IRQCHIP_XTENSA_PIC_H
struct device_node;
int xtensa_pic_init_legacy(struct device_node *interrupt_parent);
#endif /* __LINUX_IRQCHIP_XTENSA_PIC_H */