Commit 7c019191 authored by Linus Torvalds

Merge tag 'xtensa-20150830' of git://github.com/czankel/xtensa-linux

Pull xtensa updates from Chris Zankel:
 "Xtensa fixes and improvements for 4.3:

   - reimplement DMA API using common helpers
   - implement counting and sampling perf events using hardware perf
     counters
   - add fake NMI support for hardware perf counters
   - fix THREADPTR register reloading on return to userspace
   - keep exception/interrupt stack continuous for debugger
   - improve vmlinux.lds.S post-processing"

* tag 'xtensa-20150830' of git://github.com/czankel/xtensa-linux:
  xtensa: improve vmlinux.lds.S sed post-processing
  xtensa: drop unused irq_err_count
  xtensa: implement fake NMI
  xtensa: don't touch EXC_TABLE_FIXUP in _switch_to
  xtensa: fix kernel register spilling
  xtensa: reorganize irq flags tracing
  perf tools: xtensa: add DWARF register names
  xtensa: implement counting and sampling perf events
  xtensa: count software page fault perf events
  xtensa: add profiling IRQ type to xtensa_irq_map
  xtensa: select PERF_USE_VMALLOC for cache-aliasing configurations
  xtensa: move oprofile stack tracing to stacktrace.c
  xtensa: keep exception/interrupt stack continuous
  xtensa: clean up Kconfig dependencies for custom cores
  xtensa: reimplement DMA API using common helpers
  xtensa: fix threadptr reload on return to userspace
  xtensa: ISS: add missing va_end into split_if_spec
parents 7b8067d3 650c9197
...@@ -14,12 +14,15 @@ config XTENSA ...@@ -14,12 +14,15 @@ config XTENSA
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_IRQ_TIME_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_OPROFILE select HAVE_OPROFILE
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select IRQ_DOMAIN select IRQ_DOMAIN
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC
select VIRT_TO_BUS select VIRT_TO_BUS
help help
Xtensa processors are 32-bit RISC machines designed by Tensilica Xtensa processors are 32-bit RISC machines designed by Tensilica
...@@ -61,9 +64,7 @@ config TRACE_IRQFLAGS_SUPPORT ...@@ -61,9 +64,7 @@ config TRACE_IRQFLAGS_SUPPORT
def_bool y def_bool y
config MMU config MMU
bool def_bool n
default n if !XTENSA_VARIANT_CUSTOM
default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM
config VARIANT_IRQ_SWITCH config VARIANT_IRQ_SWITCH
def_bool n def_bool n
...@@ -71,9 +72,6 @@ config VARIANT_IRQ_SWITCH ...@@ -71,9 +72,6 @@ config VARIANT_IRQ_SWITCH
config HAVE_XTENSA_GPIO32 config HAVE_XTENSA_GPIO32
def_bool n def_bool n
config MAY_HAVE_SMP
def_bool n
menu "Processor type and features" menu "Processor type and features"
choice choice
...@@ -100,7 +98,6 @@ config XTENSA_VARIANT_DC233C ...@@ -100,7 +98,6 @@ config XTENSA_VARIANT_DC233C
config XTENSA_VARIANT_CUSTOM config XTENSA_VARIANT_CUSTOM
bool "Custom Xtensa processor configuration" bool "Custom Xtensa processor configuration"
select MAY_HAVE_SMP
select HAVE_XTENSA_GPIO32 select HAVE_XTENSA_GPIO32
help help
Select this variant to use a custom Xtensa processor configuration. Select this variant to use a custom Xtensa processor configuration.
...@@ -126,10 +123,21 @@ config XTENSA_VARIANT_MMU ...@@ -126,10 +123,21 @@ config XTENSA_VARIANT_MMU
bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)"
depends on XTENSA_VARIANT_CUSTOM depends on XTENSA_VARIANT_CUSTOM
default y default y
select MMU
help help
Build a Conventional Kernel with full MMU support, Build a Conventional Kernel with full MMU support,
ie: it supports a TLB with auto-loading, page protection. ie: it supports a TLB with auto-loading, page protection.
config XTENSA_VARIANT_HAVE_PERF_EVENTS
bool "Core variant has Performance Monitor Module"
depends on XTENSA_VARIANT_CUSTOM
default n
help
Enable if core variant has Performance Monitor Module with
External Registers Interface.
If unsure, say N.
config XTENSA_UNALIGNED_USER config XTENSA_UNALIGNED_USER
bool "Unaligned memory access in use space" bool "Unaligned memory access in use space"
help help
...@@ -143,7 +151,7 @@ source "kernel/Kconfig.preempt" ...@@ -143,7 +151,7 @@ source "kernel/Kconfig.preempt"
config HAVE_SMP config HAVE_SMP
bool "System Supports SMP (MX)" bool "System Supports SMP (MX)"
depends on MAY_HAVE_SMP depends on XTENSA_VARIANT_CUSTOM
select XTENSA_MX select XTENSA_MX
help help
This option is used to indicate that the system-on-a-chip (SOC) This option is used to indicate that the system-on-a-chip (SOC)
......
...@@ -2,7 +2,6 @@ generic-y += bitsperlong.h ...@@ -2,7 +2,6 @@ generic-y += bitsperlong.h
generic-y += bug.h generic-y += bug.h
generic-y += clkdev.h generic-y += clkdev.h
generic-y += cputime.h generic-y += cputime.h
generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
* *
* Locking interrupts looks like this: * Locking interrupts looks like this:
* *
* rsil a15, LOCKLEVEL * rsil a15, TOPLEVEL
* <code> * <code>
* wsr a15, PS * wsr a15, PS
* rsync * rsync
...@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \ ...@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \ unsigned int vval; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" rsil a15, "__stringify(LOCKLEVEL)"\n"\ " rsil a15, "__stringify(TOPLEVEL)"\n"\
" l32i %0, %2, 0\n" \ " l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \ " " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \ " s32i %0, %2, 0\n" \
...@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
unsigned int vval; \ unsigned int vval; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" rsil a15,"__stringify(LOCKLEVEL)"\n" \ " rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \ " l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \ " " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \ " s32i %0, %2, 0\n" \
...@@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) ...@@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
unsigned int vval; unsigned int vval;
__asm__ __volatile__( __asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n" " rsil a15,"__stringify(TOPLEVEL)"\n"
" l32i %0, %2, 0\n" " l32i %0, %2, 0\n"
" xor %1, %4, %3\n" " xor %1, %4, %3\n"
" and %0, %0, %4\n" " and %0, %0, %4\n"
...@@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ...@@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
unsigned int vval; unsigned int vval;
__asm__ __volatile__( __asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n" " rsil a15,"__stringify(TOPLEVEL)"\n"
" l32i %0, %2, 0\n" " l32i %0, %2, 0\n"
" or %0, %0, %1\n" " or %0, %0, %1\n"
" s32i %0, %2, 0\n" " s32i %0, %2, 0\n"
......
...@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new) ...@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
return new; return new;
#else #else
__asm__ __volatile__( __asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n" " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n" " l32i %0, %1, 0\n"
" bne %0, %2, 1f\n" " bne %0, %2, 1f\n"
" s32i %3, %1, 0\n" " s32i %3, %1, 0\n"
...@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) ...@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else #else
unsigned long tmp; unsigned long tmp;
__asm__ __volatile__( __asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n" " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n" " l32i %0, %1, 0\n"
" s32i %2, %1, 0\n" " s32i %2, %1, 0\n"
" wsr a15, ps\n" " wsr a15, ps\n"
......
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_XTENSA_DEVICE_H
#define _ASM_XTENSA_DEVICE_H
struct dma_map_ops;
struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
};
#endif /* _ASM_XTENSA_DEVICE_H */
/* /*
* include/asm-xtensa/dma-mapping.h
*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2003 - 2005 Tensilica Inc. * Copyright (C) 2003 - 2005 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*/ */
#ifndef _XTENSA_DMA_MAPPING_H #ifndef _XTENSA_DMA_MAPPING_H
...@@ -13,142 +12,67 @@ ...@@ -13,142 +12,67 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm-generic/dma-coherent.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0) #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
/* extern struct dma_map_ops xtensa_dma_map_ops;
* DMA-consistent mapping functions.
*/
extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size, static inline struct dma_map_ops *get_dma_ops(struct device *dev)
void *vaddr, dma_addr_t dma_handle);
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
consistent_sync(ptr, size, direction);
return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{ {
BUG_ON(direction == DMA_NONE); if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
else
return &xtensa_dma_map_ops;
} }
static inline int #include <asm-generic/dma-mapping-common.h>
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
int i;
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
for_each_sg(sglist, sg, nents, i) {
BUG_ON(!sg_page(sg));
sg->dma_address = sg_phys(sg); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
consistent_sync(sg_virt(sg), sg->length, direction); #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
} #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
return nents; static inline void *dma_alloc_attrs(struct device *dev, size_t size,
} dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{ {
BUG_ON(direction == DMA_NONE); void *ret;
} struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
static inline void ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void return ret;
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
} }
static inline void static inline void dma_free_attrs(struct device *dev, size_t size,
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, void *vaddr, dma_addr_t dma_handle,
unsigned long offset, size_t size, struct dma_attrs *attrs)
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{ {
struct dma_map_ops *ops = get_dma_ops(dev);
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); if (dma_release_from_coherent(dev, get_order(size), vaddr))
} return;
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i) ops->free(dev, size, vaddr, dma_handle, attrs);
consistent_sync(sg_virt(sg), sg->length, dir); debug_dma_free_coherent(dev, size, vaddr, dma_handle);
} }
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i)
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline int static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr) dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{ {
return 0; struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, dma_addr);
return ops->mapping_error(dev, dma_addr);
} }
static inline int static inline int
...@@ -168,39 +92,7 @@ dma_set_mask(struct device *dev, u64 mask) ...@@ -168,39 +92,7 @@ dma_set_mask(struct device *dev, u64 mask)
return 0; return 0;
} }
static inline void void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction);
enum dma_data_direction direction)
{
consistent_sync(vaddr, size, direction);
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
}
#endif /* _XTENSA_DMA_MAPPING_H */ #endif /* _XTENSA_DMA_MAPPING_H */
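The new header routes both coherent and streaming DMA through get_dma_ops(), so drivers keep using the generic API unchanged while the arch code supplies xtensa_dma_map_ops. A minimal driver-side sketch, assuming a hypothetical device and buffer (none of these names come from the patch):

/* Illustrative only: a hypothetical driver allocating a coherent buffer
 * and streaming one TX buffer through the generic DMA API, which now
 * dispatches to xtensa_dma_map_ops (or dev->archdata.dma_ops if set).
 */
#include <linux/dma-mapping.h>

static int example_dma_setup(struct device *dev, void *txbuf, size_t len)
{
	dma_addr_t coh_handle, tx_handle;
	void *coh;

	/* coherent (uncached on xtensa) allocation via ops->alloc */
	coh = dma_alloc_coherent(dev, PAGE_SIZE, &coh_handle, GFP_KERNEL);
	if (!coh)
		return -ENOMEM;

	/* streaming mapping: writes back the cache range via ops->map_page */
	tx_handle = dma_map_single(dev, txbuf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, tx_handle)) {
		dma_free_coherent(dev, PAGE_SIZE, coh, coh_handle);
		return -EIO;
	}

	/* ... program the device with tx_handle / coh_handle ... */

	dma_unmap_single(dev, tx_handle, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, coh, coh_handle);
	return 0;
}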
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*/ */
#ifndef _XTENSA_IRQFLAGS_H #ifndef _XTENSA_IRQFLAGS_H
...@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void) ...@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void)
static inline unsigned long arch_local_irq_save(void) static inline unsigned long arch_local_irq_save(void)
{ {
unsigned long flags; unsigned long flags;
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
unsigned long tmp;
asm volatile("rsr %0, ps\t\n"
"extui %1, %0, 0, 4\t\n"
"bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
"rsil %0, "__stringify(LOCKLEVEL)"\n"
"1:"
: "=a" (flags), "=a" (tmp) :: "memory");
#else
asm volatile("rsr %0, ps\t\n"
"or %0, %0, %1\t\n"
"xsr %0, ps\t\n"
"rsync"
: "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
#endif
#else
asm volatile("rsil %0, "__stringify(LOCKLEVEL) asm volatile("rsil %0, "__stringify(LOCKLEVEL)
: "=a" (flags) :: "memory"); : "=a" (flags) :: "memory");
#endif
return flags; return flags;
} }
......
/* /*
* include/asm-xtensa/processor.h
*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2008 Tensilica Inc. * Copyright (C) 2001 - 2008 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*/ */
#ifndef _XTENSA_PROCESSOR_H #ifndef _XTENSA_PROCESSOR_H
...@@ -44,6 +43,14 @@ ...@@ -44,6 +43,14 @@
#define STACK_TOP TASK_SIZE #define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP #define STACK_TOP_MAX STACK_TOP
/*
* General exception cause assigned to fake NMI. Fake NMI needs to be handled
* differently from other interrupts, but it uses common kernel entry/exit
* code.
*/
#define EXCCAUSE_MAPPED_NMI 62
/* /*
* General exception cause assigned to debug exceptions. Debug exceptions go * General exception cause assigned to debug exceptions. Debug exceptions go
* to their own vector, rather than the general exception vectors (user, * to their own vector, rather than the general exception vectors (user,
...@@ -65,10 +72,30 @@ ...@@ -65,10 +72,30 @@
#define VALID_DOUBLE_EXCEPTION_ADDRESS 64 #define VALID_DOUBLE_EXCEPTION_ADDRESS 64
#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
/* LOCKLEVEL defines the interrupt level that masks all /* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts. * general-purpose interrupts.
*/ */
#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
defined(XCHAL_PROFILING_INTERRUPT) && \
PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
XCHAL_EXCM_LEVEL > 1 && \
IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL #define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif
#define TOPLEVEL XCHAL_EXCM_LEVEL
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers * registers
......
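A worked example of how these macros resolve, using made-up core parameters (the XCHAL_* values below are illustrative, not taken from any shipped variant), with CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS=y:

/* Hypothetical core: profiling interrupt 11 sits alone at level 3,
 * which is also XCHAL_EXCM_LEVEL.  Values are illustrative only.
 */
#define XCHAL_EXCM_LEVEL		3
#define XCHAL_PROFILING_INTERRUPT	11
#define XCHAL_INT11_LEVEL		3
#define XCHAL_INTLEVEL3_MASK		0x00000800	/* only interrupt 11 */

/* PROFILING_INTLEVEL expands to XCHAL_INT11_LEVEL == 3 == XCHAL_EXCM_LEVEL,
 * the level-3 mask is a power of two, and XCHAL_EXCM_LEVEL > 1, so:
 *
 *   LOCKLEVEL       = XCHAL_EXCM_LEVEL - 1 = 2
 *   TOPLEVEL        = XCHAL_EXCM_LEVEL     = 3
 *   XTENSA_FAKE_NMI = (LOCKLEVEL < TOPLEVEL) = 1
 *
 * local_irq_save() then masks only up to level 2, while atomics and
 * cmpxchg raise to TOPLEVEL, leaving the level-3 PMU interrupt free to
 * fire inside irq-disabled regions as a fake NMI.
 */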
...@@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp, ...@@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data), int (*fn)(struct stackframe *frame, void *data),
void *data); void *data);
void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
int (*kfn)(struct stackframe *frame, void *data),
int (*ufn)(struct stackframe *frame, void *data),
void *data);
void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
int (*ufn)(struct stackframe *frame, void *data),
void *data);
#endif /* _XTENSA_STACKTRACE_H */ #endif /* _XTENSA_STACKTRACE_H */
...@@ -25,30 +25,39 @@ static inline void spill_registers(void) ...@@ -25,30 +25,39 @@ static inline void spill_registers(void)
{ {
#if XCHAL_NUM_AREGS > 16 #if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ ( __asm__ __volatile__ (
" call12 1f\n" " call8 1f\n"
" _j 2f\n" " _j 2f\n"
" retw\n" " retw\n"
" .align 4\n" " .align 4\n"
"1:\n" "1:\n"
#if XCHAL_NUM_AREGS == 32
" _entry a1, 32\n"
" addi a8, a0, 3\n"
" _entry a1, 16\n"
" mov a12, a12\n"
" retw\n"
#else
" _entry a1, 48\n" " _entry a1, 48\n"
" addi a12, a0, 3\n" " call12 1f\n"
#if XCHAL_NUM_AREGS > 32 " retw\n"
" .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" " .align 4\n"
"1:\n"
" .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
" _entry a1, 48\n" " _entry a1, 48\n"
" mov a12, a0\n" " mov a12, a0\n"
" .endr\n" " .endr\n"
#endif " _entry a1, 16\n"
" _entry a1, 48\n"
#if XCHAL_NUM_AREGS % 12 == 0 #if XCHAL_NUM_AREGS % 12 == 0
" mov a8, a8\n"
#elif XCHAL_NUM_AREGS % 12 == 4
" mov a12, a12\n" " mov a12, a12\n"
#elif XCHAL_NUM_AREGS % 12 == 8 #elif XCHAL_NUM_AREGS % 12 == 4
" mov a4, a4\n" " mov a4, a4\n"
#elif XCHAL_NUM_AREGS % 12 == 8
" mov a8, a8\n"
#endif #endif
" retw\n" " retw\n"
#endif
"2:\n" "2:\n"
: : : "a12", "a13", "memory"); : : : "a8", "a9", "memory");
#else #else
__asm__ __volatile__ ( __asm__ __volatile__ (
" mov a12, a12\n" " mov a12, a12\n"
......
...@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI) += pci.o ...@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_SMP) += smp.o mxhead.o obj-$(CONFIG_SMP) += smp.o mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
AFLAGS_head.o += -mtext-section-literals AFLAGS_head.o += -mtext-section-literals
...@@ -27,10 +28,11 @@ AFLAGS_head.o += -mtext-section-literals ...@@ -27,10 +28,11 @@ AFLAGS_head.o += -mtext-section-literals
# #
# Replicate rules in scripts/Makefile.build # Replicate rules in scripts/Makefile.build
sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \
-e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \
-e 's/\*(\(\.text .*\))/*(.literal \1)/g' \ -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \
-e 's/\.{text}/.text/g'
quiet_cmd__cpp_lds_S = LDS $@ quiet_cmd__cpp_lds_S = LDS $@
cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
......
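For context: Xtensa compilers emit literal pools into separate .literal* input sections, and the linker script must pull each one in next to the .text section it belongs to. Traced by hand against the new rules (so treat the exact output as approximate), *(.text) becomes *(.literal .text), *(.text.unlikely) becomes *(.literal.unlikely .text.unlikely), and *(.init.text) becomes *(.init.literal .init.text); the old rules only handled a fixed set of such patterns instead of rewriting every .text* occurrence.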
/* /*
* arch/xtensa/kernel/entry.S
*
* Low-level exception handling * Low-level exception handling
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
...@@ -8,6 +6,7 @@ ...@@ -8,6 +6,7 @@
* for more details. * for more details.
* *
* Copyright (C) 2004 - 2008 by Tensilica Inc. * Copyright (C) 2004 - 2008 by Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
* *
* Chris Zankel <chris@zankel.net> * Chris Zankel <chris@zankel.net>
* *
...@@ -75,6 +74,27 @@ ...@@ -75,6 +74,27 @@
#endif #endif
.endm .endm
.macro irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
rsr \flags, ps
extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei \tmp, LOCKLEVEL, 99f
rsil \tmp, LOCKLEVEL
99:
#else
movi \tmp, LOCKLEVEL
rsr \flags, ps
or \flags, \flags, \tmp
xsr \flags, ps
rsync
#endif
#else
rsil \flags, LOCKLEVEL
#endif
.endm
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
/* /*
...@@ -122,6 +142,7 @@ _user_exception: ...@@ -122,6 +142,7 @@ _user_exception:
/* Save SAR and turn off single stepping */ /* Save SAR and turn off single stepping */
movi a2, 0 movi a2, 0
wsr a2, depc # terminate user stack trace with 0
rsr a3, sar rsr a3, sar
xsr a2, icountlevel xsr a2, icountlevel
s32i a3, a1, PT_SAR s32i a3, a1, PT_SAR
...@@ -301,7 +322,18 @@ _kernel_exception: ...@@ -301,7 +322,18 @@ _kernel_exception:
s32i a14, a1, PT_AREG14 s32i a14, a1, PT_AREG14
s32i a15, a1, PT_AREG15 s32i a15, a1, PT_AREG15
_bnei a2, 1, 1f
/* Copy spill slots of a0 and a1 to imitate movsp
* in order to keep exception stack continuous
*/
l32i a3, a1, PT_SIZE
l32i a0, a1, PT_SIZE + 4
s32e a3, a1, -16
s32e a0, a1, -12
1: 1:
l32i a0, a1, PT_AREG0 # restore saved a0
wsr a0, depc
#ifdef KERNEL_STACK_OVERFLOW_CHECK #ifdef KERNEL_STACK_OVERFLOW_CHECK
...@@ -340,66 +372,79 @@ common_exception: ...@@ -340,66 +372,79 @@ common_exception:
/* It is now safe to restore the EXC_TABLE_FIXUP variable. */ /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
rsr a0, exccause rsr a2, exccause
movi a3, 0 movi a3, 0
rsr a2, excsave1 rsr a0, excsave1
s32i a0, a1, PT_EXCCAUSE s32i a2, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP s32i a3, a0, EXC_TABLE_FIXUP
/* All unrecoverable states are saved on stack, now, and a1 is valid, /* All unrecoverable states are saved on stack, now, and a1 is valid.
* so we can allow exceptions and interrupts (*) again. * Now we can allow exceptions again. In case we've got an interrupt
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
* otherwise it's left unchanged.
* *
* (*) We only allow interrupts if they were previously enabled and * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
* we're not handling an IRQ
*/ */
rsr a3, ps rsr a3, ps
addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT s32i a3, a1, PT_PS # save ps
movi a2, LOCKLEVEL
#if XTENSA_FAKE_NMI
/* Correct PS needs to be saved in the PT_PS:
* - in case of exception or level-1 interrupt it's in the PS,
* and is already saved.
* - in case of medium level interrupt it's in the excsave2.
*/
movi a0, EXCCAUSE_MAPPED_NMI
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
beq a2, a0, .Lmedium_level_irq
bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0
.Lmedium_level_irq:
rsr a0, excsave2
s32i a0, a1, PT_PS # save medium-level interrupt ps
bgei a3, LOCKLEVEL, .Lexception
.Llevel1_irq:
movi a3, LOCKLEVEL
.Lexception:
movi a0, 1 << PS_WOE_BIT
or a3, a3, a0
#else
addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
movi a0, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL # a3 = PS.INTLEVEL
moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT movi a2, 1 << PS_WOE_BIT
or a3, a3, a2 or a3, a3, a2
rsr a0, exccause rsr a2, exccause
xsr a3, ps #endif
s32i a3, a1, PT_PS # save ps /* restore return address (or 0 if return to userspace) */
rsr a0, depc
wsr a3, ps
rsync # PS.WOE => rsync => overflow
/* Save lbeg, lend */ /* Save lbeg, lend */
rsr a2, lbeg rsr a4, lbeg
rsr a3, lend rsr a3, lend
s32i a2, a1, PT_LBEG s32i a4, a1, PT_LBEG
s32i a3, a1, PT_LEND s32i a3, a1, PT_LEND
/* Save SCOMPARE1 */ /* Save SCOMPARE1 */
#if XCHAL_HAVE_S32C1I #if XCHAL_HAVE_S32C1I
rsr a2, scompare1 rsr a3, scompare1
s32i a2, a1, PT_SCOMPARE1 s32i a3, a1, PT_SCOMPARE1
#endif #endif
/* Save optional registers. */ /* Save optional registers. */
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
#ifdef CONFIG_TRACE_IRQFLAGS
l32i a4, a1, PT_DEPC
/* Double exception means we came here with an exception
* while PS.EXCM was set, i.e. interrupts disabled.
*/
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
l32i a4, a1, PT_EXCCAUSE
bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
/* We came here with an interrupt means interrupts were enabled
* and we've just disabled them.
*/
movi a4, trace_hardirqs_off
callx4 a4
1:
#endif
/* Go to second-level dispatcher. Set up parameters to pass to the /* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler. * exception handler and call the exception handler.
...@@ -407,8 +452,8 @@ common_exception: ...@@ -407,8 +452,8 @@ common_exception:
rsr a4, excsave1 rsr a4, excsave1
mov a6, a1 # pass stack frame mov a6, a1 # pass stack frame
mov a7, a0 # pass EXCCAUSE mov a7, a2 # pass EXCCAUSE
addx4 a4, a0, a4 addx4 a4, a2, a4
l32i a4, a4, EXC_TABLE_DEFAULT # load handler l32i a4, a4, EXC_TABLE_DEFAULT # load handler
/* Call the second-level handler */ /* Call the second-level handler */
...@@ -419,8 +464,17 @@ common_exception: ...@@ -419,8 +464,17 @@ common_exception:
.global common_exception_return .global common_exception_return
common_exception_return: common_exception_return:
#if XTENSA_FAKE_NMI
l32i a2, a1, PT_EXCCAUSE
movi a3, EXCCAUSE_MAPPED_NMI
beq a2, a3, .LNMIexit
#endif
1: 1:
rsil a2, LOCKLEVEL irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_off
callx4 a4
#endif
/* Jump if we are returning from kernel exceptions. */ /* Jump if we are returning from kernel exceptions. */
...@@ -445,6 +499,10 @@ common_exception_return: ...@@ -445,6 +499,10 @@ common_exception_return:
/* Call do_signal() */ /* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on
callx4 a4
#endif
rsil a2, 0 rsil a2, 0
movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1 mov a6, a1
...@@ -453,6 +511,10 @@ common_exception_return: ...@@ -453,6 +511,10 @@ common_exception_return:
3: /* Reschedule */ 3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on
callx4 a4
#endif
rsil a2, 0 rsil a2, 0
movi a4, schedule # void schedule (void) movi a4, schedule # void schedule (void)
callx4 a4 callx4 a4
...@@ -471,6 +533,12 @@ common_exception_return: ...@@ -471,6 +533,12 @@ common_exception_return:
j 1b j 1b
#endif #endif
#if XTENSA_FAKE_NMI
.LNMIexit:
l32i a3, a1, PT_PS
_bbci.l a3, PS_UM_BIT, 4f
#endif
5: 5:
#ifdef CONFIG_DEBUG_TLB_SANITY #ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC l32i a4, a1, PT_DEPC
...@@ -481,16 +549,8 @@ common_exception_return: ...@@ -481,16 +549,8 @@ common_exception_return:
6: 6:
4: 4:
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
l32i a4, a1, PT_DEPC extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
/* Double exception means we came here with an exception bgei a4, LOCKLEVEL, 1f
* while PS.EXCM was set, i.e. interrupts disabled.
*/
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
l32i a4, a1, PT_EXCCAUSE
bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
/* We came here with an interrupt means interrupts were enabled
* and we'll reenable them on return.
*/
movi a4, trace_hardirqs_on movi a4, trace_hardirqs_on
callx4 a4 callx4 a4
1: 1:
...@@ -568,12 +628,13 @@ user_exception_exit: ...@@ -568,12 +628,13 @@ user_exception_exit:
* (if we have restored WSBITS-1 frames). * (if we have restored WSBITS-1 frames).
*/ */
2:
#if XCHAL_HAVE_THREADPTR #if XCHAL_HAVE_THREADPTR
l32i a3, a1, PT_THREADPTR l32i a3, a1, PT_THREADPTR
wur a3, threadptr wur a3, threadptr
#endif #endif
2: j common_exception_exit j common_exception_exit
/* This is the kernel exception exit. /* This is the kernel exception exit.
* We avoided to do a MOVSP when we entered the exception, but we * We avoided to do a MOVSP when we entered the exception, but we
...@@ -1561,6 +1622,13 @@ ENTRY(fast_second_level_miss) ...@@ -1561,6 +1622,13 @@ ENTRY(fast_second_level_miss)
rfde rfde
9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
bnez a0, 8b
/* Even more unlikely case active_mm == 0.
* We can get here with NMI in the middle of context_switch that
* touches vmalloc area.
*/
movi a0, init_mm
j 8b j 8b
#if (DCACHE_WAY_SIZE > PAGE_SIZE) #if (DCACHE_WAY_SIZE > PAGE_SIZE)
...@@ -1820,7 +1888,7 @@ ENDPROC(system_call) ...@@ -1820,7 +1888,7 @@ ENDPROC(system_call)
mov a12, a0 mov a12, a0
.endr .endr
#endif #endif
_entry a1, 48 _entry a1, 16
#if XCHAL_NUM_AREGS % 12 == 0 #if XCHAL_NUM_AREGS % 12 == 0
mov a8, a8 mov a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4 #elif XCHAL_NUM_AREGS % 12 == 4
...@@ -1844,7 +1912,7 @@ ENDPROC(system_call) ...@@ -1844,7 +1912,7 @@ ENDPROC(system_call)
ENTRY(_switch_to) ENTRY(_switch_to)
entry a1, 16 entry a1, 48
mov a11, a3 # and 'next' (a3) mov a11, a3 # and 'next' (a3)
...@@ -1864,10 +1932,8 @@ ENTRY(_switch_to) ...@@ -1864,10 +1932,8 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer. */ /* Disable ints while we manipulate the stack pointer. */
rsil a14, LOCKLEVEL irq_save a14, a3
rsr a3, excsave1
rsync rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
/* Switch CPENABLE */ /* Switch CPENABLE */
...@@ -1888,9 +1954,7 @@ ENTRY(_switch_to) ...@@ -1888,9 +1954,7 @@ ENTRY(_switch_to)
*/ */
rsr a3, excsave1 # exc_table rsr a3, excsave1 # exc_table
movi a6, 0
addi a7, a5, PT_REGS_OFFSET addi a7, a5, PT_REGS_OFFSET
s32i a6, a3, EXC_TABLE_FIXUP
s32i a7, a3, EXC_TABLE_KSTK s32i a7, a3, EXC_TABLE_KSTK
/* restore context of the task 'next' */ /* restore context of the task 'next' */
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/platform.h> #include <asm/platform.h>
atomic_t irq_err_count; DECLARE_PER_CPU(unsigned long, nmi_count);
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{ {
...@@ -57,11 +57,16 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) ...@@ -57,11 +57,16 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
int arch_show_interrupts(struct seq_file *p, int prec) int arch_show_interrupts(struct seq_file *p, int prec)
{ {
unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
show_ipi_list(p, prec); show_ipi_list(p, prec);
#endif #endif
seq_printf(p, "%*s: ", prec, "ERR"); #if XTENSA_FAKE_NMI
seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); seq_printf(p, "%*s:", prec, "NMI");
for_each_online_cpu(cpu)
seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
seq_puts(p, " Non-maskable interrupts\n");
#endif
return 0; return 0;
} }
...@@ -106,6 +111,12 @@ int xtensa_irq_map(struct irq_domain *d, unsigned int irq, ...@@ -106,6 +111,12 @@ int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler_name(irq, irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
handle_percpu_irq, "timer"); handle_percpu_irq, "timer");
irq_clear_status_flags(irq, IRQ_LEVEL); irq_clear_status_flags(irq, IRQ_LEVEL);
#ifdef XCHAL_INTTYPE_MASK_PROFILING
} else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
irq_set_chip_and_handler_name(irq, irq_chip,
handle_percpu_irq, "profiling");
irq_set_status_flags(irq, IRQ_LEVEL);
#endif
} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
/* XCHAL_INTTYPE_MASK_NMI */ /* XCHAL_INTTYPE_MASK_NMI */
irq_set_chip_and_handler_name(irq, irq_chip, irq_set_chip_and_handler_name(irq, irq_chip,
......
/* /*
* arch/xtensa/kernel/pci-dma.c
*
* DMA coherent memory allocation. * DMA coherent memory allocation.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
...@@ -9,6 +7,7 @@ ...@@ -9,6 +7,7 @@
* option) any later version. * option) any later version.
* *
* Copyright (C) 2002 - 2005 Tensilica Inc. * Copyright (C) 2002 - 2005 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
* *
* Based on version for i386. * Based on version for i386.
* *
...@@ -25,13 +24,107 @@ ...@@ -25,13 +24,107 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
__flush_invalidate_dcache_range((unsigned long)vaddr, size);
break;
case DMA_FROM_DEVICE:
__invalidate_dcache_range((unsigned long)vaddr, size);
break;
case DMA_TO_DEVICE:
__flush_dcache_range((unsigned long)vaddr, size);
break;
case DMA_NONE:
BUG();
break;
}
}
EXPORT_SYMBOL(dma_cache_sync);
static void xtensa_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
void *vaddr;
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_FROM_DEVICE:
vaddr = bus_to_virt(dma_handle);
__invalidate_dcache_range((unsigned long)vaddr, size);
break;
case DMA_NONE:
BUG();
break;
default:
break;
}
}
static void xtensa_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
void *vaddr;
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_TO_DEVICE:
vaddr = bus_to_virt(dma_handle);
__flush_dcache_range((unsigned long)vaddr, size);
break;
case DMA_NONE:
BUG();
break;
default:
break;
}
}
static void xtensa_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
}
static void xtensa_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_device(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
}
/* /*
* Note: We assume that the full memory space is always mapped to 'kseg' * Note: We assume that the full memory space is always mapped to 'kseg'
* Otherwise we have to use page attributes (not implemented). * Otherwise we have to use page attributes (not implemented).
*/ */
void * static void *xtensa_dma_alloc(struct device *dev, size_t size,
dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) dma_addr_t *handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
unsigned long ret; unsigned long ret;
unsigned long uncached = 0; unsigned long uncached = 0;
...@@ -52,20 +145,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) ...@@ -52,20 +145,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
*handle = virt_to_bus((void *)ret);
__invalidate_dcache_range(ret, size);
if (ret != 0) { return (void *)uncached;
memset((void*) ret, 0, size);
uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
*handle = virt_to_bus((void*)ret);
__flush_invalidate_dcache_range(ret, size);
}
return (void*)uncached;
} }
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *hwdev, size_t size, static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
void *vaddr, dma_addr_t dma_handle) dma_addr_t dma_handle, struct dma_attrs *attrs)
{ {
unsigned long addr = (unsigned long)vaddr + unsigned long addr = (unsigned long)vaddr +
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
...@@ -75,24 +163,79 @@ void dma_free_coherent(struct device *hwdev, size_t size, ...@@ -75,24 +163,79 @@ void dma_free_coherent(struct device *hwdev, size_t size,
free_pages(addr, get_order(size)); free_pages(addr, get_order(size));
} }
EXPORT_SYMBOL(dma_free_coherent);
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t dma_handle = page_to_phys(page) + offset;
BUG_ON(PageHighMem(page));
xtensa_sync_single_for_device(dev, dma_handle, size, dir);
return dma_handle;
}
void consistent_sync(void *vaddr, size_t size, int direction) static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{ {
switch (direction) { xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
case PCI_DMA_NONE: }
BUG();
case PCI_DMA_FROMDEVICE: /* invalidate only */
__invalidate_dcache_range((unsigned long)vaddr,
(unsigned long)size);
break;
case PCI_DMA_TODEVICE: /* writeback only */ static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ int nents, enum dma_data_direction dir,
__flush_invalidate_dcache_range((unsigned long)vaddr, struct dma_attrs *attrs)
(unsigned long)size); {
break; struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
s->length, dir, attrs);
}
return nents;
}
static void xtensa_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
} }
} }
EXPORT_SYMBOL(consistent_sync);
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
struct dma_map_ops xtensa_dma_map_ops = {
.alloc = xtensa_dma_alloc,
.free = xtensa_dma_free,
.map_page = xtensa_map_page,
.unmap_page = xtensa_unmap_page,
.map_sg = xtensa_map_sg,
.unmap_sg = xtensa_unmap_sg,
.sync_single_for_cpu = xtensa_sync_single_for_cpu,
.sync_single_for_device = xtensa_sync_single_for_device,
.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
.sync_sg_for_device = xtensa_sync_sg_for_device,
.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init xtensa_dma_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
fs_initcall(xtensa_dma_init);
/* /*
* arch/xtensa/kernel/stacktrace.c * Kernel and userspace stack tracing.
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2013 Tensilica Inc. * Copyright (C) 2001 - 2013 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -13,6 +14,170 @@ ...@@ -13,6 +14,170 @@
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
/* Address of common_exception_return, used to check the
* transition from kernel to user space.
*/
extern int common_exception_return;
/* A struct that maps to the part of the frame containing the a0 and
* a1 registers.
*/
struct frame_start {
unsigned long a0;
unsigned long a1;
};
void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
int (*ufn)(struct stackframe *frame, void *data),
void *data)
{
unsigned long windowstart = regs->windowstart;
unsigned long windowbase = regs->windowbase;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
unsigned long pc = regs->pc;
struct stackframe frame;
int index;
if (!depth--)
return;
frame.pc = pc;
frame.sp = a1;
if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
return;
/* Two steps:
*
* 1. Look through the register window for the
* previous PCs in the call trace.
*
* 2. Look on the stack.
*/
/* Step 1. */
/* Rotate WINDOWSTART to move the bit corresponding to
* the current window to the bit #0.
*/
windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
/* Look for bits that are set, they correspond to
* valid windows.
*/
for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
if (windowstart & (1 << index)) {
/* Get the PC from a0 and a1. */
pc = MAKE_PC_FROM_RA(a0, pc);
/* Read a0 and a1 from the
* corresponding position in AREGs.
*/
a0 = regs->areg[index * 4];
a1 = regs->areg[index * 4 + 1];
frame.pc = pc;
frame.sp = a1;
if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
return;
}
/* Step 2. */
/* We are done with the register window, we need to
* look through the stack.
*/
if (!depth)
return;
/* Start from the a1 register. */
/* a1 = regs->areg[1]; */
while (a0 != 0 && depth--) {
struct frame_start frame_start;
/* Get the location for a1, a0 for the
* previous frame from the current a1.
*/
unsigned long *psp = (unsigned long *)a1;
psp -= 4;
/* Check if the region is OK to access. */
if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
return;
/* Copy a1, a0 from user space stack frame. */
if (__copy_from_user_inatomic(&frame_start, psp,
sizeof(frame_start)))
return;
pc = MAKE_PC_FROM_RA(a0, pc);
a0 = frame_start.a0;
a1 = frame_start.a1;
frame.pc = pc;
frame.sp = a1;
if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
return;
}
}
EXPORT_SYMBOL(xtensa_backtrace_user);
void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
int (*kfn)(struct stackframe *frame, void *data),
int (*ufn)(struct stackframe *frame, void *data),
void *data)
{
unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ?
regs->depc : regs->pc;
unsigned long sp_start, sp_end;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
sp_start = a1 & ~(THREAD_SIZE - 1);
sp_end = sp_start + THREAD_SIZE;
/* Spill the register window to the stack first. */
spill_registers();
/* Read the stack frames one by one and create the PC
* from the a0 and a1 registers saved there.
*/
while (a1 > sp_start && a1 < sp_end && depth--) {
struct stackframe frame;
unsigned long *psp = (unsigned long *)a1;
frame.pc = pc;
frame.sp = a1;
if (kernel_text_address(pc) && kfn(&frame, data))
return;
if (pc == (unsigned long)&common_exception_return) {
regs = (struct pt_regs *)a1;
if (user_mode(regs)) {
if (ufn == NULL)
return;
xtensa_backtrace_user(regs, depth, ufn, data);
return;
}
a0 = regs->areg[0];
a1 = regs->areg[1];
continue;
}
sp_start = a1;
pc = MAKE_PC_FROM_RA(a0, pc);
a0 = *(psp - 4);
a1 = *(psp - 3);
}
}
EXPORT_SYMBOL(xtensa_backtrace_kernel);
#endif
void walk_stackframe(unsigned long *sp, void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data), int (*fn)(struct stackframe *frame, void *data),
......
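The exported helpers take per-frame callbacks, which is how both the oprofile code (below) and the new perf code consume them. A minimal sketch of a caller, assuming it already holds a valid struct pt_regs; the printing callback is made up for illustration:

/* Illustrative only: dump up to 32 kernel frames via the new helper. */
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

static int print_frame_cb(struct stackframe *frame, void *data)
{
	pr_info("  pc: %pS sp: %08lx\n", (void *)frame->pc, frame->sp);
	return 0;	/* a non-zero return would stop the walk */
}

static void example_dump_trace(struct pt_regs *regs)
{
	/* kfn handles kernel frames; ufn (here the same callback) takes
	 * over once the walk crosses common_exception_return into a
	 * user-mode frame.
	 */
	xtensa_backtrace_kernel(regs, 32, print_frame_cb,
				print_frame_cb, NULL);
}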
...@@ -62,6 +62,7 @@ extern void fast_coprocessor(void); ...@@ -62,6 +62,7 @@ extern void fast_coprocessor(void);
extern void do_illegal_instruction (struct pt_regs*); extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*); extern void do_interrupt (struct pt_regs*);
extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user (struct pt_regs*); extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long); extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long); extern void do_page_fault (struct pt_regs*, unsigned long);
...@@ -146,6 +147,9 @@ COPROCESSOR(6), ...@@ -146,6 +147,9 @@ COPROCESSOR(6),
#if XTENSA_HAVE_COPROCESSOR(7) #if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7), COPROCESSOR(7),
#endif #endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
{ -1, -1, 0 } { -1, -1, 0 }
...@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) ...@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
extern void do_IRQ(int, struct pt_regs *); extern void do_IRQ(int, struct pt_regs *);
#if XTENSA_FAKE_NMI
irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
DEFINE_PER_CPU(unsigned long, nmi_count);
void do_nmi(struct pt_regs *regs)
{
struct pt_regs *old_regs;
if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
trace_hardirqs_off();
old_regs = set_irq_regs(regs);
nmi_enter();
++*this_cpu_ptr(&nmi_count);
xtensa_pmu_irq_handler(0, NULL);
nmi_exit();
set_irq_regs(old_regs);
}
#endif
void do_interrupt(struct pt_regs *regs) void do_interrupt(struct pt_regs *regs)
{ {
static const unsigned int_level_mask[] = { static const unsigned int_level_mask[] = {
...@@ -211,8 +237,11 @@ void do_interrupt(struct pt_regs *regs) ...@@ -211,8 +237,11 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK, XCHAL_INTLEVEL7_MASK,
}; };
struct pt_regs *old_regs = set_irq_regs(regs); struct pt_regs *old_regs;
trace_hardirqs_off();
old_regs = set_irq_regs(regs);
irq_enter(); irq_enter();
for (;;) { for (;;) {
......
...@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector) ...@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector)
wsr a0, excsave2 wsr a0, excsave2
rsr a0, epc\level rsr a0, epc\level
wsr a0, epc1 wsr a0, epc1
.if \level <= LOCKLEVEL
movi a0, EXCCAUSE_LEVEL1_INTERRUPT movi a0, EXCCAUSE_LEVEL1_INTERRUPT
.else
movi a0, EXCCAUSE_MAPPED_NMI
.endif
wsr a0, exccause wsr a0, exccause
rsr a0, eps\level rsr a0, eps\level
# branch to user or kernel vector # branch to user or kernel vector
...@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4) ...@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4)
.align 4 .align 4
_SimulateUserKernelVectorException: _SimulateUserKernelVectorException:
addi a0, a0, (1 << PS_EXCM_BIT) addi a0, a0, (1 << PS_EXCM_BIT)
#if !XTENSA_FAKE_NMI
wsr a0, ps wsr a0, ps
#endif
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
rsr a0, excsave2 # restore a0 xsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception j _KernelExceptionVector # simulate kernel vector exception
1: rsr a0, excsave2 # restore a0 1: xsr a0, excsave2 # restore a0
j _UserExceptionVector # simulate user vector exception j _UserExceptionVector # simulate user vector exception
#endif #endif
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -142,6 +143,12 @@ void do_page_fault(struct pt_regs *regs) ...@@ -142,6 +143,12 @@ void do_page_fault(struct pt_regs *regs)
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (flags & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
else if (flags & VM_FAULT_MINOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
return; return;
/* Something tried to access memory that isn't in our memory map.. /* Something tried to access memory that isn't in our memory map..
......
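With these software events wired up, page-fault counts become visible to ordinary perf tooling. A rough standalone userspace sketch (not part of the patch) that counts its own page faults through perf_event_open:

/* Illustrative only: count this process's page faults using the
 * PERF_COUNT_SW_PAGE_FAULTS software event.  Error handling trimmed.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.disabled = 1;

	/* pid = 0 (self), cpu = -1 (any), no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... do some work that touches new pages ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("page faults: %lld\n", count);
	close(fd);
	return 0;
}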
...@@ -2,168 +2,26 @@ ...@@ -2,168 +2,26 @@
* @file backtrace.c * @file backtrace.c
* *
* @remark Copyright 2008 Tensilica Inc. * @remark Copyright 2008 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
* @remark Read the file COPYING * @remark Read the file COPYING
* *
*/ */
#include <linux/oprofile.h> #include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/uaccess.h> #include <asm/stacktrace.h>
#include <asm/traps.h>
/* Address of common_exception_return, used to check the static int xtensa_backtrace_cb(struct stackframe *frame, void *data)
* transition from kernel to user space.
*/
extern int common_exception_return;
/* A struct that maps to the part of the frame containing the a0 and
* a1 registers.
*/
struct frame_start {
unsigned long a0;
unsigned long a1;
};
static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
{
unsigned long windowstart = regs->windowstart;
unsigned long windowbase = regs->windowbase;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
int index;
/* First add the current PC to the trace. */
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
/* Two steps:
*
* 1. Look through the register window for the
* previous PCs in the call trace.
*
* 2. Look on the stack.
*/
/* Step 1. */
/* Rotate WINDOWSTART to move the bit corresponding to
* the current window to the bit #0.
*/
windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
/* Look for bits that are set, they correspond to
* valid windows.
*/
for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
if (windowstart & (1 << index)) {
/* Read a0 and a1 from the
* corresponding position in AREGs.
*/
a0 = regs->areg[index * 4];
a1 = regs->areg[index * 4 + 1];
/* Get the PC from a0 and a1. */
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
}
/* Step 2. */
/* We are done with the register window, we need to
* look through the stack.
*/
if (depth > 0) {
/* Start from the a1 register. */
/* a1 = regs->areg[1]; */
while (a0 != 0 && depth--) {
struct frame_start frame_start;
/* Get the location for a1, a0 for the
* previous frame from the current a1.
*/
unsigned long *psp = (unsigned long *)a1;
psp -= 4;
/* Check if the region is OK to access. */
if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
return;
/* Copy a1, a0 from user space stack frame. */
if (__copy_from_user_inatomic(&frame_start, psp,
sizeof(frame_start)))
return;
a0 = frame_start.a0;
a1 = frame_start.a1;
pc = MAKE_PC_FROM_RA(a0, pc);
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
}
}
}
static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
{ {
unsigned long pc = regs->pc; oprofile_add_trace(frame->pc);
unsigned long *psp; return 0;
unsigned long sp_start, sp_end;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
sp_start = a1 & ~(THREAD_SIZE-1);
sp_end = sp_start + THREAD_SIZE;
/* Spill the register window to the stack first. */
spill_registers();
/* Read the stack frames one by one and create the PC
* from the a0 and a1 registers saved there.
*/
while (a1 > sp_start && a1 < sp_end && depth--) {
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
oprofile_add_trace(pc);
if (pc == (unsigned long) &common_exception_return) {
regs = (struct pt_regs *)a1;
if (user_mode(regs)) {
pc = regs->pc;
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
return xtensa_backtrace_user(regs, depth);
}
a0 = regs->areg[0];
a1 = regs->areg[1];
continue;
}
psp = (unsigned long *)a1;
a0 = *(psp - 4);
a1 = *(psp - 3);
if (a1 <= (unsigned long)psp)
return;
}
return;
} }
void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth) void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
{ {
if (user_mode(regs)) if (user_mode(regs))
xtensa_backtrace_user(regs, depth); xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL);
else else
xtensa_backtrace_kernel(regs, depth); xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb,
xtensa_backtrace_cb, NULL);
} }
...@@ -105,13 +105,17 @@ static char *split_if_spec(char *str, ...) ...@@ -105,13 +105,17 @@ static char *split_if_spec(char *str, ...)
va_start(ap, str); va_start(ap, str);
while ((arg = va_arg(ap, char**)) != NULL) { while ((arg = va_arg(ap, char**)) != NULL) {
if (*str == '\0') if (*str == '\0') {
va_end(ap);
return NULL; return NULL;
}
end = strchr(str, ','); end = strchr(str, ',');
if (end != str) if (end != str)
*arg = str; *arg = str;
if (end == NULL) if (end == NULL) {
va_end(ap);
return NULL; return NULL;
}
*end++ = '\0'; *end++ = '\0';
str = end; str = end;
} }
......
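The ISS fix applies the general rule that every va_start must be paired with a va_end on every return path, not just the normal one. A minimal standalone illustration of the pattern (the function and its arguments are made up):

/* Illustrative only: varargs parsing with va_end on each exit path,
 * mirroring the split_if_spec fix above.
 */
#include <stdarg.h>
#include <stddef.h>

static const char *first_nonempty(int n, ...)
{
	va_list ap;
	int i;

	va_start(ap, n);
	for (i = 0; i < n; i++) {
		const char *s = va_arg(ap, const char *);

		if (s && s[0] != '\0') {
			va_end(ap);	/* early return still cleans up */
			return s;
		}
	}
	va_end(ap);			/* normal path cleans up too */
	return NULL;
}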
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
libperf-$(CONFIG_DWARF) += dwarf-regs.o
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (c) 2015 Cadence Design Systems Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <stddef.h>
#include <dwarf-regs.h>
#define XTENSA_MAX_REGS 16
const char *xtensa_regs_table[XTENSA_MAX_REGS] = {
"a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"a8", "a9", "a10", "a11", "a12", "a13", "a14", "a15",
};
const char *get_arch_regstr(unsigned int n)
{
return n < XTENSA_MAX_REGS ? xtensa_regs_table[n] : NULL;
}