Commit 9ef10340 authored by Linus Torvalds

Merge tag 'xtensa-20181228' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - switch to generated syscall table

 - switch ptrace to regsets, use regsets for core dumps

 - complete tracehook implementation

 - add syscall tracepoints support

 - add jump labels support

 - add memtest support

 - drop unused/duplicated code from entry.S, ptrace.c, coprocessor.S,
   elf.h and syscall.h

 - clean up warnings caused by WSR/RSR macros

 - clean up DTC warnings about SPI controller node names in xtfpga.dtsi

 - simplify coprocessor.S

 - get rid of explicit 'l32r' instruction usage in assembly

* tag 'xtensa-20181228' of git://github.com/jcmvbkbc/linux-xtensa: (25 commits)
  xtensa: implement jump_label support
  xtensa: implement syscall tracepoints
  xtensa: implement tracehook functions and enable HAVE_ARCH_TRACEHOOK
  xtensa: enable CORE_DUMP_USE_REGSET
  xtensa: implement TIE regset
  xtensa: implement task_user_regset_view
  xtensa: call do_syscall_trace_{enter,leave} selectively
  xtensa: use NO_SYSCALL instead of -1
  xtensa: define syscall_get_arch()
  Move EM_XTENSA to uapi/linux/elf-em.h
  xtensa: support memtest
  xtensa: don't use l32r opcode directly
  xtensa: xtfpga.dtsi: fix dtc warnings about SPI
  xtensa: don't clear cpenable unconditionally on release
  xtensa: simplify coprocessor.S
  xtensa: clean up WSR*/RSR*/get_sr/set_sr
  xtensa: drop unused declarations from elf.h
  xtensa: clean up syscall.h
  xtensa: drop unused coprocessor helper functions
  xtensa: drop custom PTRACE_{PEEK,POKE}{TEXT,DATA}
  ...
parents 889bb743 64711f9a
...@@ -29,5 +29,5 @@ ...@@ -29,5 +29,5 @@
| um: | TODO | | um: | TODO |
| unicore32: | TODO | | unicore32: | TODO |
| x86: | ok | | x86: | ok |
| xtensa: | TODO | | xtensa: | ok |
----------------------- -----------------------
...@@ -16,7 +16,9 @@ config XTENSA ...@@ -16,7 +16,9 @@ config XTENSA
select GENERIC_PCI_IOMAP select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER if KASAN select GENERIC_STRNCPY_FROM_USER if KASAN
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if MMU select HAVE_ARCH_KASAN if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD select HAVE_EXIT_THREAD
...@@ -27,6 +29,7 @@ config XTENSA ...@@ -27,6 +29,7 @@ config XTENSA
select HAVE_OPROFILE select HAVE_OPROFILE
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN select IRQ_DOMAIN
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC select PERF_USE_VMALLOC
......
...@@ -90,6 +90,9 @@ boot := arch/xtensa/boot ...@@ -90,6 +90,9 @@ boot := arch/xtensa/boot
all Image zImage uImage: vmlinux all Image zImage uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@ $(Q)$(MAKE) $(build)=$(boot) $@
archheaders:
$(Q)$(MAKE) $(build)=arch/xtensa/kernel/syscalls all
define archhelp define archhelp
@echo '* Image - Kernel ELF image with reset vector' @echo '* Image - Kernel ELF image with reset vector'
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
......
...@@ -29,17 +29,7 @@ _ResetVector: ...@@ -29,17 +29,7 @@ _ResetVector:
.begin no-absolute-literals .begin no-absolute-literals
.literal_position .literal_position
#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ #ifdef CONFIG_PARSE_BOOTPARAM
XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
.literal RomInitAddr, CONFIG_KERNEL_LOAD_ADDRESS
#else
.literal RomInitAddr, KERNELOFFSET
#endif
#ifndef CONFIG_PARSE_BOOTPARAM
.literal RomBootParam, 0
#else
.literal RomBootParam, _bootparam
.align 4 .align 4
_bootparam: _bootparam:
.short BP_TAG_FIRST .short BP_TAG_FIRST
...@@ -66,13 +56,22 @@ _SetupMMU: ...@@ -66,13 +56,22 @@ _SetupMMU:
initialize_mmu initialize_mmu
#endif #endif
.end no-absolute-literals
rsil a0, XCHAL_DEBUGLEVEL-1 rsil a0, XCHAL_DEBUGLEVEL-1
rsync rsync
reset: reset:
l32r a0, RomInitAddr #if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
l32r a2, RomBootParam XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
movi a0, CONFIG_KERNEL_LOAD_ADDRESS
#else
movi a0, KERNELOFFSET
#endif
#ifdef CONFIG_PARSE_BOOTPARAM
movi a2, _bootparam
#else
movi a2, 0
#endif
movi a3, 0 movi a3, 0
movi a4, 0 movi a4, 0
jx a0 jx a0
.end no-absolute-literals
...@@ -103,7 +103,7 @@ cdce706: clock-synth@69 { ...@@ -103,7 +103,7 @@ cdce706: clock-synth@69 {
}; };
}; };
spi0: spi-master@0d0a0000 { spi0: spi@0d0a0000 {
compatible = "cdns,xtfpga-spi"; compatible = "cdns,xtfpga-spi";
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
......
generated-y += syscall_table.h
generic-y += bug.h generic-y += bug.h
generic-y += compat.h generic-y += compat.h
generic-y += device.h generic-y += device.h
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#ifndef _XTENSA_COPROCESSOR_H #ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H #define _XTENSA_COPROCESSOR_H
#include <linux/stringify.h>
#include <variant/core.h> #include <variant/core.h>
#include <variant/tie.h> #include <variant/tie.h>
#include <asm/types.h> #include <asm/types.h>
...@@ -90,19 +89,6 @@ ...@@ -90,19 +89,6 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
/* /*
* Additional registers. * Additional registers.
* We define three types of additional registers: * We define three types of additional registers:
...@@ -157,20 +143,11 @@ typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t ...@@ -157,20 +143,11 @@ typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN))); __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX]; extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
extern void coprocessor_save(void*, int);
extern void coprocessor_load(void*, int);
extern void coprocessor_flush(struct thread_info*, int); extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_restore(struct thread_info*, int);
extern void coprocessor_release_all(struct thread_info*); extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*); extern void coprocessor_flush_all(struct thread_info*);
static inline void coprocessor_clear_cpenable(void)
{
unsigned long i = 0;
WSR_CPENABLE(i);
}
#endif /* XTENSA_HAVE_COPROCESSORS */ #endif /* XTENSA_HAVE_COPROCESSORS */
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -15,10 +15,10 @@ ...@@ -15,10 +15,10 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/coprocessor.h> #include <asm/coprocessor.h>
#include <linux/elf-em.h>
/* Xtensa processor ELF architecture-magic number */ /* Xtensa processor ELF architecture-magic number */
#define EM_XTENSA 94
#define EM_XTENSA_OLD 0xABC7 #define EM_XTENSA_OLD 0xABC7
/* Xtensa relocations defined by the ABIs */ /* Xtensa relocations defined by the ABIs */
...@@ -75,19 +75,7 @@ ...@@ -75,19 +75,7 @@
typedef unsigned long elf_greg_t; typedef unsigned long elf_greg_t;
typedef struct { typedef struct user_pt_regs xtensa_gregset_t;
elf_greg_t pc;
elf_greg_t ps;
elf_greg_t lbeg;
elf_greg_t lend;
elf_greg_t lcount;
elf_greg_t sar;
elf_greg_t windowstart;
elf_greg_t windowbase;
elf_greg_t threadptr;
elf_greg_t reserved[7+48];
elf_greg_t a[64];
} xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t)) #define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
...@@ -98,11 +86,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; ...@@ -98,11 +86,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef unsigned int elf_fpreg_t; typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
/* /*
* This is used to ensure we don't load something for the wrong architecture. * This is used to ensure we don't load something for the wrong architecture.
*/ */
...@@ -126,6 +109,7 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); ...@@ -126,6 +109,7 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define ELF_ARCH EM_XTENSA #define ELF_ARCH EM_XTENSA
#define ELF_EXEC_PAGESIZE PAGE_SIZE #define ELF_EXEC_PAGESIZE PAGE_SIZE
#define CORE_DUMP_USE_REGSET
/* /*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical * This is the location that an ET_DYN program is loaded if exec'ed. Typical
...@@ -193,15 +177,4 @@ typedef struct { ...@@ -193,15 +177,4 @@ typedef struct {
#define SET_PERSONALITY(ex) \ #define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK))) set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
struct task_struct;
extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
#endif /* _XTENSA_ELF_H */ #endif /* _XTENSA_ELF_H */
...@@ -32,8 +32,8 @@ ...@@ -32,8 +32,8 @@
"3:\n" \ "3:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .align 4\n" \ " .align 4\n" \
"4: .long 3b\n" \ " .literal_position\n" \
"5: l32r %0, 4b\n" \ "5: movi %0, 3b\n" \
" movi %1, %3\n" \ " movi %1, %3\n" \
" jx %0\n" \ " jx %0\n" \
" .previous\n" \ " .previous\n" \
...@@ -108,8 +108,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -108,8 +108,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"2:\n" "2:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
" .align 4\n" " .align 4\n"
"3: .long 2b\n" " .literal_position\n"
"4: l32r %1, 3b\n" "4: movi %1, 2b\n"
" movi %0, %7\n" " movi %0, %7\n"
" jx %1\n" " jx %1\n"
" .previous\n" " .previous\n"
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#ifndef _XTENSA_IRQFLAGS_H #ifndef _XTENSA_IRQFLAGS_H
#define _XTENSA_IRQFLAGS_H #define _XTENSA_IRQFLAGS_H
#include <linux/stringify.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/processor.h> #include <asm/processor.h>
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Cadence Design Systems Inc. */
#ifndef _ASM_XTENSA_JUMP_LABEL_H
#define _ASM_XTENSA_JUMP_LABEL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 3
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto("1:\n\t"
"_nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
/*
* Xtensa assembler will mark certain points in the code
* as unreachable, so that later assembler or linker relaxation
* passes could use them. A spot right after the J instruction
* is one such point. Assembler and/or linker may insert padding
* or literals here, breaking code flow in case the J instruction
* is later replaced with NOP. Put a label right after the J to
* make it reachable and wrap both into a no-transform block
* to avoid any assembler interference with this.
*/
asm_volatile_goto("1:\n\t"
".begin no-transform\n\t"
"_j %l[l_yes]\n\t"
"2:\n\t"
".end no-transform\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
typedef u32 jump_label_t;
struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};
#endif /* __ASSEMBLY__ */
#endif
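
For context, here is a minimal sketch of how kernel code consumes this support through the generic static-key API; the key name and the branch body are illustrative, not taken from this series:

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key; it defaults to false, so the branch below stays a NOP
 * until the key is enabled. */
static DEFINE_STATIC_KEY_FALSE(xtensa_demo_key);

static void demo_fast_path(void)
{
	if (static_branch_unlikely(&xtensa_demo_key)) {
		/* Reached only after arch_jump_label_transform() has
		 * rewritten the NOP emitted by arch_static_branch()
		 * into a J instruction targeting this block. */
		pr_info("slow path enabled\n");
	}
}

/* Flipping the key at runtime triggers the text patching in jump_label.c. */
void demo_enable(void)
{
	static_branch_enable(&xtensa_demo_key);
}
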
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <variant/core.h> #include <variant/core.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/regs.h> #include <asm/regs.h>
...@@ -212,11 +213,18 @@ extern unsigned long get_wchan(struct task_struct *p); ...@@ -212,11 +213,18 @@ extern unsigned long get_wchan(struct task_struct *p);
/* Special register access. */ /* Special register access. */
#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v)); #define xtensa_set_sr(x, sr) \
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v)); ({ \
unsigned int v = (unsigned int)(x); \
#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) })
#define xtensa_get_sr(sr) \
({ \
unsigned int v; \
__asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
v; \
})
#ifndef XCHAL_HAVE_EXTERN_REGS #ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0 #define XCHAL_HAVE_EXTERN_REGS 0
......
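
A short usage sketch of the renamed accessors; SAR is chosen here only as a convenient special register, and the helper name is made up:

/* Equivalent of the old get_sr()/set_sr() pair.  Note the value-first,
 * register-second argument order of xtensa_set_sr(). */
static inline void demo_sr_roundtrip(void)
{
	unsigned int val = xtensa_get_sr(sar);	/* rsr aN, sar */

	xtensa_set_sr(val, sar);		/* wsr aN, sar */
}
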
...@@ -39,6 +39,8 @@ ...@@ -39,6 +39,8 @@
* +-----------------------+ -------- * +-----------------------+ --------
*/ */
#define NO_SYSCALL (-1)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/coprocessor.h> #include <asm/coprocessor.h>
...@@ -100,6 +102,11 @@ struct pt_regs { ...@@ -100,6 +102,11 @@ struct pt_regs {
#define user_stack_pointer(regs) ((regs)->areg[1]) #define user_stack_pointer(regs) ((regs)->areg[1])
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->areg[2];
}
#else /* __ASSEMBLY__ */ #else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h> # include <asm/asm-offsets.h>
......
/* /*
* include/asm-xtensa/syscall.h
*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2007 Tensilica Inc. * Copyright (C) 2001 - 2007 Tensilica Inc.
* Copyright (C) 2018 Cadence Design Systems Inc.
*/ */
struct pt_regs; #ifndef _ASM_SYSCALL_H
asmlinkage long xtensa_ptrace(long, long, long, long); #define _ASM_SYSCALL_H
asmlinkage long xtensa_sigreturn(struct pt_regs*);
#include <linux/err.h>
#include <asm/ptrace.h>
#include <uapi/linux/audit.h>
static inline int syscall_get_arch(void)
{
return AUDIT_ARCH_XTENSA;
}
typedef void (*syscall_t)(void);
extern syscall_t sys_call_table[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return regs->syscall;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
/* Do nothing. */
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
/* 0 if syscall succeeded, otherwise -Errorcode */
return IS_ERR_VALUE(regs->areg[2]) ? regs->areg[2] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->areg[2];
}
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->areg[0] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
#define XTENSA_SYSCALL_ARGUMENT_REGS {6, 3, 4, 5, 8, 9}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
unsigned int j;
if (n == 0)
return;
WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
for (j = 0; j < n; ++j) {
if (i + j < SYSCALL_MAX_ARGS)
args[j] = regs->areg[reg[i + j]];
else
args[j] = 0;
}
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
unsigned int j;
if (n == 0)
return;
if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
if (i < SYSCALL_MAX_ARGS)
n = SYSCALL_MAX_ARGS - i;
else
return;
}
for (j = 0; j < n; ++j)
regs->areg[reg[i + j]] = args[j];
}
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
asmlinkage long xtensa_shmat(int, char __user *, int); asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int, asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long); unsigned long long, unsigned long long);
/* Should probably move to linux/syscalls.h */ #endif
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec __user *tsp,
void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec __user *tsp,
const sigset_t __user *sigmask,
size_t sigsetsize);
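
As a worked example of the register mapping above, a sketch of how a tracer-side helper could pull the number and all six arguments of a stopped task's syscall (the function name and pr_debug format are illustrative):

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Argument 0 comes from a6, argument 1 from a3, and so on, per
 * XTENSA_SYSCALL_ARGUMENT_REGS; the syscall number itself is in a2. */
static void demo_dump_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[SYSCALL_MAX_ARGS];

	syscall_get_arguments(task, regs, 0, SYSCALL_MAX_ARGS, args);
	pr_debug("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
		 syscall_get_nr(task, regs),
		 args[0], args[1], args[2], args[3], args[4], args[5]);
}
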
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#ifndef _XTENSA_THREAD_INFO_H #ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H #define _XTENSA_THREAD_INFO_H
#include <linux/stringify.h>
#include <asm/kmem_layout.h> #include <asm/kmem_layout.h>
#define CURRENT_SHIFT KERNEL_STACK_SHIFT #define CURRENT_SHIFT KERNEL_STACK_SHIFT
...@@ -100,13 +101,12 @@ static inline struct thread_info *current_thread_info(void) ...@@ -100,13 +101,12 @@ static inline struct thread_info *current_thread_info(void)
/* /*
* thread information flags * thread information flags
* - these are process state flags that various assembly files may need to access * - these are process state flags that various assembly files may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/ */
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */ #define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
...@@ -116,9 +116,10 @@ static inline struct thread_info *current_thread_info(void) ...@@ -116,9 +116,10 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ _TIF_SYSCALL_TRACEPOINT)
/* /*
* Thread-synchronous status. * Thread-synchronous status.
......
...@@ -10,7 +10,6 @@ ...@@ -10,7 +10,6 @@
#define _XTENSA_TIMEX_H #define _XTENSA_TIMEX_H
#include <asm/processor.h> #include <asm/processor.h>
#include <linux/stringify.h>
#if XCHAL_NUM_TIMERS > 0 && \ #if XCHAL_NUM_TIMERS > 0 && \
XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
...@@ -40,33 +39,24 @@ void local_timer_setup(unsigned cpu); ...@@ -40,33 +39,24 @@ void local_timer_setup(unsigned cpu);
* Register access. * Register access.
*/ */
#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void) static inline unsigned long get_ccount (void)
{ {
unsigned long ccount; return xtensa_get_sr(ccount);
RSR_CCOUNT(ccount);
return ccount;
} }
static inline void set_ccount (unsigned long ccount) static inline void set_ccount (unsigned long ccount)
{ {
WSR_CCOUNT(ccount); xtensa_set_sr(ccount, ccount);
} }
static inline unsigned long get_linux_timer (void) static inline unsigned long get_linux_timer (void)
{ {
unsigned ccompare; return xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER);
RSR_CCOMPARE(LINUX_TIMER, ccompare);
return ccompare;
} }
static inline void set_linux_timer (unsigned long ccompare) static inline void set_linux_timer (unsigned long ccompare)
{ {
WSR_CCOMPARE(LINUX_TIMER, ccompare); xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
} }
#endif /* _XTENSA_TIMEX_H */ #endif /* _XTENSA_TIMEX_H */
...@@ -25,8 +25,6 @@ struct exc_table { ...@@ -25,8 +25,6 @@ struct exc_table {
void *fixup; void *fixup;
/* For passing a parameter to fixup */ /* For passing a parameter to fixup */
void *fixup_param; void *fixup_param;
/* For fast syscall handler */
unsigned long syscall_save;
/* Fast user exception handlers */ /* Fast user exception handlers */
void *fast_user_handler[EXCCAUSE_N]; void *fast_user_handler[EXCCAUSE_N];
/* Fast kernel exception handlers */ /* Fast kernel exception handlers */
......
...@@ -159,10 +159,9 @@ __asm__ __volatile__( \ ...@@ -159,10 +159,9 @@ __asm__ __volatile__( \
"2: \n" \ "2: \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
" .align 4 \n" \ " .align 4 \n" \
"4: \n" \ " .literal_position \n" \
" .long 2b \n" \
"5: \n" \ "5: \n" \
" l32r %1, 4b \n" \ " movi %1, 2b \n" \
" movi %0, %4 \n" \ " movi %0, %4 \n" \
" jx %1 \n" \ " jx %1 \n" \
" .previous \n" \ " .previous \n" \
...@@ -217,10 +216,9 @@ __asm__ __volatile__( \ ...@@ -217,10 +216,9 @@ __asm__ __volatile__( \
"2: \n" \ "2: \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
" .align 4 \n" \ " .align 4 \n" \
"4: \n" \ " .literal_position \n" \
" .long 2b \n" \
"5: \n" \ "5: \n" \
" l32r %1, 4b \n" \ " movi %1, 2b \n" \
" movi %2, 0 \n" \ " movi %2, 0 \n" \
" movi %0, %4 \n" \ " movi %0, %4 \n" \
" jx %1 \n" \ " jx %1 \n" \
......
...@@ -22,4 +22,6 @@ ...@@ -22,4 +22,6 @@
#define __IGNORE_vfork /* use clone */ #define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */ #define __IGNORE_fadvise64 /* use fadvise64_64 */
#define NR_syscalls __NR_syscalls
#endif /* _XTENSA_UNISTD_H */ #endif /* _XTENSA_UNISTD_H */
# UAPI Header export list # UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generic-y += bitsperlong.h generic-y += bitsperlong.h
generic-y += bpf_perf_event.h generic-y += bpf_perf_event.h
generic-y += errno.h generic-y += errno.h
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#ifndef _UAPI_XTENSA_PTRACE_H #ifndef _UAPI_XTENSA_PTRACE_H
#define _UAPI_XTENSA_PTRACE_H #define _UAPI_XTENSA_PTRACE_H
#include <linux/types.h>
/* Registers used by strace */ /* Registers used by strace */
#define REG_A_BASE 0x0000 #define REG_A_BASE 0x0000
...@@ -36,5 +38,21 @@ ...@@ -36,5 +38,21 @@
#define PTRACE_GETHBPREGS 20 #define PTRACE_GETHBPREGS 20
#define PTRACE_SETHBPREGS 21 #define PTRACE_SETHBPREGS 21
#ifndef __ASSEMBLY__
struct user_pt_regs {
__u32 pc;
__u32 ps;
__u32 lbeg;
__u32 lend;
__u32 lcount;
__u32 sar;
__u32 windowstart;
__u32 windowbase;
__u32 threadptr;
__u32 reserved[7 + 48];
__u32 a[64];
};
#endif
#endif /* _UAPI_XTENSA_PTRACE_H */ #endif /* _UAPI_XTENSA_PTRACE_H */
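
With ptrace switched to regsets, user space can fetch this structure through PTRACE_GETREGSET. A minimal user-space sketch (error handling elided; the tracee is assumed to be attached and stopped):

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>		/* NT_PRSTATUS */
#include <asm/ptrace.h>		/* struct user_pt_regs */

static void dump_pc(pid_t pid)
{
	struct user_pt_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	/* NT_PRSTATUS selects the general-purpose regset. */
	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
	printf("pc=%08x ps=%08x a1=%08x\n", regs.pc, regs.ps, regs.a[1]);
}
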
...@@ -16,6 +16,7 @@ obj-$(CONFIG_SMP) += smp.o mxhead.o ...@@ -16,6 +16,7 @@ obj-$(CONFIG_SMP) += smp.o mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
# In the Xtensa architecture, assembly generates literals which must always # In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB. # precede the L32R instruction with a relative offset less than 256 kB.
......
...@@ -137,8 +137,6 @@ int main(void) ...@@ -137,8 +137,6 @@ int main(void)
DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
DEFINE(EXC_TABLE_SYSCALL_SAVE,
offsetof(struct exc_table, syscall_save));
DEFINE(EXC_TABLE_FAST_USER, DEFINE(EXC_TABLE_FAST_USER,
offsetof(struct exc_table, fast_user_handler)); offsetof(struct exc_table, fast_user_handler));
DEFINE(EXC_TABLE_FAST_KERNEL, DEFINE(EXC_TABLE_FAST_KERNEL,
......
...@@ -33,16 +33,16 @@ ...@@ -33,16 +33,16 @@
*/ */
#define SAVE_CP_REGS(x) \ #define SAVE_CP_REGS(x) \
.align 4; \
.Lsave_cp_regs_cp##x: \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lsave_cp_regs_cp##x: \
xchal_cp##x##_store a2 a4 a5 a6 a7; \ xchal_cp##x##_store a2 a4 a5 a6 a7; \
.endif; \ jx a0; \
jx a0 .endif
#define SAVE_CP_REGS_TAB(x) \ #define SAVE_CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \ .long .Lsave_cp_regs_cp##x; \
.else; \ .else; \
.long 0; \ .long 0; \
.endif; \ .endif; \
...@@ -50,16 +50,16 @@ ...@@ -50,16 +50,16 @@
#define LOAD_CP_REGS(x) \ #define LOAD_CP_REGS(x) \
.align 4; \
.Lload_cp_regs_cp##x: \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lload_cp_regs_cp##x: \
xchal_cp##x##_load a2 a4 a5 a6 a7; \ xchal_cp##x##_load a2 a4 a5 a6 a7; \
.endif; \ jx a0; \
jx a0 .endif
#define LOAD_CP_REGS_TAB(x) \ #define LOAD_CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \ .long .Lload_cp_regs_cp##x; \
.else; \ .else; \
.long 0; \ .long 0; \
.endif; \ .endif; \
...@@ -83,6 +83,7 @@ ...@@ -83,6 +83,7 @@
LOAD_CP_REGS(6) LOAD_CP_REGS(6)
LOAD_CP_REGS(7) LOAD_CP_REGS(7)
.section ".rodata", "a"
.align 4 .align 4
.Lsave_cp_regs_jump_table: .Lsave_cp_regs_jump_table:
SAVE_CP_REGS_TAB(0) SAVE_CP_REGS_TAB(0)
...@@ -104,64 +105,20 @@ ...@@ -104,64 +105,20 @@
LOAD_CP_REGS_TAB(6) LOAD_CP_REGS_TAB(6)
LOAD_CP_REGS_TAB(7) LOAD_CP_REGS_TAB(7)
/* .previous
* coprocessor_save(buffer, index)
* a2 a3
* coprocessor_load(buffer, index)
* a2 a3
*
* Save or load coprocessor registers for coprocessor 'index'.
* The register values are saved to or loaded from them 'buffer' address.
*
* Note that these functions don't update the coprocessor_owner information!
*
*/
ENTRY(coprocessor_save)
entry a1, 32
s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table
addx8 a3, a3, a0
l32i a3, a3, 0
beqz a3, 1f
add a0, a0, a3
callx0 a0
1: l32i a0, a1, 0
retw
ENDPROC(coprocessor_save)
ENTRY(coprocessor_load)
entry a1, 32
s32i a0, a1, 0
movi a0, .Lload_cp_regs_jump_table
addx4 a3, a3, a0
l32i a3, a3, 0
beqz a3, 1f
add a0, a0, a3
callx0 a0
1: l32i a0, a1, 0
retw
ENDPROC(coprocessor_load)
/* /*
* coprocessor_flush(struct task_info*, index) * coprocessor_flush(struct thread_info*, index)
* a2 a3 * a2 a3
* coprocessor_restore(struct task_info*, index)
* a2 a3
* *
* Save or load coprocessor registers for coprocessor 'index'. * Save coprocessor registers for coprocessor 'index'.
* The register values are saved to or loaded from the coprocessor area * The register values are saved to or loaded from the coprocessor area
* inside the task_info structure. * inside the task_info structure.
* *
* Note that these functions don't update the coprocessor_owner information! * Note that this function doesn't update the coprocessor_owner information!
* *
*/ */
ENTRY(coprocessor_flush) ENTRY(coprocessor_flush)
entry a1, 32 entry a1, 32
...@@ -172,29 +129,12 @@ ENTRY(coprocessor_flush) ...@@ -172,29 +129,12 @@ ENTRY(coprocessor_flush)
l32i a3, a3, 0 l32i a3, a3, 0
add a2, a2, a4 add a2, a2, a4
beqz a3, 1f beqz a3, 1f
add a0, a0, a3 callx0 a3
callx0 a0
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw retw
ENDPROC(coprocessor_flush) ENDPROC(coprocessor_flush)
ENTRY(coprocessor_restore)
entry a1, 32
s32i a0, a1, 0
movi a0, .Lload_cp_regs_jump_table
addx4 a3, a3, a0
l32i a4, a3, 4
l32i a3, a3, 0
add a2, a2, a4
beqz a3, 1f
add a0, a0, a3
callx0 a0
1: l32i a0, a1, 0
retw
ENDPROC(coprocessor_restore)
/* /*
* Entry condition: * Entry condition:
* *
...@@ -274,10 +214,9 @@ ENTRY(fast_coprocessor) ...@@ -274,10 +214,9 @@ ENTRY(fast_coprocessor)
movi a0, 2f # a0: 'return' address movi a0, 2f # a0: 'return' address
addx8 a3, a3, a5 # a3: coprocessor number addx8 a3, a3, a5 # a3: coprocessor number
l32i a2, a3, 4 # a2: xtregs offset l32i a2, a3, 4 # a2: xtregs offset
l32i a3, a3, 0 # a3: jump offset l32i a3, a3, 0 # a3: jump address
add a2, a2, a4 add a2, a2, a4
add a4, a3, a5 # a4: address of save routine jx a3
jx a4
/* Note that only a0 and a1 were preserved. */ /* Note that only a0 and a1 were preserved. */
...@@ -297,10 +236,9 @@ ENTRY(fast_coprocessor) ...@@ -297,10 +236,9 @@ ENTRY(fast_coprocessor)
movi a0, 1f movi a0, 1f
addx8 a3, a3, a5 addx8 a3, a3, a5
l32i a2, a3, 4 # a2: xtregs offset l32i a2, a3, 4 # a2: xtregs offset
l32i a3, a3, 0 # a3: jump offset l32i a3, a3, 0 # a3: jump address
add a2, a2, a4 add a2, a2, a4
add a4, a3, a5 jx a3
jx a4
/* Restore all registers and return from exception handler. */ /* Restore all registers and return from exception handler. */
......
...@@ -364,7 +364,7 @@ common_exception: ...@@ -364,7 +364,7 @@ common_exception:
s32i a2, a1, PT_DEBUGCAUSE s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC s32i a3, a1, PT_PC
movi a2, -1 movi a2, NO_SYSCALL
rsr a3, excvaddr rsr a3, excvaddr
s32i a2, a1, PT_SYSCALL s32i a2, a1, PT_SYSCALL
movi a2, 0 movi a2, 0
...@@ -1022,25 +1022,6 @@ ENDPROC(fast_alloca) ...@@ -1022,25 +1022,6 @@ ENDPROC(fast_alloca)
* excsave_1: dispatch table * excsave_1: dispatch table
*/ */
ENTRY(fast_syscall_kernel)
/* Skip syscall. */
rsr a0, epc1
addi a0, a0, 3
wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
j kernel_exception
ENDPROC(fast_syscall_kernel)
ENTRY(fast_syscall_user) ENTRY(fast_syscall_user)
/* Skip syscall. */ /* Skip syscall. */
...@@ -1865,20 +1846,28 @@ ENTRY(system_call) ...@@ -1865,20 +1846,28 @@ ENTRY(system_call)
/* regs->syscall = regs->areg[2] */ /* regs->syscall = regs->areg[2] */
l32i a3, a2, PT_AREG2 l32i a7, a2, PT_AREG2
s32i a7, a2, PT_SYSCALL
GET_THREAD_INFO(a4, a1)
l32i a3, a4, TI_FLAGS
movi a4, _TIF_WORK_MASK
and a3, a3, a4
beqz a3, 1f
mov a6, a2 mov a6, a2
s32i a3, a2, PT_SYSCALL
call4 do_syscall_trace_enter call4 do_syscall_trace_enter
mov a3, a6 l32i a7, a2, PT_SYSCALL
1:
/* syscall = sys_call_table[syscall_nr] */ /* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table movi a4, sys_call_table
movi a5, __NR_syscall_count movi a5, __NR_syscalls
movi a6, -ENOSYS movi a6, -ENOSYS
bgeu a3, a5, 1f bgeu a7, a5, 1f
addx4 a4, a3, a4 addx4 a4, a7, a4
l32i a4, a4, 0 l32i a4, a4, 0
movi a5, sys_ni_syscall; movi a5, sys_ni_syscall;
beq a4, a5, 1f beq a4, a5, 1f
...@@ -1900,6 +1889,10 @@ ENTRY(system_call) ...@@ -1900,6 +1889,10 @@ ENTRY(system_call)
1: /* regs->areg[2] = return_value */ 1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2 s32i a6, a2, PT_AREG2
bnez a3, 1f
retw
1:
mov a6, a2 mov a6, a2
call4 do_syscall_trace_leave call4 do_syscall_trace_leave
retw retw
......
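
In C terms, the rewritten system_call sequence above behaves roughly as follows. This is a readability sketch only; the real path stays in assembly, and the do_syscall_trace_* prototypes are assumed from their call sites:

#include <linux/thread_info.h>
#include <asm/ptrace.h>

void do_syscall_trace_enter(struct pt_regs *regs);	/* assumed prototypes */
void do_syscall_trace_leave(struct pt_regs *regs);

static void system_call_in_c(struct pt_regs *regs)
{
	/* The trace hooks run only when a TIF_* work bit is set, so the
	 * common case goes straight to the handler. */
	unsigned long work = current_thread_info()->flags & _TIF_WORK_MASK;

	regs->syscall = regs->areg[2];
	if (work)
		do_syscall_trace_enter(regs);

	/* ... bounds-check regs->syscall against __NR_syscalls, call
	 * sys_call_table[regs->syscall] with arguments taken from
	 * a6, a3, a4, a5, a8, a9, and store the result in areg[2] ... */

	if (work)
		do_syscall_trace_leave(regs);
}
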
...@@ -59,10 +59,6 @@ ENTRY(_start) ...@@ -59,10 +59,6 @@ ENTRY(_start)
.align 4 .align 4
.literal_position .literal_position
.Lstartup:
.word _startup
.align 4
_SetupOCD: _SetupOCD:
/* /*
* Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
...@@ -99,12 +95,12 @@ _SetupMMU: ...@@ -99,12 +95,12 @@ _SetupMMU:
1: 1:
#endif #endif
#endif #endif
.end no-absolute-literals
l32r a0, .Lstartup movi a0, _startup
jx a0 jx a0
ENDPROC(_start) ENDPROC(_start)
.end no-absolute-literals
__REF __REF
.literal_position .literal_position
......
...@@ -101,30 +101,30 @@ static void xtensa_wsr(unsigned long v, u8 sr) ...@@ -101,30 +101,30 @@ static void xtensa_wsr(unsigned long v, u8 sr)
switch (sr) { switch (sr) {
#if XCHAL_NUM_IBREAK > 0 #if XCHAL_NUM_IBREAK > 0
case SREG_IBREAKA + 0: case SREG_IBREAKA + 0:
WSR(v, SREG_IBREAKA + 0); xtensa_set_sr(v, SREG_IBREAKA + 0);
break; break;
#endif #endif
#if XCHAL_NUM_IBREAK > 1 #if XCHAL_NUM_IBREAK > 1
case SREG_IBREAKA + 1: case SREG_IBREAKA + 1:
WSR(v, SREG_IBREAKA + 1); xtensa_set_sr(v, SREG_IBREAKA + 1);
break; break;
#endif #endif
#if XCHAL_NUM_DBREAK > 0 #if XCHAL_NUM_DBREAK > 0
case SREG_DBREAKA + 0: case SREG_DBREAKA + 0:
WSR(v, SREG_DBREAKA + 0); xtensa_set_sr(v, SREG_DBREAKA + 0);
break; break;
case SREG_DBREAKC + 0: case SREG_DBREAKC + 0:
WSR(v, SREG_DBREAKC + 0); xtensa_set_sr(v, SREG_DBREAKC + 0);
break; break;
#endif #endif
#if XCHAL_NUM_DBREAK > 1 #if XCHAL_NUM_DBREAK > 1
case SREG_DBREAKA + 1: case SREG_DBREAKA + 1:
WSR(v, SREG_DBREAKA + 1); xtensa_set_sr(v, SREG_DBREAKA + 1);
break; break;
case SREG_DBREAKC + 1: case SREG_DBREAKC + 1:
WSR(v, SREG_DBREAKC + 1); xtensa_set_sr(v, SREG_DBREAKC + 1);
break; break;
#endif #endif
} }
...@@ -150,8 +150,8 @@ static void set_ibreak_regs(int reg, struct perf_event *bp) ...@@ -150,8 +150,8 @@ static void set_ibreak_regs(int reg, struct perf_event *bp)
unsigned long ibreakenable; unsigned long ibreakenable;
xtensa_wsr(info->address, SREG_IBREAKA + reg); xtensa_wsr(info->address, SREG_IBREAKA + reg);
RSR(ibreakenable, SREG_IBREAKENABLE); ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE); xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
} }
static void set_dbreak_regs(int reg, struct perf_event *bp) static void set_dbreak_regs(int reg, struct perf_event *bp)
...@@ -214,8 +214,9 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) ...@@ -214,8 +214,9 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */ /* Breakpoint */
i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp); i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
if (i >= 0) { if (i >= 0) {
RSR(ibreakenable, SREG_IBREAKENABLE); ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE); xtensa_set_sr(ibreakenable & ~(1 << i),
SREG_IBREAKENABLE);
} }
} else { } else {
/* Watchpoint */ /* Watchpoint */
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Cadence Design Systems Inc.
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#ifdef HAVE_JUMP_LABEL
#define J_OFFSET_MASK 0x0003ffff
#define J_SIGN_MASK (~(J_OFFSET_MASK >> 1))
#if defined(__XTENSA_EL__)
#define J_INSN 0x6
#define NOP_INSN 0x0020f0
#elif defined(__XTENSA_EB__)
#define J_INSN 0x60000000
#define NOP_INSN 0x0f020000
#else
#error Unsupported endianness.
#endif
struct patch {
atomic_t cpu_count;
unsigned long addr;
size_t sz;
const void *data;
};
static void local_patch_text(unsigned long addr, const void *data, size_t sz)
{
memcpy((void *)addr, data, sz);
local_flush_icache_range(addr, addr + sz);
}
static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
if (atomic_inc_return(&patch->cpu_count) == 1) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
__invalidate_icache_range(patch->addr, patch->sz);
}
return 0;
}
static void patch_text(unsigned long addr, const void *data, size_t sz)
{
if (IS_ENABLED(CONFIG_SMP)) {
struct patch patch = {
.cpu_count = ATOMIC_INIT(0),
.addr = addr,
.sz = sz,
.data = data,
};
stop_machine_cpuslocked(patch_text_stop_machine,
&patch, NULL);
} else {
unsigned long flags;
local_irq_save(flags);
local_patch_text(addr, data, sz);
local_irq_restore(flags);
}
}
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
u32 d = (jump_entry_target(e) - (jump_entry_code(e) + 4));
u32 insn;
/* Jump only works within 128K of the J instruction. */
BUG_ON(!((d & J_SIGN_MASK) == 0 ||
(d & J_SIGN_MASK) == J_SIGN_MASK));
if (type == JUMP_LABEL_JMP) {
#if defined(__XTENSA_EL__)
insn = ((d & J_OFFSET_MASK) << 6) | J_INSN;
#elif defined(__XTENSA_EB__)
insn = ((d & J_OFFSET_MASK) << 8) | J_INSN;
#endif
} else {
insn = NOP_INSN;
}
patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE);
}
#endif /* HAVE_JUMP_LABEL */
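
For intuition, a worked example of the little-endian encoding path above; the addresses are made up, and the helper only reproduces the arithmetic of arch_jump_label_transform() using the J_OFFSET_MASK/J_INSN definitions from this file:

#include <linux/types.h>

static u32 demo_encode_jump(void)
{
	/* Hypothetical layout: J instruction at 0xd0001000,
	 * branch target at 0xd0001040, little-endian core. */
	u32 code   = 0xd0001000;
	u32 target = 0xd0001040;
	u32 d      = target - (code + 4);	/* 0x3c, fits in 18 bits */

	/* (0x3c << 6) | 0x6 == 0x00000f06; patch_text() writes the low
	 * JUMP_LABEL_NOP_SIZE (3) bytes, 06 0f 00, over the NOP. */
	return ((d & J_OFFSET_MASK) << 6) | J_INSN;
}
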
...@@ -87,7 +87,8 @@ void coprocessor_release_all(struct thread_info *ti) ...@@ -87,7 +87,8 @@ void coprocessor_release_all(struct thread_info *ti)
} }
ti->cpenable = cpenable; ti->cpenable = cpenable;
coprocessor_clear_cpenable(); if (ti == current_thread_info())
xtensa_set_sr(0, cpenable);
preempt_enable(); preempt_enable();
} }
...@@ -99,16 +100,16 @@ void coprocessor_flush_all(struct thread_info *ti) ...@@ -99,16 +100,16 @@ void coprocessor_flush_all(struct thread_info *ti)
preempt_disable(); preempt_disable();
RSR_CPENABLE(old_cpenable); old_cpenable = xtensa_get_sr(cpenable);
cpenable = ti->cpenable; cpenable = ti->cpenable;
WSR_CPENABLE(cpenable); xtensa_set_sr(cpenable, cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) { for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
coprocessor_flush(ti, i); coprocessor_flush(ti, i);
cpenable >>= 1; cpenable >>= 1;
} }
WSR_CPENABLE(old_cpenable); xtensa_set_sr(old_cpenable, cpenable);
preempt_enable(); preempt_enable();
} }
...@@ -325,49 +326,3 @@ unsigned long get_wchan(struct task_struct *p) ...@@ -325,49 +326,3 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16); } while (count++ < 16);
return 0; return 0;
} }
/*
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
* of processor registers. Besides different ordering,
* xtensa_gregset_t contains non-live register information that
* 'struct pt_regs' does not. Exception handling (primarily) uses
* 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
*
*/
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
unsigned long wb, ws, wm;
int live, last;
wb = regs->windowbase;
ws = regs->windowstart;
wm = regs->wmask;
ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
/* Don't leak any random bits. */
memset(elfregs, 0, sizeof(*elfregs));
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
*/
elfregs->pc = regs->pc;
elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
elfregs->lbeg = regs->lbeg;
elfregs->lend = regs->lend;
elfregs->lcount = regs->lcount;
elfregs->sar = regs->sar;
elfregs->windowstart = ws;
live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
memcpy(elfregs->a, regs->areg, live * 4);
memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}
int dump_fpu(void)
{
return 0;
}
...@@ -318,9 +318,9 @@ static inline int mem_reserve(unsigned long start, unsigned long end) ...@@ -318,9 +318,9 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p) void __init setup_arch(char **cmdline_p)
{ {
pr_info("config ID: %08x:%08x\n", pr_info("config ID: %08x:%08x\n",
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE)); xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE));
if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || if (xtensa_get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) xtensa_get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
pr_info("built for config ID: %08x:%08x\n", pr_info("built for config ID: %08x:%08x\n",
XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
...@@ -596,7 +596,7 @@ c_show(struct seq_file *f, void *slot) ...@@ -596,7 +596,7 @@ c_show(struct seq_file *f, void *slot)
num_online_cpus(), num_online_cpus(),
cpumask_pr_args(cpu_online_mask), cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID, XCHAL_BUILD_UNIQUE_ID,
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE), xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE),
XCHAL_HAVE_BE ? "big" : "little", XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000, ccount_freq/1000000,
(ccount_freq/10000) % 100, (ccount_freq/10000) % 100,
......
...@@ -185,13 +185,13 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame) ...@@ -185,13 +185,13 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
COPY(sar); COPY(sar);
#undef COPY #undef COPY
/* All registers were flushed to stack. Start with a prestine frame. */ /* All registers were flushed to stack. Start with a pristine frame. */
regs->wmask = 1; regs->wmask = 1;
regs->windowbase = 0; regs->windowbase = 0;
regs->windowstart = 1; regs->windowstart = 1;
regs->syscall = -1; /* disable syscall checks */ regs->syscall = NO_SYSCALL; /* disable syscall checks */
/* For PS, restore only PS.CALLINC. /* For PS, restore only PS.CALLINC.
* Assume that all other bits are either the same as for the signal * Assume that all other bits are either the same as for the signal
...@@ -423,7 +423,7 @@ static void do_signal(struct pt_regs *regs) ...@@ -423,7 +423,7 @@ static void do_signal(struct pt_regs *regs)
/* Are we from a system call? */ /* Are we from a system call? */
if ((signed)regs->syscall >= 0) { if (regs->syscall != NO_SYSCALL) {
/* If so, check system call restarting.. */ /* If so, check system call restarting.. */
...@@ -462,7 +462,7 @@ static void do_signal(struct pt_regs *regs) ...@@ -462,7 +462,7 @@ static void do_signal(struct pt_regs *regs)
} }
/* Did we come from a system call? */ /* Did we come from a system call? */
if ((signed) regs->syscall >= 0) { if (regs->syscall != NO_SYSCALL) {
/* Restart the system call - no handlers present */ /* Restart the system call - no handlers present */
switch (regs->areg[2]) { switch (regs->areg[2]) {
case -ERESTARTNOHAND: case -ERESTARTNOHAND:
......
...@@ -28,13 +28,12 @@ ...@@ -28,13 +28,12 @@
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/shm.h> #include <linux/shm.h>
typedef void (*syscall_t)(void); syscall_t sys_call_table[__NR_syscalls] /* FIXME __cacheline_aligned */= {
[0 ... __NR_syscalls - 1] = (syscall_t)&sys_ni_syscall,
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { #define __SYSCALL(nr, entry, nargs)[nr] = (syscall_t)entry,
[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall, #include <asm/syscall_table.h>
#undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#include <uapi/asm/unistd.h>
}; };
#define COLOUR_ALIGN(addr, pgoff) \ #define COLOUR_ALIGN(addr, pgoff) \
......
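
For illustration, the generated <asm/syscall_table.h> is just a list of __SYSCALL() lines, which the #define above turns into designated initializers; the numbers and entries below are made up, not the real xtensa table:

/* Hypothetical generated lines: */
__SYSCALL(57, sys_read, )
__SYSCALL(58, sys_write, )

/* After the #define in syscall.c they expand to: */
[57] = (syscall_t)sys_read,
[58] = (syscall_t)sys_write,
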
# SPDX-License-Identifier: GPL-2.0
kapi := arch/$(SRCARCH)/include/generated/asm
uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(srctree)/$(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
'$(syshdr_abis_$(basetarget))' \
'$(syshdr_pfx_$(basetarget))' \
'$(syshdr_offset_$(basetarget))'
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
'$(systbl_abis_$(basetarget))' \
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
$(uapi)/unistd_32.h: $(syscall) $(syshdr)
$(call if_changed,syshdr)
$(kapi)/syscall_table.h: $(syscall) $(systbl)
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
targets += $(uapisyshdr-y) $(kapisyshdr-y)
PHONY += all
all: $(addprefix $(uapi)/,$(uapisyshdr-y))
all: $(addprefix $(kapi)/,$(kapisyshdr-y))
@:
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
prefix="$4"
offset="$5"
fileguard=_UAPI_ASM_XTENSA_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#ifndef %s\n" "${fileguard}"
printf "#define %s\n" "${fileguard}"
printf "\n"
nxt=0
while read nr abi name entry ; do
if [ -z "$offset" ]; then
printf "#define __NR_%s%s\t%s\n" \
"${prefix}" "${name}" "${nr}"
else
printf "#define __NR_%s%s\t(%s + %s)\n" \
"${prefix}" "${name}" "${offset}" "${nr}"
fi
nxt=$((nr+1))
done
printf "\n"
printf "#ifdef __KERNEL__\n"
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
printf "#endif /* %s */" "${fileguard}"
) > "$out"
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
my_abi="$4"
offset="$5"
emit() {
t_nxt="$1"
t_nr="$2"
t_entry="$3"
while [ $t_nxt -lt $t_nr ]; do
printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
t_nxt=$((t_nxt+1))
done
printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
}
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
nxt=0
if [ -z "$offset" ]; then
offset=0
fi
while read nr abi name entry ; do
emit $((nxt+offset)) $((nr+offset)) $entry
nxt=$((nr+1))
done
) > "$out"
...@@ -51,7 +51,6 @@ ...@@ -51,7 +51,6 @@
extern void kernel_exception(void); extern void kernel_exception(void);
extern void user_exception(void); extern void user_exception(void);
extern void fast_syscall_kernel(void);
extern void fast_syscall_user(void); extern void fast_syscall_user(void);
extern void fast_alloca(void); extern void fast_alloca(void);
extern void fast_unaligned(void); extern void fast_unaligned(void);
...@@ -89,7 +88,6 @@ typedef struct { ...@@ -89,7 +88,6 @@ typedef struct {
static dispatch_init_table_t __initdata dispatch_init_table[] = { static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, { EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }, { EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call }, { EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */ /* EXCCAUSE_INSTRUCTION_FETCH unhandled */
...@@ -215,8 +213,8 @@ extern void do_IRQ(int, struct pt_regs *); ...@@ -215,8 +213,8 @@ extern void do_IRQ(int, struct pt_regs *);
static inline void check_valid_nmi(void) static inline void check_valid_nmi(void)
{ {
unsigned intread = get_sr(interrupt); unsigned intread = xtensa_get_sr(interrupt);
unsigned intenable = get_sr(intenable); unsigned intenable = xtensa_get_sr(intenable);
BUG_ON(intread & intenable & BUG_ON(intread & intenable &
~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^ ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
...@@ -273,8 +271,8 @@ void do_interrupt(struct pt_regs *regs) ...@@ -273,8 +271,8 @@ void do_interrupt(struct pt_regs *regs)
irq_enter(); irq_enter();
for (;;) { for (;;) {
unsigned intread = get_sr(interrupt); unsigned intread = xtensa_get_sr(interrupt);
unsigned intenable = get_sr(intenable); unsigned intenable = xtensa_get_sr(intenable);
unsigned int_at_level = intread & intenable; unsigned int_at_level = intread & intenable;
unsigned level; unsigned level;
......
...@@ -60,6 +60,9 @@ void __init bootmem_init(void) ...@@ -60,6 +60,9 @@ void __init bootmem_init(void)
max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(max_pfn, MAX_LOW_PFN); max_low_pfn = min(max_pfn, MAX_LOW_PFN);
early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
(phys_addr_t)max_low_pfn << PAGE_SHIFT);
memblock_set_current_limit(PFN_PHYS(max_low_pfn)); memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
......
...@@ -62,7 +62,7 @@ void secondary_init_irq(void) ...@@ -62,7 +62,7 @@ void secondary_init_irq(void)
__this_cpu_write(cached_irq_mask, __this_cpu_write(cached_irq_mask,
XCHAL_INTTYPE_MASK_EXTERN_EDGE | XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL); XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE | xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable); XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
} }
...@@ -77,7 +77,7 @@ static void xtensa_mx_irq_mask(struct irq_data *d) ...@@ -77,7 +77,7 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
} else { } else {
mask = __this_cpu_read(cached_irq_mask) & ~mask; mask = __this_cpu_read(cached_irq_mask) & ~mask;
__this_cpu_write(cached_irq_mask, mask); __this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable); xtensa_set_sr(mask, intenable);
} }
} }
...@@ -92,7 +92,7 @@ static void xtensa_mx_irq_unmask(struct irq_data *d) ...@@ -92,7 +92,7 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
} else { } else {
mask |= __this_cpu_read(cached_irq_mask); mask |= __this_cpu_read(cached_irq_mask);
__this_cpu_write(cached_irq_mask, mask); __this_cpu_write(cached_irq_mask, mask);
set_sr(mask, intenable); xtensa_set_sr(mask, intenable);
} }
} }
...@@ -108,12 +108,12 @@ static void xtensa_mx_irq_disable(struct irq_data *d) ...@@ -108,12 +108,12 @@ static void xtensa_mx_irq_disable(struct irq_data *d)
static void xtensa_mx_irq_ack(struct irq_data *d) static void xtensa_mx_irq_ack(struct irq_data *d)
{ {
set_sr(1 << d->hwirq, intclear); xtensa_set_sr(1 << d->hwirq, intclear);
} }
static int xtensa_mx_irq_retrigger(struct irq_data *d) static int xtensa_mx_irq_retrigger(struct irq_data *d)
{ {
set_sr(1 << d->hwirq, intset); xtensa_set_sr(1 << d->hwirq, intset);
return 1; return 1;
} }
......
...@@ -44,13 +44,13 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = { ...@@ -44,13 +44,13 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = {
static void xtensa_irq_mask(struct irq_data *d) static void xtensa_irq_mask(struct irq_data *d)
{ {
cached_irq_mask &= ~(1 << d->hwirq); cached_irq_mask &= ~(1 << d->hwirq);
set_sr(cached_irq_mask, intenable); xtensa_set_sr(cached_irq_mask, intenable);
} }
static void xtensa_irq_unmask(struct irq_data *d) static void xtensa_irq_unmask(struct irq_data *d)
{ {
cached_irq_mask |= 1 << d->hwirq; cached_irq_mask |= 1 << d->hwirq;
set_sr(cached_irq_mask, intenable); xtensa_set_sr(cached_irq_mask, intenable);
} }
static void xtensa_irq_enable(struct irq_data *d) static void xtensa_irq_enable(struct irq_data *d)
...@@ -65,12 +65,12 @@ static void xtensa_irq_disable(struct irq_data *d) ...@@ -65,12 +65,12 @@ static void xtensa_irq_disable(struct irq_data *d)
static void xtensa_irq_ack(struct irq_data *d) static void xtensa_irq_ack(struct irq_data *d)
{ {
set_sr(1 << d->hwirq, intclear); xtensa_set_sr(1 << d->hwirq, intclear);
} }
static int xtensa_irq_retrigger(struct irq_data *d) static int xtensa_irq_retrigger(struct irq_data *d)
{ {
set_sr(1 << d->hwirq, intset); xtensa_set_sr(1 << d->hwirq, intset);
return 1; return 1;
} }
......
...@@ -411,6 +411,7 @@ enum { ...@@ -411,6 +411,7 @@ enum {
#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE) #define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE) #define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
#define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2 #define AUDIT_PERM_WRITE 2
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#define EM_M32R 88 /* Renesas M32R */ #define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */ #define EM_BLACKFIN 106 /* ADI Blackfin Processor */
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ #define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */ #define EM_TI_C6000 140 /* TI C6X DSPs */
......