Commit 38fef73c authored by Max Filippov

xtensa: implement fake NMI

When the perf IRQ is the highest of the medium-level IRQs and is alone
on its level, it may be treated as an NMI:
- LOCKLEVEL is defined to be one level below the EXCM level (see the
  sketch after this list),
- IRQ masking never lowers the current IRQ level,
- a new fake exception cause code, EXCCAUSE_MAPPED_NMI, is assigned to
  that IRQ; a new second-level exception handler, do_nmi, is attached
  to it and handles it as an NMI,
- atomic operations in configurations without s32c1i still need to mask
  all interrupts, so they now raise to TOPLEVEL rather than LOCKLEVEL.
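
For reference, a minimal standalone sketch of the eligibility check that
processor.h now performs. The numeric values below are made-up examples
standing in for the real XCHAL_* macros from the core variant headers;
the point is the condition: the perf IRQ sits at EXCM level, that level
is above 1, and the IRQ is alone on its level, so LOCKLEVEL can drop one
level below TOPLEVEL.

  /* Hypothetical stand-ins for the variant-provided XCHAL_* macros. */
  #include <stdio.h>

  #define EXCM_LEVEL            3          /* example XCHAL_EXCM_LEVEL */
  #define PROFILING_INTLEVEL    3          /* example level of the perf IRQ */
  #define PROFILING_LEVEL_MASK  0x00000400 /* example XCHAL_INTLEVEL3_MASK */

  #define IS_POW2(v) (((v) & ((v) - 1)) == 0)

  int main(void)
  {
          int toplevel = EXCM_LEVEL;
          int locklevel;

          /* Perf IRQ at EXCM level, level above 1, alone on its level? */
          if (PROFILING_INTLEVEL == EXCM_LEVEL && EXCM_LEVEL > 1 &&
              IS_POW2(PROFILING_LEVEL_MASK))
                  locklevel = EXCM_LEVEL - 1;   /* perf IRQ stays unmasked */
          else
                  locklevel = EXCM_LEVEL;       /* no fake NMI */

          printf("LOCKLEVEL=%d TOPLEVEL=%d fake NMI=%s\n",
                 locklevel, toplevel, locklevel < toplevel ? "yes" : "no");
          return 0;
  }

With the example values this prints "LOCKLEVEL=2 TOPLEVEL=3 fake NMI=yes":
local_irq_save() then only raises PS.INTLEVEL to 2, so the level-3 perf IRQ
keeps firing and is delivered through do_nmi.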

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 98e29832
@@ -29,7 +29,7 @@
 *
 * Locking interrupts looks like this:
 *
-*	rsil a15, LOCKLEVEL
+*	rsil a15, TOPLEVEL
 *	<code>
 *	wsr  a15, PS
 *	rsync
@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
	unsigned int vval; \
	\
	__asm__ __volatile__( \
-	" rsil a15, "__stringify(LOCKLEVEL)"\n" \
+	" rsil a15, "__stringify(TOPLEVEL)"\n" \
	" l32i %0, %2, 0\n" \
	" " #op " %0, %0, %1\n" \
	" s32i %0, %2, 0\n" \
@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
	unsigned int vval; \
	\
	__asm__ __volatile__( \
-	" rsil a15,"__stringify(LOCKLEVEL)"\n" \
+	" rsil a15,"__stringify(TOPLEVEL)"\n" \
	" l32i %0, %2, 0\n" \
	" " #op " %0, %0, %1\n" \
	" s32i %0, %2, 0\n" \
@@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
	unsigned int vval;
	__asm__ __volatile__(
-	" rsil a15,"__stringify(LOCKLEVEL)"\n"
+	" rsil a15,"__stringify(TOPLEVEL)"\n"
	" l32i %0, %2, 0\n"
	" xor %1, %4, %3\n"
	" and %0, %0, %4\n"
@@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
	unsigned int vval;
	__asm__ __volatile__(
-	" rsil a15,"__stringify(LOCKLEVEL)"\n"
+	" rsil a15,"__stringify(TOPLEVEL)"\n"
	" l32i %0, %2, 0\n"
	" or %0, %0, %1\n"
	" s32i %0, %2, 0\n"
...
@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
	return new;
#else
	__asm__ __volatile__(
-	" rsil a15, "__stringify(LOCKLEVEL)"\n"
+	" rsil a15, "__stringify(TOPLEVEL)"\n"
	" l32i %0, %1, 0\n"
	" bne %0, %2, 1f\n"
	" s32i %3, %1, 0\n"
@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else
	unsigned long tmp;
	__asm__ __volatile__(
-	" rsil a15, "__stringify(LOCKLEVEL)"\n"
+	" rsil a15, "__stringify(TOPLEVEL)"\n"
	" l32i %0, %1, 0\n"
	" s32i %2, %1, 0\n"
	" wsr a15, ps\n"
...
@@ -6,6 +6,7 @@
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
 */
#ifndef _XTENSA_IRQFLAGS_H
@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void)
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	unsigned long tmp;
+	asm volatile("rsr	%0, ps\t\n"
+		     "extui	%1, %0, 0, 4\t\n"
+		     "bgei	%1, "__stringify(LOCKLEVEL)", 1f\t\n"
+		     "rsil	%0, "__stringify(LOCKLEVEL)"\n"
+		     "1:"
+		     : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+	asm volatile("rsr	%0, ps\t\n"
+		     "or	%0, %0, %1\t\n"
+		     "xsr	%0, ps\t\n"
+		     "rsync"
+		     : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
	asm volatile("rsil	%0, "__stringify(LOCKLEVEL)
		     : "=a" (flags) :: "memory");
+#endif
	return flags;
}
...
/*
- * include/asm-xtensa/processor.h
- *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
 */
#ifndef _XTENSA_PROCESSOR_H
@@ -44,6 +43,14 @@
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP
+/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+#define EXCCAUSE_MAPPED_NMI	62
/*
 * General exception cause assigned to debug exceptions. Debug exceptions go
 * to their own vector, rather than the general exception vectors (user,
@@ -65,10 +72,30 @@
#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
+#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
+	defined(XCHAL_PROFILING_INTERRUPT) && \
+	PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+	XCHAL_EXCM_LEVEL > 1 && \
+	IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
+#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
 * registers
...
/*
- * arch/xtensa/kernel/entry.S
- *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
@@ -8,6 +6,7 @@
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
@@ -75,6 +74,27 @@
#endif
	.endm
+	.macro	irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	rsr	\flags, ps
+	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	\tmp, LOCKLEVEL, 99f
+	rsil	\tmp, LOCKLEVEL
+99:
+#else
+	movi	\tmp, LOCKLEVEL
+	rsr	\flags, ps
+	or	\flags, \flags, \tmp
+	xsr	\flags, ps
+	rsync
+#endif
+#else
+	rsil	\flags, LOCKLEVEL
+#endif
+	.endm
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
/*
@@ -352,11 +372,11 @@ common_exception:
	/* It is now save to restore the EXC_TABLE_FIXUP variable. */
-	rsr	a0, exccause
+	rsr	a2, exccause
	movi	a3, 0
-	rsr	a2, excsave1
-	s32i	a0, a1, PT_EXCCAUSE
-	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a0, excsave1
+	s32i	a2, a1, PT_EXCCAUSE
+	s32i	a3, a0, EXC_TABLE_FIXUP
	/* All unrecoverable states are saved on stack, now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt
@@ -367,19 +387,46 @@ common_exception:
	 */
	rsr	a3, ps
-	addi	a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
-	movi	a2, LOCKLEVEL
+	s32i	a3, a1, PT_PS		# save ps
+#if XTENSA_FAKE_NMI
+	/* Correct PS needs to be saved in the PT_PS:
+	 * - in case of exception or level-1 interrupt it's in the PS,
+	 *   and is already saved.
+	 * - in case of medium level interrupt it's in the excsave2.
+	 */
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beq	a2, a0, .Lmedium_level_irq
+	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
+.Lmedium_level_irq:
+	rsr	a0, excsave2
+	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
+	bgei	a3, LOCKLEVEL, .Lexception
+.Llevel1_irq:
+	movi	a3, LOCKLEVEL
+.Lexception:
+	movi	a0, 1 << PS_WOE_BIT
+	or	a3, a3, a0
+#else
+	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
-	moveqz	a3, a2, a0		# a3 = LOCKLEVEL iff interrupt
+	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a2, exccause
+#endif
	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
-	xsr	a3, ps
-	s32i	a3, a1, PT_PS		# save ps
+	wsr	a3, ps
+	rsync				# PS.WOE => rsync => overflow
	/* Save lbeg, lend */
@@ -417,8 +464,13 @@ common_exception:
	.global common_exception_return
common_exception_return:
+#if XTENSA_FAKE_NMI
+	l32i	a2, a1, PT_EXCCAUSE
+	movi	a3, EXCCAUSE_MAPPED_NMI
+	beq	a2, a3, .LNMIexit
+#endif
1:
-	rsil	a2, LOCKLEVEL
+	irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
	movi	a4, trace_hardirqs_off
	callx4	a4
@@ -481,6 +533,12 @@ common_exception_return:
	j	1b
#endif
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+	l32i	a3, a1, PT_PS
+	_bbci.l	a3, PS_UM_BIT, 4f
+#endif
5:
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	a4, a1, PT_DEPC
@@ -1564,6 +1622,13 @@ ENTRY(fast_second_level_miss)
	rfde
9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, 8b
+	/* Even more unlikely case active_mm == 0.
+	 * We can get here with NMI in the middle of context_switch that
+	 * touches vmalloc area.
+	 */
+	movi	a0, init_mm
	j	8b
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -1867,7 +1932,7 @@ ENTRY(_switch_to)
	/* Disable ints while we manipulate the stack pointer. */
-	rsil	a14, LOCKLEVEL
+	irq_save a14, a3
	rsync
	/* Switch CPENABLE */
...
@@ -29,6 +29,7 @@
#include <asm/platform.h>
atomic_t irq_err_count;
+DECLARE_PER_CPU(unsigned long, nmi_count);
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
@@ -57,11 +58,18 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
int arch_show_interrupts(struct seq_file *p, int prec)
{
+	unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
+#if XTENSA_FAKE_NMI
+	seq_printf(p, "%*s:", prec, "NMI");
+	for_each_online_cpu(cpu)
+		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+	seq_puts(p, " Non-maskable interrupts\n");
+#endif
	return 0;
}
...
@@ -359,7 +359,7 @@ void perf_event_print_debug(void)
	local_irq_restore(flags);
}
-static irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
{
	irqreturn_t rc = IRQ_NONE;
	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
@@ -436,10 +436,14 @@ static int __init xtensa_pmu_init(void)
	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
	perf_cpu_notifier(xtensa_pmu_notifier);
+#if XTENSA_FAKE_NMI
+	enable_irq(irq);
+#else
	ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
			  "pmu", NULL);
	if (ret < 0)
		return ret;
+#endif
	ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
...
@@ -62,6 +62,7 @@ extern void fast_coprocessor(void);
extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
@@ -146,6 +147,9 @@ COPROCESSOR(6),
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI,		0,	do_nmi },
+#endif
{ EXCCAUSE_MAPPED_DEBUG,	0,	do_debug },
{ -1, -1, 0 }
@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
extern void do_IRQ(int, struct pt_regs *);
+#if XTENSA_FAKE_NMI
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+
+	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+		trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
+	nmi_enter();
+	++*this_cpu_ptr(&nmi_count);
+	xtensa_pmu_irq_handler(0, NULL);
+	nmi_exit();
+	set_irq_regs(old_regs);
+}
+#endif
void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
...
@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector)
	wsr	a0, excsave2
	rsr	a0, epc\level
	wsr	a0, epc1
+	.if	\level <= LOCKLEVEL
	movi	a0, EXCCAUSE_LEVEL1_INTERRUPT
+	.else
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	.endif
	wsr	a0, exccause
	rsr	a0, eps\level
	# branch to user or kernel vector
@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4)
	.align 4
_SimulateUserKernelVectorException:
	addi	a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
	wsr	a0, ps
+#endif
	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
-	rsr	a0, excsave2		# restore a0
+	xsr	a0, excsave2		# restore a0
	j	_KernelExceptionVector	# simulate kernel vector exception
-1:	rsr	a0, excsave2		# restore a0
+1:	xsr	a0, excsave2		# restore a0
	j	_UserExceptionVector	# simulate user vector exception
#endif
...