Commit a4c3733d authored by Christoph Hellwig, committed by Paul Walmsley

riscv: abstract out CSR names for supervisor vs machine mode

Many of the privileged CSRs exist in supervisor and machine versions
that are used very similarly.  Provide versions of the CSR names and
fields that map to either the S-mode or M-mode variant depending on
a new CONFIG_RISCV_M_MODE kconfig symbol.

Contains contributions from Damien Le Moal <Damien.LeMoal@wdc.com>
and Paul Walmsley <paul.walmsley@sifive.com>.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de> # for drivers/clocksource, drivers/irqchip
[paul.walmsley@sifive.com: updated to apply]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
parent 0c3ac289
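
To illustrate the effect (a sketch, not code from the patch): common kernel code keeps using one set of names, and CONFIG_RISCV_M_MODE decides whether they resolve to the supervisor or the machine CSRs. csr_read()/csr_clear() are the existing accessors from <asm/csr.h>; the example_* helpers below are hypothetical.

#include <asm/csr.h>

/* Hypothetical helpers, for illustration only. */
static inline unsigned long example_read_trap_cause(void)
{
	/* Reads scause on S-mode kernels, mcause when CONFIG_RISCV_M_MODE=y. */
	return csr_read(CSR_CAUSE);
}

static inline void example_mask_local_interrupts(void)
{
	/* Clears sstatus.SIE on S-mode kernels, mstatus.MIE on M-mode ones. */
	csr_clear(CSR_STATUS, SR_IE);
}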
@@ -72,6 +72,10 @@ config ARCH_MMAP_RND_BITS_MAX
 	default 24 if 64BIT # SV39 based
 	default 17
+# set if we run in machine mode, cleared if we run in supervisor mode
+config RISCV_M_MODE
+	bool
+
 config MMU
 	def_bool y
...
@@ -11,8 +11,11 @@
 /* Status register flags */
 #define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
 #define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
 #define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
 #define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
 #define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
@@ -44,9 +47,10 @@
 #define SATP_MODE SATP_MODE_39
 #endif
-/* SCAUSE */
-#define SCAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
 #define IRQ_U_SOFT 0
 #define IRQ_S_SOFT 1
 #define IRQ_M_SOFT 3
@@ -57,6 +61,7 @@
 #define IRQ_S_EXT 9
 #define IRQ_M_EXT 11
+/* Exception causes */
 #define EXC_INST_MISALIGNED 0
 #define EXC_INST_ACCESS 1
 #define EXC_BREAKPOINT 3
@@ -67,14 +72,14 @@
 #define EXC_LOAD_PAGE_FAULT 13
 #define EXC_STORE_PAGE_FAULT 15
-/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
-#define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT)
-#define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
-#define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
+/* symbolic CSR names: */
 #define CSR_CYCLE 0xc00
 #define CSR_TIME 0xc01
 #define CSR_INSTRET 0xc02
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
 #define CSR_SSTATUS 0x100
 #define CSR_SIE 0x104
 #define CSR_STVEC 0x105
@@ -85,9 +90,56 @@
 #define CSR_STVAL 0x143
 #define CSR_SIP 0x144
 #define CSR_SATP 0x180
-#define CSR_CYCLEH 0xc80
-#define CSR_TIMEH 0xc81
-#define CSR_INSTRETH 0xc82
+
+#define CSR_MSTATUS 0x300
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+# define IRQ_SOFT IRQ_M_SOFT
+# define IRQ_TIMER IRQ_M_TIMER
+# define IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+# define IRQ_SOFT IRQ_S_SOFT
+# define IRQ_TIMER IRQ_S_TIMER
+# define IRQ_EXT IRQ_S_EXT
+#endif /* CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << IRQ_EXT)
 #ifndef __ASSEMBLY__
...
@@ -13,31 +13,31 @@
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-	return csr_read(CSR_SSTATUS);
+	return csr_read(CSR_STATUS);
 }
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-	csr_set(CSR_SSTATUS, SR_SIE);
+	csr_set(CSR_STATUS, SR_IE);
 }
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-	csr_clear(CSR_SSTATUS, SR_SIE);
+	csr_clear(CSR_STATUS, SR_IE);
 }
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-	return csr_read_clear(CSR_SSTATUS, SR_SIE);
+	return csr_read_clear(CSR_STATUS, SR_IE);
 }
 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & SR_SIE);
+	return !(flags & SR_IE);
 }
 /* test hardware interrupt enable bit */
@@ -49,7 +49,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	csr_set(CSR_SSTATUS, flags & SR_SIE);
+	csr_set(CSR_STATUS, flags & SR_IE);
 }
 #endif /* _ASM_RISCV_IRQFLAGS_H */
@@ -42,7 +42,7 @@ struct thread_struct {
 	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
 		- ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
-#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc)
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
...
@@ -12,7 +12,7 @@
 #ifndef __ASSEMBLY__
 struct pt_regs {
-	unsigned long sepc;
+	unsigned long epc;
 	unsigned long ra;
 	unsigned long sp;
 	unsigned long gp;
@@ -44,10 +44,10 @@ struct pt_regs {
 	unsigned long t4;
 	unsigned long t5;
 	unsigned long t6;
-	/* Supervisor CSRs */
-	unsigned long sstatus;
-	unsigned long sbadaddr;
-	unsigned long scause;
+	/* Supervisor/Machine CSRs */
+	unsigned long status;
+	unsigned long badaddr;
+	unsigned long cause;
 	/* a0 value before the syscall */
 	unsigned long orig_a0;
 };
@@ -58,18 +58,18 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif
-#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)
 /* Helpers for working with the instruction pointer */
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
-	return regs->sepc;
+	return regs->epc;
 }
 static inline void instruction_pointer_set(struct pt_regs *regs,
 		unsigned long val)
 {
-	regs->sepc = val;
+	regs->epc = val;
 }
 #define profile_pc(regs) instruction_pointer(regs)
...
@@ -17,19 +17,19 @@ extern void __fstate_restore(struct task_struct *restore_from);
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+	regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
 }
 static inline void fstate_off(struct task_struct *task,
 		struct pt_regs *regs)
 {
-	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
+	regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
 }
 static inline void fstate_save(struct task_struct *task,
 		struct pt_regs *regs)
 {
-	if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+	if ((regs->status & SR_FS) == SR_FS_DIRTY) {
 		__fstate_save(task);
 		__fstate_clean(regs);
 	}
@@ -38,7 +38,7 @@ static inline void fstate_save(struct task_struct *task,
 static inline void fstate_restore(struct task_struct *task,
 		struct pt_regs *regs)
 {
-	if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+	if ((regs->status & SR_FS) != SR_FS_OFF) {
 		__fstate_restore(task);
 		__fstate_clean(regs);
 	}
@@ -50,7 +50,7 @@ static inline void __switch_to_aux(struct task_struct *prev,
 	struct pt_regs *regs;
 	regs = task_pt_regs(prev);
-	if (unlikely(regs->sstatus & SR_SD))
+	if (unlikely(regs->status & SR_SD))
 		fstate_save(prev, regs);
 	fstate_restore(next, task_pt_regs(next));
 }
...
@@ -71,7 +71,7 @@ void asm_offsets(void)
 	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	OFFSET(PT_SEPC, pt_regs, sepc);
+	OFFSET(PT_EPC, pt_regs, epc);
 	OFFSET(PT_RA, pt_regs, ra);
 	OFFSET(PT_FP, pt_regs, s0);
 	OFFSET(PT_S0, pt_regs, s0);
@@ -105,9 +105,9 @@ void asm_offsets(void)
 	OFFSET(PT_T6, pt_regs, t6);
 	OFFSET(PT_GP, pt_regs, gp);
 	OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
-	OFFSET(PT_SSTATUS, pt_regs, sstatus);
-	OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
-	OFFSET(PT_SCAUSE, pt_regs, scause);
+	OFFSET(PT_STATUS, pt_regs, status);
+	OFFSET(PT_BADADDR, pt_regs, badaddr);
+	OFFSET(PT_CAUSE, pt_regs, cause);
 	/*
 	 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
...
@@ -26,14 +26,14 @@
 	/*
 	 * If coming from userspace, preserve the user thread pointer and load
-	 * the kernel thread pointer. If we came from the kernel, sscratch
-	 * will contain 0, and we should continue on the current TP.
+	 * the kernel thread pointer. If we came from the kernel, the scratch
+	 * register will contain 0, and we should continue on the current TP.
 	 */
-	csrrw tp, CSR_SSCRATCH, tp
+	csrrw tp, CSR_SCRATCH, tp
 	bnez tp, _save_context
 _restore_kernel_tpsp:
-	csrr tp, CSR_SSCRATCH
+	csrr tp, CSR_SCRATCH
 	REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
 	REG_S sp, TASK_TI_USER_SP(tp)
@@ -79,16 +79,16 @@ _save_context:
 	li t0, SR_SUM | SR_FS
 	REG_L s0, TASK_TI_USER_SP(tp)
-	csrrc s1, CSR_SSTATUS, t0
-	csrr s2, CSR_SEPC
-	csrr s3, CSR_STVAL
-	csrr s4, CSR_SCAUSE
-	csrr s5, CSR_SSCRATCH
+	csrrc s1, CSR_STATUS, t0
+	csrr s2, CSR_EPC
+	csrr s3, CSR_TVAL
+	csrr s4, CSR_CAUSE
+	csrr s5, CSR_SCRATCH
 	REG_S s0, PT_SP(sp)
-	REG_S s1, PT_SSTATUS(sp)
-	REG_S s2, PT_SEPC(sp)
-	REG_S s3, PT_SBADADDR(sp)
-	REG_S s4, PT_SCAUSE(sp)
+	REG_S s1, PT_STATUS(sp)
+	REG_S s2, PT_EPC(sp)
+	REG_S s3, PT_BADADDR(sp)
+	REG_S s4, PT_CAUSE(sp)
 	REG_S s5, PT_TP(sp)
 .endm
@@ -97,7 +97,7 @@ _save_context:
 	 * registers from the stack.
 	 */
 .macro RESTORE_ALL
-	REG_L a0, PT_SSTATUS(sp)
+	REG_L a0, PT_STATUS(sp)
 	/*
 	 * The current load reservation is effectively part of the processor's
 	 * state, in the sense that load reservations cannot be shared between
@@ -115,11 +115,11 @@ _save_context:
 	 * completes, implementations are allowed to expand reservations to be
 	 * arbitrarily large.
 	 */
-	REG_L a2, PT_SEPC(sp)
-	REG_SC x0, a2, PT_SEPC(sp)
-	csrw CSR_SSTATUS, a0
-	csrw CSR_SEPC, a2
+	REG_L a2, PT_EPC(sp)
+	REG_SC x0, a2, PT_EPC(sp)
+	csrw CSR_STATUS, a0
+	csrw CSR_EPC, a2
 	REG_L x1, PT_RA(sp)
 	REG_L x3, PT_GP(sp)
@@ -163,10 +163,10 @@ ENTRY(handle_exception)
 	SAVE_ALL
 	/*
-	 * Set sscratch register to 0, so that if a recursive exception
+	 * Set the scratch register to 0, so that if a recursive exception
 	 * occurs, the exception vector knows it came from the kernel
 	 */
-	csrw CSR_SSCRATCH, x0
+	csrw CSR_SCRATCH, x0
 	/* Load the global pointer */
 .option push
@@ -185,11 +185,13 @@ ENTRY(handle_exception)
 	move a0, sp /* pt_regs */
 	tail do_IRQ
 1:
-	/* Exceptions run with interrupts enabled or disabled
-	   depending on the state of sstatus.SR_SPIE */
-	andi t0, s1, SR_SPIE
+	/*
+	 * Exceptions run with interrupts enabled or disabled depending on the
+	 * state of SR_PIE in m/sstatus.
+	 */
+	andi t0, s1, SR_PIE
 	beqz t0, 1f
-	csrs CSR_SSTATUS, SR_SIE
+	csrs CSR_STATUS, SR_IE
 1:
 	/* Handle syscalls */
@@ -217,7 +219,7 @@ handle_syscall:
 	 * scall instruction on sret
 	 */
 	addi s2, s2, 0x4
-	REG_S s2, PT_SEPC(sp)
+	REG_S s2, PT_EPC(sp)
 	/* Trace syscalls, but only if requested by the user. */
 	REG_L t0, TASK_TI_FLAGS(tp)
 	andi t0, t0, _TIF_SYSCALL_WORK
@@ -244,9 +246,15 @@ ret_from_syscall:
 	bnez t0, handle_syscall_trace_exit
 ret_from_exception:
-	REG_L s0, PT_SSTATUS(sp)
-	csrc CSR_SSTATUS, SR_SIE
+	REG_L s0, PT_STATUS(sp)
+	csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_RISCV_M_MODE
+	/* the MPP value is too large to be used as an immediate arg for addi */
+	li t0, SR_MPP
+	and s0, s0, t0
+#else
 	andi s0, s0, SR_SPP
+#endif
 	bnez s0, resume_kernel
 resume_userspace:
@@ -260,14 +268,18 @@ resume_userspace:
 	REG_S s0, TASK_TI_KERNEL_SP(tp)
 	/*
-	 * Save TP into sscratch, so we can find the kernel data structures
-	 * again.
+	 * Save TP into the scratch register, so we can find the kernel data
+	 * structures again.
 	 */
-	csrw CSR_SSCRATCH, tp
+	csrw CSR_SCRATCH, tp
 restore_all:
 	RESTORE_ALL
+#ifdef CONFIG_RISCV_M_MODE
+	mret
+#else
 	sret
+#endif
 #if IS_ENABLED(CONFIG_PREEMPT)
 resume_kernel:
@@ -287,7 +299,7 @@ work_pending:
 	bnez s1, work_resched
 work_notifysig:
 	/* Handle pending signals and notify-resume requests */
-	csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
+	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
 	move a0, sp /* pt_regs */
 	move a1, s0 /* current_thread_info->flags */
 	tail do_notify_resume
...
@@ -23,7 +23,7 @@ ENTRY(__fstate_save)
 	li a2, TASK_THREAD_F0
 	add a0, a0, a2
 	li t1, SR_FS
-	csrs CSR_SSTATUS, t1
+	csrs CSR_STATUS, t1
 	frcsr t0
 	fsd f0, TASK_THREAD_F0_F0(a0)
 	fsd f1, TASK_THREAD_F1_F0(a0)
@@ -58,7 +58,7 @@ ENTRY(__fstate_save)
 	fsd f30, TASK_THREAD_F30_F0(a0)
 	fsd f31, TASK_THREAD_F31_F0(a0)
 	sw t0, TASK_THREAD_FCSR_F0(a0)
-	csrc CSR_SSTATUS, t1
+	csrc CSR_STATUS, t1
 	ret
 ENDPROC(__fstate_save)
@@ -67,7 +67,7 @@ ENTRY(__fstate_restore)
 	add a0, a0, a2
 	li t1, SR_FS
 	lw t0, TASK_THREAD_FCSR_F0(a0)
-	csrs CSR_SSTATUS, t1
+	csrs CSR_STATUS, t1
 	fld f0, TASK_THREAD_F0_F0(a0)
 	fld f1, TASK_THREAD_F1_F0(a0)
 	fld f2, TASK_THREAD_F2_F0(a0)
@@ -101,6 +101,6 @@ ENTRY(__fstate_restore)
 	fld f30, TASK_THREAD_F30_F0(a0)
 	fld f31, TASK_THREAD_F31_F0(a0)
 	fscsr t0
-	csrc CSR_SSTATUS, t1
+	csrc CSR_STATUS, t1
 	ret
 ENDPROC(__fstate_restore)
@@ -47,8 +47,8 @@ ENTRY(_start)
 .global _start_kernel
 _start_kernel:
 	/* Mask all interrupts */
-	csrw CSR_SIE, zero
-	csrw CSR_SIP, zero
+	csrw CSR_IE, zero
+	csrw CSR_IP, zero
 	/* Load the global pointer */
 .option push
@@ -61,7 +61,7 @@ _start_kernel:
 	 * floating point in kernel space
 	 */
 	li t0, SR_FS
-	csrc CSR_SSTATUS, t0
+	csrc CSR_STATUS, t0
 #ifdef CONFIG_SMP
 	li t0, CONFIG_NR_CPUS
@@ -116,7 +116,7 @@ relocate:
 	/* Point stvec to virtual address of intruction after satp write */
 	la a2, 1f
 	add a2, a2, a1
-	csrw CSR_STVEC, a2
+	csrw CSR_TVEC, a2
 	/* Compute satp for kernel page tables, but don't load it yet */
 	srl a2, a0, PAGE_SHIFT
@@ -138,7 +138,7 @@ relocate:
 1:
 	/* Set trap vector to spin forever to help debug */
 	la a0, .Lsecondary_park
-	csrw CSR_STVEC, a0
+	csrw CSR_TVEC, a0
 	/* Reload the global pointer */
 .option push
@@ -161,7 +161,7 @@ relocate:
 #ifdef CONFIG_SMP
 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
-	csrw CSR_STVEC, a3
+	csrw CSR_TVEC, a3
 	slli a3, a0, LGREG
 	la a1, __cpu_up_stack_pointer
...
@@ -11,13 +11,6 @@
 #include <linux/seq_file.h>
 #include <asm/smp.h>
-/*
- * Possible interrupt causes:
- */
-#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
-#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
-#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	show_ipi_stats(p, prec);
@@ -29,12 +22,12 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	irq_enter();
-	switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
-	case INTERRUPT_CAUSE_TIMER:
+	switch (regs->cause & ~CAUSE_IRQ_FLAG) {
+	case IRQ_TIMER:
 		riscv_timer_interrupt();
 		break;
 #ifdef CONFIG_SMP
-	case INTERRUPT_CAUSE_SOFTWARE:
+	case IRQ_SOFT:
 		/*
 		 * We only use software interrupts to pass IPIs, so if a non-SMP
 		 * system gets one, then we don't know what to do.
@@ -42,11 +35,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 		riscv_software_interrupt();
 		break;
 #endif
-	case INTERRUPT_CAUSE_EXTERNAL:
+	case IRQ_EXT:
 		handle_arch_irq(regs);
 		break;
 	default:
-		pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+		pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
 		BUG();
 	}
 	irq_exit();
...
@@ -67,7 +67,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 		return;
 	fp = regs->s0;
-	perf_callchain_store(entry, regs->sepc);
+	perf_callchain_store(entry, regs->epc);
 	fp = user_backtrace(entry, fp, regs->ra);
 	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
...
@@ -35,8 +35,8 @@ void show_regs(struct pt_regs *regs)
 {
 	show_regs_print_info(KERN_DEFAULT);
-	pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
-		regs->sepc, regs->ra, regs->sp);
+	pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+		regs->epc, regs->ra, regs->sp);
 	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
 		regs->gp, regs->tp, regs->t0);
 	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
@@ -58,23 +58,23 @@ void show_regs(struct pt_regs *regs)
 	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
 		regs->t5, regs->t6);
-	pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
-		regs->sstatus, regs->sbadaddr, regs->scause);
+	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
+		regs->status, regs->badaddr, regs->cause);
 }
 void start_thread(struct pt_regs *regs, unsigned long pc,
 	unsigned long sp)
 {
-	regs->sstatus = SR_SPIE;
+	regs->status = SR_PIE;
 	if (has_fpu) {
-		regs->sstatus |= SR_FS_INITIAL;
+		regs->status |= SR_FS_INITIAL;
 		/*
 		 * Restore the initial value to the FP register
 		 * before starting the user program.
 		 */
 		fstate_restore(current, regs);
 	}
-	regs->sepc = pc;
+	regs->epc = pc;
 	regs->sp = sp;
 	set_fs(USER_DS);
 }
@@ -110,7 +110,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		const register unsigned long gp __asm__ ("gp");
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gp = gp;
-		childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
+		/* Supervisor/Machine, irqs on: */
+		childregs->status = SR_PP | SR_PIE;
 		p->thread.ra = (unsigned long)ret_from_kernel_thread;
 		p->thread.s[0] = usp; /* fn */
...
@@ -124,7 +124,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		pr_info_ratelimited(
 			"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
 			task->comm, task_pid_nr(task), __func__,
-			frame, (void *)regs->sepc, (void *)regs->sp);
+			frame, (void *)regs->epc, (void *)regs->sp);
 	}
 	force_sig(SIGSEGV);
 	return 0;
@@ -199,7 +199,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
 	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
-	regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
+	regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
 	regs->sp = (unsigned long)frame;
 	regs->a0 = ksig->sig; /* a0: signal number */
 	regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
@@ -208,7 +208,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 #if DEBUG_SIG
 	pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
 		current->comm, task_pid_nr(current), ksig->sig,
-		(void *)regs->sepc, (void *)regs->ra, frame);
+		(void *)regs->epc, (void *)regs->ra, frame);
 #endif
 	return 0;
@@ -220,10 +220,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	int ret;
 	/* Are we from a system call? */
-	if (regs->scause == EXC_SYSCALL) {
+	if (regs->cause == EXC_SYSCALL) {
 		/* Avoid additional syscall restarting via ret_from_exception */
-		regs->scause = -1UL;
+		regs->cause = -1UL;
 		/* If so, check system call restarting.. */
 		switch (regs->a0) {
 		case -ERESTART_RESTARTBLOCK:
@@ -239,7 +238,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		/* fallthrough */
 		case -ERESTARTNOINTR:
 			regs->a0 = regs->orig_a0;
-			regs->sepc -= 0x4;
+			regs->epc -= 0x4;
 			break;
 		}
 	}
@@ -261,9 +260,9 @@ static void do_signal(struct pt_regs *regs)
 	}
 	/* Did we come from a system call? */
-	if (regs->scause == EXC_SYSCALL) {
+	if (regs->cause == EXC_SYSCALL) {
 		/* Avoid additional syscall restarting via ret_from_exception */
-		regs->scause = -1UL;
+		regs->cause = -1UL;
 		/* Restart the system call - no handlers present */
 		switch (regs->a0) {
@@ -271,12 +270,12 @@ static void do_signal(struct pt_regs *regs)
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
 			regs->a0 = regs->orig_a0;
-			regs->sepc -= 0x4;
+			regs->epc -= 0x4;
 			break;
 		case -ERESTART_RESTARTBLOCK:
 			regs->a0 = regs->orig_a0;
 			regs->a7 = __NR_restart_syscall;
-			regs->sepc -= 0x4;
+			regs->epc -= 0x4;
 			break;
 		}
 	}
...
@@ -108,7 +108,7 @@ static void send_ipi_single(int cpu, enum ipi_message_type op)
 static inline void clear_ipi(void)
 {
-	csr_clear(CSR_SIP, SIE_SSIE);
+	csr_clear(CSR_IP, IE_SIE);
 }
 void riscv_software_interrupt(void)
...
@@ -41,7 +41,7 @@ void die(struct pt_regs *regs, const char *str)
 	print_modules();
 	show_regs(regs);
-	ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
+	ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
 #define DO_ERROR_INFO(name, signo, code, str) \
 asmlinkage __visible void name(struct pt_regs *regs) \
 { \
-	do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
+	do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
 }
 DO_ERROR_INFO(do_trap_unknown,
@@ -124,9 +124,9 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
 asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
 	if (user_mode(regs))
-		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
-	else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
-		regs->sepc += get_break_insn_length(regs->sepc);
+		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
+		regs->epc += get_break_insn_length(regs->epc);
 	else
 		die(regs, "Kernel BUG");
 }
@@ -153,9 +153,9 @@ void __init trap_init(void)
 	 * Set sup0 scratch register to 0, indicating to exception vector
 	 * that we are presently executing in the kernel
 	 */
-	csr_write(CSR_SSCRATCH, 0);
+	csr_write(CSR_SCRATCH, 0);
 	/* Set the exception vector address */
-	csr_write(CSR_STVEC, &handle_exception);
+	csr_write(CSR_TVEC, &handle_exception);
 	/* Enable all interrupts */
-	csr_write(CSR_SIE, -1);
+	csr_write(CSR_IE, -1);
 }
@@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user)
 	/* Enable access to user memory */
 	li t6, SR_SUM
-	csrs CSR_SSTATUS, t6
+	csrs CSR_STATUS, t6
 	add a3, a1, a2
 	/* Use word-oriented copy only if low-order bits match */
@@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user)
 3:
 	/* Disable access to user memory */
-	csrc CSR_SSTATUS, t6
+	csrc CSR_STATUS, t6
 	li a0, 0
 	ret
 4: /* Edge case: unalignment */
@@ -72,7 +72,7 @@ ENTRY(__clear_user)
 	/* Enable access to user memory */
 	li t6, SR_SUM
-	csrs CSR_SSTATUS, t6
+	csrs CSR_STATUS, t6
 	add a3, a0, a1
 	addi t0, a0, SZREG-1
@@ -94,7 +94,7 @@ ENTRY(__clear_user)
 3:
 	/* Disable access to user memory */
-	csrc CSR_SSTATUS, t6
+	csrc CSR_STATUS, t6
 	li a0, 0
 	ret
 4: /* Edge case: unalignment */
@@ -114,11 +114,11 @@ ENDPROC(__clear_user)
 	/* Fixup code for __copy_user(10) and __clear_user(11) */
 10:
 	/* Disable access to user memory */
-	csrs CSR_SSTATUS, t6
+	csrs CSR_STATUS, t6
 	mv a0, a2
 	ret
 11:
-	csrs CSR_SSTATUS, t6
+	csrs CSR_STATUS, t6
 	mv a0, a1
 	ret
 .previous
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
-	fixup = search_exception_tables(regs->sepc);
+	fixup = search_exception_tables(regs->epc);
 	if (fixup) {
-		regs->sepc = fixup->fixup;
+		regs->epc = fixup->fixup;
 		return 1;
 	}
 	return 0;
...
@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;
-	cause = regs->scause;
-	addr = regs->sbadaddr;
+	cause = regs->cause;
+	addr = regs->badaddr;
 	tsk = current;
 	mm = tsk->mm;
@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;
 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_SPIE))
+	if (likely(regs->status & SR_PIE))
 		local_irq_enable();
 	/*
...
@@ -19,7 +19,7 @@
 static int riscv_clock_next_event(unsigned long delta,
 		struct clock_event_device *ce)
 {
-	csr_set(sie, SIE_STIE);
+	csr_set(CSR_IE, IE_TIE);
 	sbi_set_timer(get_cycles64() + delta);
 	return 0;
 }
@@ -61,13 +61,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
 	ce->cpumask = cpumask_of(cpu);
 	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
-	csr_set(sie, SIE_STIE);
+	csr_set(CSR_IE, IE_TIE);
 	return 0;
 }
 static int riscv_timer_dying_cpu(unsigned int cpu)
 {
-	csr_clear(sie, SIE_STIE);
+	csr_clear(CSR_IE, IE_TIE);
 	return 0;
 }
@@ -76,7 +76,7 @@ void riscv_timer_interrupt(void)
 {
 	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
-	csr_clear(sie, SIE_STIE);
+	csr_clear(CSR_IE, IE_TIE);
 	evdev->event_handler(evdev);
 }
...
@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 	WARN_ON_ONCE(!handler->present);
-	csr_clear(sie, SIE_SEIE);
+	csr_clear(CSR_IE, IE_EIE);
 	while ((hwirq = readl(claim))) {
 		int irq = irq_find_mapping(plic_irqdomain, hwirq);
@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 		else
 			generic_handle_irq(irq);
 	}
-	csr_set(sie, SIE_SEIE);
+	csr_set(CSR_IE, IE_EIE);
 }
 /*
@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node,
 			continue;
 		}
-		/* skip contexts other than supervisor external interrupt */
-		if (parent.args[0] != IRQ_S_EXT)
+		/*
+		 * Skip contexts other than external interrupts for our
+		 * privilege level.
+		 */
+		if (parent.args[0] != IRQ_EXT)
 			continue;
 		hartid = plic_find_hart_id(parent.np);
...