Commit dc623182 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/interrupt: update common interrupt code for 64e

This makes adjustments to the 64-bit asm and common C interrupt return
code so that they are usable by the 64e subarchitecture.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210316104206.407354-4-npiggin@gmail.com
parent 4228b2c3
arch/powerpc/kernel/entry_64.S:
@@ -632,7 +632,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	addi	r1,r1,SWITCH_FRAME_SIZE
 	blr
 
-#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
 	 * touched, no exit work created, then this can be used.
@@ -644,6 +643,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
 	kuap_check_amr r3, r4
 	ld	r5,_MSR(r1)
 	andi.	r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
 	bne	.Lfast_user_interrupt_return_amr
 	kuap_kernel_restore r3, r4
 	andi.	r0,r5,MSR_RI
@@ -652,6 +652,10 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	unrecoverable_exception
 	b	. /* should not get here */
+#else
+	bne	.Lfast_user_interrupt_return
+	b	.Lfast_kernel_interrupt_return
+#endif
 
 	.balign IFETCH_ALIGN_BYTES
 	.globl interrupt_return
@@ -665,8 +669,10 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
 	cmpdi	r3,0
 	bne-	.Lrestore_nvgprs
 
+#ifdef CONFIG_PPC_BOOK3S
.Lfast_user_interrupt_return_amr:
 	kuap_user_restore r3, r4
+#endif
.Lfast_user_interrupt_return:
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
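Taken together, the hunks above invert the original layout: fast_interrupt_return is now assembled on all 64-bit subarchitectures, and only the KUAP/AMR handling and the MSR[RI] recoverability check remain Book3S-specific. A minimal C-style sketch of the resulting dispatch, using hypothetical stand-in functions for the asm labels (illustrative only, not the generated code):

#include <stdbool.h>
#include <stdio.h>

#define MSR_PR	0x4000UL	/* problem state: interrupt came from user */
#define MSR_RI	0x0002UL	/* recoverable interrupt */

/* Hypothetical stand-ins for the asm labels in the diff above. */
static void fast_user_interrupt_return_amr(void) { puts("user return + AMR restore"); }
static void fast_user_interrupt_return(void)     { puts("user return"); }
static void fast_kernel_interrupt_return(void)   { puts("kernel return"); }
static void unrecoverable_exception(void)        { puts("unrecoverable"); }

static void fast_interrupt_return(unsigned long msr, bool book3s)
{
	if (book3s) {
		/* Book3S: the user path also restores the user AMR, and
		 * the kernel path must still see MSR[RI] set. */
		if (msr & MSR_PR)
			fast_user_interrupt_return_amr();
		else if (msr & MSR_RI)
			fast_kernel_interrupt_return();
		else
			unrecoverable_exception();
	} else {
		/* Book3E (64e): no AMR, no RI check; dispatch on MSR[PR]. */
		if (msr & MSR_PR)
			fast_user_interrupt_return();
		else
			fast_kernel_interrupt_return();
	}
}

int main(void)
{
	fast_interrupt_return(MSR_PR | MSR_RI, true);	/* Book3S, from user */
	fast_interrupt_return(0, false);		/* Book3E, from kernel */
	return 0;
}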
@@ -775,7 +781,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 	RFI_TO_KERNEL
 	b	. /* prevent speculative execution */
-#endif /* CONFIG_PPC_BOOK3S */
 
 #ifdef CONFIG_PPC_RTAS
 /*
arch/powerpc/kernel/interrupt.c:
@@ -235,6 +235,10 @@ static notrace void booke_load_dbcr0(void)
 #endif
 }
 
+/* temporary hack for context tracking, removed in later patch */
+#include <linux/sched/debug.h>
+asmlinkage __visible void __sched schedule_user(void);
+
 /*
  * This should be called after a syscall returns, with r3 the return value
  * from the syscall. If this function returns non-zero, the system call
@@ -292,7 +296,11 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
 		if (ti_flags & _TIF_NEED_RESCHED) {
+#ifdef CONFIG_PPC_BOOK3E_64
+			schedule_user();
+#else
 			schedule();
+#endif
 		} else {
 			/*
 			 * SIGPENDING must restore signal handler function
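The schedule_user() used above is the point of the "temporary hack" declared in the previous hunk: 64e interrupt entry does not yet perform the context-tracking transitions, so calling plain schedule() here would run the scheduler while context tracking still counts the CPU as being in user mode. schedule_user(), provided by the generic scheduler (kernel/sched/core.c in kernels of this era), wraps schedule() in an exception_enter()/exception_exit() pair; abridged, it reads roughly:

asmlinkage __visible void __sched schedule_user(void)
{
	enum ctx_state prev_state = exception_enter();

	schedule();
	exception_exit(prev_state);
}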
@@ -349,18 +357,13 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 
 	account_cpu_user_exit();
 
-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not using this */
-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
-#endif
 	kuep_unlock();
 
 	return ret;
 }
 
-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
 notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
 {
 	unsigned long ti_flags;
@@ -372,7 +375,9 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!(regs->msr & MSR_PR));
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));
+#ifdef CONFIG_PPC_BOOK3S_64
 	CT_WARN_ON(ct_state() == CONTEXT_USER);
+#endif
 
 	/*
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
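Guarding CT_WARN_ON() here follows the same logic: at this point in the series only Book3S-64 entry code performs the context-tracking transitions that make the assertion meaningful, so on 64e it would fire spuriously. For reference, CT_WARN_ON() is defined in include/linux/context_tracking.h as roughly:

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))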
@@ -387,7 +392,11 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable(); /* returning to user: may enable */
 		if (ti_flags & _TIF_NEED_RESCHED) {
+#ifdef CONFIG_PPC_BOOK3E_64
+			schedule_user();
+#else
 			schedule();
+#endif
 		} else {
 			if (ti_flags & _TIF_SIGPENDING)
 				ret |= _TIF_RESTOREALL;
@@ -432,10 +441,9 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 
 	account_cpu_user_exit();
 
-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
 
 	return ret;
 }
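Both comment rewrites (here and in syscall_exit_prepare above) compress the old wording but keep its reasoning: the exit-work loop can context switch, and a switch must run with the kernel AMR in place, so the user access locks are restored only after the last possible scheduling point. A compilable toy sketch of that ordering constraint, with hypothetical stand-ins for the helpers in the diff:

#include <stdio.h>

static void do_exit_work(void)          { puts("exit work: may schedule()"); }
static void account_cpu_user_exit(void) { puts("accounting"); }
static void kuap_user_restore(void)     { puts("restore user access locks (last)"); }

int main(void)
{
	/* Nothing after kuap_user_restore() may context switch, so any
	 * switch triggered by the exit work sees the kernel AMR. */
	do_exit_work();
	account_cpu_user_exit();
	kuap_user_restore();
	return 0;
}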
@@ -456,7 +464,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	 * CT_WARN_ON comes here via program_check_exception,
 	 * so avoid recursion.
 	 */
-	if (TRAP(regs) != 0x700)
+	if (IS_ENABLED(CONFIG_BOOKS) && TRAP(regs) != 0x700)
 		CT_WARN_ON(ct_state() == CONTEXT_USER);
 
 	kuap = kuap_get_and_assert_locked();
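One caveat worth flagging: no Kconfig symbol named BOOKS exists, so IS_ENABLED(CONFIG_BOOKS) always evaluates to 0 and this warning is compiled out everywhere. It appears to be a typo for CONFIG_PPC_BOOK3S, i.e. the check was presumably intended to read:

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && TRAP(regs) != 0x700)
		CT_WARN_ON(ct_state() == CONTEXT_USER);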
@@ -497,12 +505,11 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 #endif
 
 	/*
-	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
-	 * which would cause Read-After-Write stalls. Hence, we take the AMR
-	 * value from the check above.
+	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
+	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
+	 * AMR value from the check above.
 	 */
 	kuap_kernel_restore(regs, kuap);
 
 	return ret;
 }
-#endif