Commit de4415d0 authored by Max Filippov

xtensa: move trace_hardirqs_off call back to entry.S

The context tracking call must be made after the hardirq tracking call,
otherwise lockdep_assert_irqs_disabled() called from rcu_eqs_exit() gives
a warning. To avoid duplicating the context tracking logic in the IRQ and
exception entry paths, move the trace_hardirqs_off call back to the common
entry code.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 4b816909
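
To make the ordering constraint concrete, below is a minimal user-space C sketch of the rule this commit enforces on kernel entry. Every function in it is a simplified stand-in with assumed behaviour, not the kernel's real trace_hardirqs_off(), lockdep_assert_irqs_disabled() or rcu_eqs_exit()/context tracking code:

/* Minimal sketch of the entry-path ordering rule, using stand-in functions. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool hardirqs_off_traced;                /* what "lockdep" believes */

static void trace_hardirqs_off(void)            /* stand-in: record that IRQs are off */
{
	hardirqs_off_traced = true;
}

static void lockdep_assert_irqs_disabled(void)  /* stand-in: warn if nothing was recorded */
{
	assert(hardirqs_off_traced);
}

static void context_tracking_enter(void)        /* stand-in for the rcu_eqs_exit() path */
{
	lockdep_assert_irqs_disabled();
}

int main(void)
{
	/* The order the common entry code now guarantees: */
	trace_hardirqs_off();       /* 1. hardirq tracking, done once in entry.S */
	context_tracking_enter();   /* 2. context tracking may assert IRQs are disabled */
	puts("ordering OK");
	return 0;
}

If the two calls were swapped, the assert (the kernel's lockdep warning) would fire, which is exactly the problem the commit message describes.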
@@ -424,7 +424,6 @@ KABI_W	or	a3, a3, a0
 	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
 KABI_W	movi	a2, PS_WOE_MASK
 KABI_W	or	a3, a3, a2
-	rsr	a2, exccause
 #endif
 
 	/* restore return address (or 0 if return to userspace) */
@@ -451,19 +450,27 @@ KABI_W	or	a3, a3, a2
 
 	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	rsr	abi_tmp0, ps
+	extui	abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beqz	abi_tmp0, 1f
+	abi_call	trace_hardirqs_off
+1:
+#endif
+
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
 
-	rsr	a4, excsave1
-	addx4	a4, a2, a4
-	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
-	mov	abi_arg1, a2			# pass EXCCAUSE
+	l32i	abi_arg1, a1, PT_EXCCAUSE	# pass EXCCAUSE
+	rsr	abi_tmp0, excsave1
+	addx4	abi_tmp0, abi_arg1, abi_tmp0
+	l32i	abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT	# load handler
 	mov	abi_arg0, a1			# pass stack frame
 
 	/* Call the second-level handler */
-	abi_callx	a4
+	abi_callx	abi_tmp0
 
 	/* Jump here for exception exit */
 	.global common_exception_return
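
For readers who do not follow xtensa assembly, the new sequence in the hunk above corresponds roughly to the C model below. This is an illustrative sketch under assumed names (pt_regs_model, exc_table_default, read_ps_intlevel() and the handler signature are stand-ins, not kernel definitions); only the trace_hardirqs_off call, the PS.INTLEVEL test and the EXCCAUSE-indexed dispatch mirror what the assembly does:

/* Illustrative C model of the new common exception path; not kernel code. */
#include <stdio.h>

#define CONFIG_TRACE_IRQFLAGS 1
#define EXCCAUSE_MAX 64

struct pt_regs_model {
	unsigned long exccause;		/* saved EXCCAUSE (the PT_EXCCAUSE slot in the frame) */
};

typedef void (*exc_handler_t)(struct pt_regs_model *regs, unsigned long exccause);

/* Stand-in for the exception table that the assembly reaches through EXCSAVE1. */
static exc_handler_t exc_table_default[EXCCAUSE_MAX];

static void trace_hardirqs_off(void)		/* stand-in for the lockdep call */
{
	puts("hardirqs off traced");
}

static unsigned long read_ps_intlevel(void)	/* stand-in for "rsr ps" + extui of INTLEVEL */
{
	return 1;				/* pretend interrupts are masked */
}

static void default_handler(struct pt_regs_model *regs, unsigned long exccause)
{
	(void)regs;
	printf("handling exccause %lu\n", exccause);
}

static void common_exception_model(struct pt_regs_model *regs)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Tell lockdep only when PS.INTLEVEL is nonzero, as the new assembly does. */
	if (read_ps_intlevel())
		trace_hardirqs_off();
#endif
	/* Dispatch: index the table by EXCCAUSE, pass the frame and the cause. */
	exc_table_default[regs->exccause](regs, regs->exccause);
}

int main(void)
{
	struct pt_regs_model regs = { .exccause = 9 };

	for (int i = 0; i < EXCCAUSE_MAX; ++i)
		exc_table_default[i] = default_handler;
	common_exception_model(&regs);
	return 0;
}
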
@@ -242,12 +242,8 @@ DEFINE_PER_CPU(unsigned long, nmi_count);
 
 void do_nmi(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs;
-
-	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
-		trace_hardirqs_off();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	old_regs = set_irq_regs(regs);
 	nmi_enter();
 	++*this_cpu_ptr(&nmi_count);
 	check_valid_nmi();
@@ -269,12 +265,9 @@ void do_interrupt(struct pt_regs *regs)
 		XCHAL_INTLEVEL6_MASK,
 		XCHAL_INTLEVEL7_MASK,
 	};
-	struct pt_regs *old_regs;
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned unhandled = ~0u;
 
-	trace_hardirqs_off();
-
-	old_regs = set_irq_regs(regs);
 	irq_enter();
 
 	for (;;) {