Commit fa771d02 authored by James Hogan

metag: move irq enable out of irqflags.h on SMP

The SMP version of arch_local_irq_enable() uses preempt_disable(), but
<asm/irqflags.h> doesn't include <linux/preempt.h> causing the following
errors on SMP when pstore/ftrace is enabled (caught by buildbot smp
allyesconfig):

In file included from include/linux/irqflags.h:15,
                 from fs/pstore/ftrace.c:16:
arch/metag/include/asm/irqflags.h: In function 'arch_local_irq_enable':
arch/metag/include/asm/irqflags.h:84: error: implicit declaration of function 'preempt_disable'
arch/metag/include/asm/irqflags.h:86: error: implicit declaration of function 'preempt_enable_no_resched'

However <linux/preempt.h> cannot be easily included from
<asm/irqflags.h> as it can cause circular include dependencies in the
!SMP case, and potentially in the SMP case in the future. Therefore move
the SMP implementation of arch_local_irq_enable() into traps.c and use
an inline version of get_trigger_mask() which is also defined in traps.c
for SMP.
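
In outline, the split ends up as below (condensed from the diff that
follows; _get_trigger_mask() is the new static inline helper in traps.c
that get_trigger_mask() now wraps):

  /* arch/metag/include/asm/irqflags.h */
  #ifdef CONFIG_SMP
  /* out of line, so irqflags.h need not pull in <linux/preempt.h> */
  void arch_local_irq_enable(void);
  #else
  static inline void arch_local_irq_enable(void)
  {
          arch_local_irq_restore(get_trigger_mask());
  }
  #endif

  /* arch/metag/kernel/traps.c (SMP build) */
  void arch_local_irq_enable(void)
  {
          preempt_disable();
          arch_local_irq_restore(_get_trigger_mask());
          preempt_enable_no_resched();
  }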

This adds an extra layer of function call / stack push when
preempt_disable() needs to call other functions. In the non-preemptive
SMP case, however, it should be about as fast: arch_local_irq_enable()
was already making an out-of-line call to get_trigger_mask(), which is
now inlined.
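
For reference, with CONFIG_PREEMPT_COUNT disabled the preempt macros
reduce to plain compiler barriers (paraphrased below from
<linux/preempt.h> of this era; worth checking against the exact tree),
so the only added cost is the out-of-line call itself:

  /* <linux/preempt.h>, !CONFIG_PREEMPT_COUNT case (paraphrased) */
  #define preempt_disable()               barrier()
  #define preempt_enable_no_resched()     barrier()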
Signed-off-by: James Hogan <james.hogan@imgtec.com>
parent 97c3ec63
--- a/arch/metag/include/asm/irqflags.h
+++ b/arch/metag/include/asm/irqflags.h
@@ -78,16 +78,15 @@ static inline void arch_local_irq_disable(void)
 	asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
 }
 
-static inline void arch_local_irq_enable(void)
-{
 #ifdef CONFIG_SMP
-	preempt_disable();
-	arch_local_irq_restore(get_trigger_mask());
-	preempt_enable_no_resched();
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
 #else
+static inline void arch_local_irq_enable(void)
+{
 	arch_local_irq_restore(get_trigger_mask());
-#endif
 }
+#endif
 
 #endif /* (__ASSEMBLY__) */
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/preempt.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -776,17 +777,30 @@ int traps_restore_context(void)
 #endif
 
 #ifdef CONFIG_SMP
-unsigned int get_trigger_mask(void)
+static inline unsigned int _get_trigger_mask(void)
 {
 	unsigned long cpu = smp_processor_id();
 	return per_cpu(trigger_mask, cpu);
 }
 
+unsigned int get_trigger_mask(void)
+{
+	return _get_trigger_mask();
+}
+
 static void set_trigger_mask(unsigned int mask)
 {
 	unsigned long cpu = smp_processor_id();
 	per_cpu(trigger_mask, cpu) = mask;
 }
+
+void arch_local_irq_enable(void)
+{
+	preempt_disable();
+	arch_local_irq_restore(_get_trigger_mask());
+	preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(arch_local_irq_enable);
 #else
 static void set_trigger_mask(unsigned int mask)
 {