Commit 02b849f7 authored by Ralf Baechle

MIPS: Get rid of the use of .macro in C code.

It fails with LTO and probably has always been fragile.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 0bfbf6a2
...@@ -10,34 +10,13 @@ ...@@ -10,34 +10,13 @@
#ifndef _ASM_HAZARDS_H #ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H #define _ASM_HAZARDS_H
#ifdef __ASSEMBLY__ #include <linux/stringify.h>
#define ASMMACRO(name, code...) .macro name; code; .endm
#else
#include <asm/cpu-features.h>
#define ASMMACRO(name, code...) \
__asm__(".macro " #name "; " #code "; .endm"); \
\
static inline void name(void) \
{ \
__asm__ __volatile__ (#name); \
}
/*
* MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
*/
extern void mips_ihb(void);
#endif
ASMMACRO(_ssnop, #define ___ssnop \
sll $0, $0, 1 sll $0, $0, 1
)
ASMMACRO(_ehb, #define ___ehb \
sll $0, $0, 3 sll $0, $0, 3
)
/* /*
* TLB hazards * TLB hazards
...@@ -48,24 +27,24 @@ ASMMACRO(_ehb, ...@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
* MIPSR2 defines ehb for hazard avoidance * MIPSR2 defines ehb for hazard avoidance
*/ */
ASMMACRO(mtc0_tlbw_hazard, #define __mtc0_tlbw_hazard \
_ehb ___ehb
)
ASMMACRO(tlbw_use_hazard, #define __tlbw_use_hazard \
_ehb ___ehb
)
ASMMACRO(tlb_probe_hazard, #define __tlb_probe_hazard \
_ehb ___ehb
)
ASMMACRO(irq_enable_hazard, #define __irq_enable_hazard \
_ehb ___ehb
)
ASMMACRO(irq_disable_hazard, #define __irq_disable_hazard \
_ehb ___ehb
)
ASMMACRO(back_to_back_c0_hazard, #define __back_to_back_c0_hazard \
_ehb ___ehb
)
/* /*
* gcc has a tradition of misscompiling the previous construct using the * gcc has a tradition of misscompiling the previous construct using the
* address of a label as argument to inline assembler. Gas otoh has the * address of a label as argument to inline assembler. Gas otoh has the
...@@ -94,24 +73,42 @@ do { \ ...@@ -94,24 +73,42 @@ do { \
* These are slightly complicated by the fact that we guarantee R1 kernels to * These are slightly complicated by the fact that we guarantee R1 kernels to
* run fine on R2 processors. * run fine on R2 processors.
*/ */
ASMMACRO(mtc0_tlbw_hazard,
_ssnop; _ssnop; _ehb #define __mtc0_tlbw_hazard \
) ___ssnop; \
ASMMACRO(tlbw_use_hazard, ___ssnop; \
_ssnop; _ssnop; _ssnop; _ehb ___ehb
)
ASMMACRO(tlb_probe_hazard, #define __tlbw_use_hazard \
_ssnop; _ssnop; _ssnop; _ehb ___ssnop; \
) ___ssnop; \
ASMMACRO(irq_enable_hazard, ___ssnop; \
_ssnop; _ssnop; _ssnop; _ehb ___ehb
)
ASMMACRO(irq_disable_hazard, #define __tlb_probe_hazard \
_ssnop; _ssnop; _ssnop; _ehb ___ssnop; \
) ___ssnop; \
ASMMACRO(back_to_back_c0_hazard, ___ssnop; \
_ssnop; _ssnop; _ssnop; _ehb ___ehb
)
#define __irq_enable_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __irq_disable_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __back_to_back_c0_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
/* /*
* gcc has a tradition of misscompiling the previous construct using the * gcc has a tradition of misscompiling the previous construct using the
* address of a label as argument to inline assembler. Gas otoh has the * address of a label as argument to inline assembler. Gas otoh has the
...@@ -147,18 +144,18 @@ do { \ ...@@ -147,18 +144,18 @@ do { \
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
*/ */
ASMMACRO(mtc0_tlbw_hazard, #define __mtc0_tlbw_hazard
)
ASMMACRO(tlbw_use_hazard, #define __tlbw_use_hazard
)
ASMMACRO(tlb_probe_hazard, #define __tlb_probe_hazard
)
ASMMACRO(irq_enable_hazard, #define __irq_enable_hazard
)
ASMMACRO(irq_disable_hazard, #define __irq_disable_hazard
)
ASMMACRO(back_to_back_c0_hazard, #define __back_to_back_c0_hazard
)
#define instruction_hazard() do { } while (0) #define instruction_hazard() do { } while (0)
#elif defined(CONFIG_CPU_SB1) #elif defined(CONFIG_CPU_SB1)
...@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard, ...@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
/* /*
* Mostly like R4000 for historic reasons * Mostly like R4000 for historic reasons
*/ */
ASMMACRO(mtc0_tlbw_hazard, #define __mtc0_tlbw_hazard
)
ASMMACRO(tlbw_use_hazard, #define __tlbw_use_hazard
)
ASMMACRO(tlb_probe_hazard, #define __tlb_probe_hazard
)
ASMMACRO(irq_enable_hazard, #define __irq_enable_hazard
)
ASMMACRO(irq_disable_hazard, #define __irq_disable_hazard \
_ssnop; _ssnop; _ssnop ___ssnop; \
) ___ssnop; \
ASMMACRO(back_to_back_c0_hazard, ___ssnop
)
#define __back_to_back_c0_hazard
#define instruction_hazard() do { } while (0) #define instruction_hazard() do { } while (0)
#else #else
...@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard, ...@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
* hazard so this is nice trick to have an optimal code for a range of * hazard so this is nice trick to have an optimal code for a range of
* processors. * processors.
*/ */
ASMMACRO(mtc0_tlbw_hazard, #define __mtc0_tlbw_hazard \
nop; nop nop; \
) nop
ASMMACRO(tlbw_use_hazard,
nop; nop; nop #define __tlbw_use_hazard \
) nop; \
ASMMACRO(tlb_probe_hazard, nop; \
nop; nop; nop nop
)
ASMMACRO(irq_enable_hazard, #define __tlb_probe_hazard \
_ssnop; _ssnop; _ssnop; nop; \
) nop; \
ASMMACRO(irq_disable_hazard, nop
nop; nop; nop
) #define __irq_enable_hazard \
ASMMACRO(back_to_back_c0_hazard, ___ssnop; \
_ssnop; _ssnop; _ssnop; ___ssnop; \
) ___ssnop
#define __irq_disable_hazard \
nop; \
nop; \
nop
#define __back_to_back_c0_hazard \
___ssnop; \
___ssnop; \
___ssnop
#define instruction_hazard() do { } while (0) #define instruction_hazard() do { } while (0)
#endif #endif
...@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard, ...@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
/* FPU hazards */ /* FPU hazards */
#if defined(CONFIG_CPU_SB1) #if defined(CONFIG_CPU_SB1)
ASMMACRO(enable_fpu_hazard,
.set push; #define __enable_fpu_hazard \
.set mips64; .set push; \
.set noreorder; .set mips64; \
_ssnop; .set noreorder; \
bnezl $0, .+4; ___ssnop; \
_ssnop; bnezl $0, .+4; \
.set pop ___ssnop; \
) .set pop
ASMMACRO(disable_fpu_hazard,
) #define __disable_fpu_hazard
#elif defined(CONFIG_CPU_MIPSR2) #elif defined(CONFIG_CPU_MIPSR2)
ASMMACRO(enable_fpu_hazard,
_ehb #define __enable_fpu_hazard \
) ___ehb
ASMMACRO(disable_fpu_hazard,
_ehb #define __disable_fpu_hazard \
) ___ehb
#else #else
ASMMACRO(enable_fpu_hazard,
nop; nop; nop; nop #define __enable_fpu_hazard \
) nop; \
ASMMACRO(disable_fpu_hazard, nop; \
_ehb nop; \
) nop
#define __disable_fpu_hazard \
___ehb
#endif #endif
#ifdef __ASSEMBLY__
#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard
#else
#define _ssnop() \
do { \
__asm__ __volatile__( \
__stringify(___ssnop) \
); \
} while (0)
#define _ehb() \
do { \
__asm__ __volatile__( \
__stringify(___ehb) \
); \
} while (0)
#define mtc0_tlbw_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__mtc0_tlbw_hazard) \
); \
} while (0)
#define tlbw_use_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__tlbw_use_hazard) \
); \
} while (0)
#define tlb_probe_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__tlb_probe_hazard) \
); \
} while (0)
#define irq_enable_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__irq_enable_hazard) \
); \
} while (0)
#define irq_disable_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__irq_disable_hazard) \
); \
} while (0)
#define back_to_back_c0_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__back_to_back_c0_hazard) \
); \
} while (0)
#define enable_fpu_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__enable_fpu_hazard) \
); \
} while (0)
#define disable_fpu_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__disable_fpu_hazard) \
); \
} while (0)
/*
* MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
*/
extern void mips_ihb(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_HAZARDS_H */ #endif /* _ASM_HAZARDS_H */
...@@ -14,53 +14,48 @@ ...@@ -14,53 +14,48 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/hazards.h> #include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
__asm__( static inline void arch_local_irq_disable(void)
" .macro arch_local_irq_disable\n" {
__asm__ __volatile__(
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
" di \n" " di \n"
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : /* no outputs */
: /* no inputs */
static inline void arch_local_irq_disable(void) : "memory");
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
} }
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
__asm__( asm __volatile__(
" .macro arch_local_irq_save result \n"
" .set push \n" " .set push \n"
" .set reorder \n" " .set reorder \n"
" .set noat \n" " .set noat \n"
" di \\result \n" " di %[flags] \n"
" andi \\result, 1 \n" " andi %[flags], 1 \n"
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : [flags] "=r" (flags)
: /* no inputs */
: "memory");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags; return flags;
} }
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__( __asm__ __volatile__(
" .macro arch_local_irq_restore flags \n"
" .set push \n" " .set push \n"
" .set noreorder \n" " .set noreorder \n"
" .set noat \n" " .set noat \n"
...@@ -69,7 +64,7 @@ __asm__( ...@@ -69,7 +64,7 @@ __asm__(
* Slow, but doesn't suffer from a relatively unlikely race * Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1. * condition we're having since days 1.
*/ */
" beqz \\flags, 1f \n" " beqz %[flags], 1f \n"
" di \n" " di \n"
" ei \n" " ei \n"
"1: \n" "1: \n"
...@@ -78,33 +73,44 @@ __asm__( ...@@ -78,33 +73,44 @@ __asm__(
* Fast, dangerous. Life is fun, life is good. * Fast, dangerous. Life is fun, life is good.
*/ */
" mfc0 $1, $12 \n" " mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n" " ins $1, %[flags], 0, 1 \n"
" mtc0 $1, $12 \n" " mtc0 $1, $12 \n"
#endif #endif
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : [flags] "=r" (__tmp1)
: "0" (flags)
static inline void arch_local_irq_restore(unsigned long flags) : "memory");
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
} }
static inline void __arch_local_irq_restore(unsigned long flags) static inline void __arch_local_irq_restore(unsigned long flags)
{ {
unsigned long __tmp1;
__asm__ __volatile__( __asm__ __volatile__(
"arch_local_irq_restore\t%0" " .set push \n"
: "=r" (__tmp1) " .set noreorder \n"
: "0" (flags) " .set noat \n"
: "memory"); #if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz %[flags], 1f \n"
" di \n"
" ei \n"
"1: \n"
#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, %[flags], 0, 1 \n"
" mtc0 $1, $12 \n"
#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (flags)
: "0" (flags)
: "memory");
} }
#else #else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
...@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags); ...@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
__asm__( extern void smtc_ipi_replay(void);
" .macro arch_local_irq_enable \n"
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
" .set push \n" " .set push \n"
" .set reorder \n" " .set reorder \n"
" .set noat \n" " .set noat \n"
...@@ -133,45 +149,28 @@ __asm__( ...@@ -133,45 +149,28 @@ __asm__(
" xori $1,0x1e \n" " xori $1,0x1e \n"
" mtc0 $1,$12 \n" " mtc0 $1,$12 \n"
#endif #endif
" irq_enable_hazard \n" " " __stringify(__irq_enable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm"); : /* no outputs */
: /* no inputs */
extern void smtc_ipi_replay(void); : "memory");
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_enable"
: /* no outputs */
: /* no inputs */
: "memory");
} }
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
__asm__( asm __volatile__(
" .macro arch_local_save_flags flags \n"
" .set push \n" " .set push \n"
" .set reorder \n" " .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\flags, $2, 1 \n" " mfc0 %[flags], $2, 1 \n"
#else #else
" mfc0 \\flags, $12 \n" " mfc0 %[flags], $12 \n"
#endif #endif
" .set pop \n" " .set pop \n"
" .endm \n"); : [flags] "=r" (flags));
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile("arch_local_save_flags %0" : "=r" (flags));
return flags; return flags;
} }
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/preempt.h> #include <linux/preempt.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/stringify.h>
#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
...@@ -34,8 +35,11 @@ ...@@ -34,8 +35,11 @@
* *
* Workaround: mask EXL bit of the result or place a nop before mfc0. * Workaround: mask EXL bit of the result or place a nop before mfc0.
*/ */
__asm__( notrace void arch_local_irq_disable(void)
" .macro arch_local_irq_disable\n" {
preempt_disable();
__asm__ __volatile__(
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
...@@ -52,108 +56,98 @@ __asm__( ...@@ -52,108 +56,98 @@ __asm__(
" .set noreorder \n" " .set noreorder \n"
" mtc0 $1,$12 \n" " mtc0 $1,$12 \n"
#endif #endif
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : /* no outputs */
: /* no inputs */
: "memory");
notrace void arch_local_irq_disable(void)
{
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(arch_local_irq_disable); EXPORT_SYMBOL(arch_local_irq_disable);
__asm__( notrace unsigned long arch_local_irq_save(void)
" .macro arch_local_irq_save result \n" {
unsigned long flags;
preempt_disable();
__asm__ __volatile__(
" .set push \n" " .set push \n"
" .set reorder \n" " .set reorder \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n" " mfc0 %[flags], $2, 1 \n"
" ori $1, \\result, 0x400 \n" " ori $1, %[flags], 0x400 \n"
" .set noreorder \n" " .set noreorder \n"
" mtc0 $1, $2, 1 \n" " mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n" " andi %[flags], %[flags], 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2) #elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#else #else
" mfc0 \\result, $12 \n" " mfc0 %[flags], $12 \n"
" ori $1, \\result, 0x1f \n" " ori $1, %[flags], 0x1f \n"
" xori $1, 0x1f \n" " xori $1, 0x1f \n"
" .set noreorder \n" " .set noreorder \n"
" mtc0 $1, $12 \n" " mtc0 $1, $12 \n"
#endif #endif
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : [flags] "=r" (flags)
: /* no inputs */
: "memory");
notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
preempt_disable();
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
preempt_enable(); preempt_enable();
return flags; return flags;
} }
EXPORT_SYMBOL(arch_local_irq_save); EXPORT_SYMBOL(arch_local_irq_save);
notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
preempt_disable();
__asm__( __asm__ __volatile__(
" .macro arch_local_irq_restore flags \n"
" .set push \n" " .set push \n"
" .set noreorder \n" " .set noreorder \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n" " mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n" " andi %[flags], 0x400 \n"
"ori $1, 0x400 \n" " ori $1, 0x400 \n"
"xori $1, 0x400 \n" " xori $1, 0x400 \n"
"or \\flags, $1 \n" " or %[flags], $1 \n"
"mtc0 \\flags, $2, 1 \n" " mtc0 %[flags], $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2) #elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#else #else
" mfc0 $1, $12 \n" " mfc0 $1, $12 \n"
" andi \\flags, 1 \n" " andi %[flags], 1 \n"
" ori $1, 0x1f \n" " ori $1, 0x1f \n"
" xori $1, 0x1f \n" " xori $1, 0x1f \n"
" or \\flags, $1 \n" " or %[flags], $1 \n"
" mtc0 \\flags, $12 \n" " mtc0 %[flags], $12 \n"
#endif #endif
" irq_disable_hazard \n" " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n" " .set pop \n"
" .endm \n"); : [flags] "=r" (__tmp1)
: "0" (flags)
: "memory");
notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(arch_local_irq_restore); EXPORT_SYMBOL(arch_local_irq_restore);
...@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags) ...@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
unsigned long __tmp1; unsigned long __tmp1;
preempt_disable(); preempt_disable();
__asm__ __volatile__( __asm__ __volatile__(
"arch_local_irq_restore\t%0" " .set push \n"
: "=r" (__tmp1) " .set noreorder \n"
: "0" (flags) " .set noat \n"
: "memory"); #ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" andi %[flags], 0x400 \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
" andi %[flags], 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $12 \n"
#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(__arch_local_irq_restore); EXPORT_SYMBOL(__arch_local_irq_restore);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment