Commit 48cf12d8 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/irq: Inline call_do_irq() and call_do_softirq()

call_do_irq() and call_do_softirq() are simple enough to be
worth inlining.

Inlining them avoids an mflr/mtlr pair plus a save/reload on the stack
(a rough before/after sketch follows the commit metadata below).

This is inspired by the s390 arch. Several other arches do more or
less the same. The way the sparc arch does it seems odd, though.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210320122227.345427-1-mpe@ellerman.id.au
parent d2313da4
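
For illustration only (not part of the patch), here is a hedged sketch of the
32-bit call sequence before and after the change. The "before" half mirrors the
helper removed from the 32-bit assembly further down; the "after" half is
roughly what the new inline asm expands to at the call site, with r9 standing
in for whichever register the compiler happens to keep sp in.

	# Before: a bl to the out-of-line helper, which must save and
	# restore LR itself because it makes a nested call.
		bl	call_do_softirq
		...
	call_do_softirq:
		mflr	r0				# extra LR save ...
		stw	r0,4(r1)
		stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
		mr	r1,r3				# switch to the IRQ stack
		bl	__do_softirq
		lwz	r1,0(r1)			# undo the stack switch
		lwz	r0,4(r1)			# ... and extra LR reload
		mtlr	r0
		blr

	# After: the asm statement expands directly in the caller, so only
	# the stack switch and the call remain; LR handling is folded into
	# the caller's existing prologue/epilogue.
		stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r9)	# r9: hypothetical, compiler-chosen
		mr	r1,r9
		bl	__do_softirq
		lwz	r1,0(r1)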
@@ -53,8 +53,6 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
-void call_do_softirq(void *sp);
-void call_do_irq(struct pt_regs *regs, void *sp);
 extern void do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
...
@@ -667,6 +667,47 @@ static inline void check_stack_overflow(void)
 	}
 }
 
+static __always_inline void call_do_softirq(const void *sp)
+{
+	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+	asm volatile (
+		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
+		"mr		%%r1, %[sp]		;"
+		"bl		%[callee]		;"
+		 PPC_LL "	%%r1, 0(%%r1)		;"
+		 : // Outputs
+		 : // Inputs
+		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+		   [callee] "i" (__do_softirq)
+		 : // Clobbers
+		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+		   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+		   "r11", "r12"
+	);
+}
+
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+	register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+	asm volatile (
+		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
+		"mr		%%r1, %[sp]		;"
+		"bl		%[callee]		;"
+		 PPC_LL "	%%r1, 0(%%r1)		;"
+		 : // Outputs
+		   "+r" (r3)
+		 : // Inputs
+		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+		   [callee] "i" (__do_irq)
+		 : // Clobbers
+		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+		   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+		   "r11", "r12"
+	);
+}
+
 void __do_irq(struct pt_regs *regs)
 {
 	unsigned int irq;
...
@@ -27,31 +27,6 @@
 
 	.text
 
-_GLOBAL(call_do_softirq)
-	mflr	r0
-	stw	r0,4(r1)
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-	mr	r1,r3
-	bl	__do_softirq
-	lwz	r1,0(r1)
-	lwz	r0,4(r1)
-	mtlr	r0
-	blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
-	mflr	r0
-	stw	r0,4(r1)
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-	mr	r1,r4
-	bl	__do_irq
-	lwz	r1,0(r1)
-	lwz	r0,4(r1)
-	mtlr	r0
-	blr
-
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */
...
@@ -27,28 +27,6 @@
 
 	.text
 
-_GLOBAL(call_do_softirq)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-	mr	r1,r3
-	bl	__do_softirq
-	ld	r1,0(r1)
-	ld	r0,16(r1)
-	mtlr	r0
-	blr
-
-_GLOBAL(call_do_irq)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-	mr	r1,r4
-	bl	__do_irq
-	ld	r1,0(r1)
-	ld	r0,16(r1)
-	mtlr	r0
-	blr
-
 _GLOBAL(__bswapdi2)
 EXPORT_SYMBOL(__bswapdi2)
 	srdi	r8,r3,32
...
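
For context, a hedged usage sketch (approximate, not copied from the tree): the
per-CPU softirq stack declared in the header hunk above (softirq_ctx) is what
gets handed to call_do_softirq(), along these lines:

	/* Approximate caller, for illustration only: run softirqs on this
	 * CPU's dedicated softirq stack via the now-inline helper. */
	void do_softirq_own_stack(void)
	{
		call_do_softirq(softirq_ctx[smp_processor_id()]);
	}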