Commit 7d65f4a6 authored by Frederic Weisbecker

irq: Consolidate do_softirq() arch overridden implementations

All arch overridden implementations of do_softirq() share the following
common code: disable irqs (to avoid races with the pending check),
check if there are softirqs pending, then execute __do_softirq() on
a specific stack.

Consolidate the common parts such that archs only worry about the
stack switch.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@au1.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@au1.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
parent ded79754
...@@ -159,19 +159,12 @@ void irq_ctx_exit(int cpu) ...@@ -159,19 +159,12 @@ void irq_ctx_exit(int cpu)
extern asmlinkage void __do_softirq(void); extern asmlinkage void __do_softirq(void);
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
curctx = current_thread_info(); curctx = current_thread_info();
irqctx = softirq_ctx[smp_processor_id()]; irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task; irqctx->tinfo.task = curctx->task;
...@@ -190,13 +183,6 @@ asmlinkage void do_softirq(void) ...@@ -190,13 +183,6 @@ asmlinkage void do_softirq(void)
"D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
"D0.5" "D0.5"
); );
/*
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
} }
#endif #endif
......
...@@ -499,22 +499,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1) ...@@ -499,22 +499,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1; *irq_stack_in_use = 1;
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
__u32 pending;
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
if (pending)
execute_on_irq_stack(__do_softirq, 0); execute_on_irq_stack(__do_softirq, 0);
local_irq_restore(flags);
} }
#endif /* CONFIG_IRQSTACKS */ #endif /* CONFIG_IRQSTACKS */
......
...@@ -593,7 +593,7 @@ void irq_ctx_init(void) ...@@ -593,7 +593,7 @@ void irq_ctx_init(void)
} }
} }
static inline void do_softirq_onstack(void) void do_softirq_own_stack(void)
{ {
struct thread_info *curtp, *irqtp; struct thread_info *curtp, *irqtp;
...@@ -611,21 +611,6 @@ static inline void do_softirq_onstack(void) ...@@ -611,21 +611,6 @@ static inline void do_softirq_onstack(void)
set_bits(irqtp->flags, &curtp->flags); set_bits(irqtp->flags, &curtp->flags);
} }
void do_softirq(void)
{
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending())
do_softirq_onstack();
local_irq_restore(flags);
}
irq_hw_number_t virq_to_hw(unsigned int virq) irq_hw_number_t virq_to_hw(unsigned int virq)
{ {
struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *irq_data = irq_get_irq_data(virq);
......
...@@ -157,16 +157,10 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -157,16 +157,10 @@ int arch_show_interrupts(struct seq_file *p, int prec)
/* /*
* Switch to the asynchronous interrupt stack for softirq execution. * Switch to the asynchronous interrupt stack for softirq execution.
*/ */
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags, old, new; unsigned long old, new;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
/* Get current stack pointer. */ /* Get current stack pointer. */
asm volatile("la %0,0(15)" : "=a" (old)); asm volatile("la %0,0(15)" : "=a" (old));
/* Check against async. stack address range. */ /* Check against async. stack address range. */
...@@ -175,7 +169,6 @@ asmlinkage void do_softirq(void) ...@@ -175,7 +169,6 @@ asmlinkage void do_softirq(void)
/* Need to switch to the async. stack. */ /* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD; new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old; ((struct stack_frame *) new)->back_chain = old;
asm volatile(" la 15,0(%0)\n" asm volatile(" la 15,0(%0)\n"
" basr 14,%2\n" " basr 14,%2\n"
" la 15,0(%1)\n" " la 15,0(%1)\n"
...@@ -187,9 +180,6 @@ asmlinkage void do_softirq(void) ...@@ -187,9 +180,6 @@ asmlinkage void do_softirq(void)
/* We are already on the async stack. */ /* We are already on the async stack. */
__do_softirq(); __do_softirq();
} }
}
local_irq_restore(flags);
} }
/* /*
......
...@@ -149,19 +149,12 @@ void irq_ctx_exit(int cpu) ...@@ -149,19 +149,12 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL; hardirq_ctx[cpu] = NULL;
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
curctx = current_thread_info(); curctx = current_thread_info();
irqctx = softirq_ctx[smp_processor_id()]; irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task; irqctx->tinfo.task = curctx->task;
...@@ -182,14 +175,6 @@ asmlinkage void do_softirq(void) ...@@ -182,14 +175,6 @@ asmlinkage void do_softirq(void)
: "memory", "r0", "r1", "r2", "r3", "r4", : "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
); );
/*
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
} }
#else #else
static inline void handle_one_irq(unsigned int irq) static inline void handle_one_irq(unsigned int irq)
......
...@@ -698,16 +698,8 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) ...@@ -698,16 +698,8 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
void *orig_sp, *sp = softirq_stack[smp_processor_id()]; void *orig_sp, *sp = softirq_stack[smp_processor_id()];
sp += THREAD_SIZE - 192 - STACK_BIAS; sp += THREAD_SIZE - 192 - STACK_BIAS;
...@@ -719,9 +711,6 @@ void do_softirq(void) ...@@ -719,9 +711,6 @@ void do_softirq(void)
__do_softirq(); __do_softirq();
__asm__ __volatile__("mov %0, %%sp" __asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp)); : : "r" (orig_sp));
}
local_irq_restore(flags);
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
......
...@@ -1342,7 +1342,7 @@ bad_gs: ...@@ -1342,7 +1342,7 @@ bad_gs:
.previous .previous
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq) ENTRY(do_softirq_own_stack)
CFI_STARTPROC CFI_STARTPROC
pushq_cfi %rbp pushq_cfi %rbp
CFI_REL_OFFSET rbp,0 CFI_REL_OFFSET rbp,0
...@@ -1359,7 +1359,7 @@ ENTRY(call_softirq) ...@@ -1359,7 +1359,7 @@ ENTRY(call_softirq)
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
ret ret
CFI_ENDPROC CFI_ENDPROC
END(call_softirq) END(do_softirq_own_stack)
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
......
...@@ -149,19 +149,12 @@ void irq_ctx_init(int cpu) ...@@ -149,19 +149,12 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
curctx = current_thread_info(); curctx = current_thread_info();
irqctx = __this_cpu_read(softirq_ctx); irqctx = __this_cpu_read(softirq_ctx);
irqctx->tinfo.task = curctx->task; irqctx->tinfo.task = curctx->task;
...@@ -171,13 +164,6 @@ asmlinkage void do_softirq(void) ...@@ -171,13 +164,6 @@ asmlinkage void do_softirq(void)
isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
call_on_stack(__do_softirq, isp); call_on_stack(__do_softirq, isp);
/*
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
} }
bool handle_irq(unsigned irq, struct pt_regs *regs) bool handle_irq(unsigned irq, struct pt_regs *regs)
......
...@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) ...@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
generic_handle_irq_desc(irq, desc); generic_handle_irq_desc(irq, desc);
return true; return true;
} }
extern void call_softirq(void);
asmlinkage void do_softirq(void)
{
__u32 pending;
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
/* Switch to interrupt stack */
if (pending) {
call_softirq();
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
}
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/irq.h>
/* /*
* These correspond to the IORESOURCE_IRQ_* defines in * These correspond to the IORESOURCE_IRQ_* defines in
...@@ -374,6 +375,16 @@ struct softirq_action ...@@ -374,6 +375,16 @@ struct softirq_action
asmlinkage void do_softirq(void); asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void); asmlinkage void __do_softirq(void);
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
__do_softirq();
}
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void); extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr); extern void __raise_softirq_irqoff(unsigned int nr);
......
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/irq.h> #include <trace/events/irq.h>
#include <asm/irq.h>
/* /*
- No shared variables, all the data are CPU local. - No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself - If a softirq needs serialization, let it serialize itself
...@@ -283,7 +282,7 @@ asmlinkage void __do_softirq(void) ...@@ -283,7 +282,7 @@ asmlinkage void __do_softirq(void)
tsk_restore_flags(current, old_flags, PF_MEMALLOC); tsk_restore_flags(current, old_flags, PF_MEMALLOC);
} }
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void) asmlinkage void do_softirq(void)
{ {
...@@ -298,13 +297,12 @@ asmlinkage void do_softirq(void) ...@@ -298,13 +297,12 @@ asmlinkage void do_softirq(void)
pending = local_softirq_pending(); pending = local_softirq_pending();
if (pending) if (pending)
__do_softirq(); do_softirq_own_stack();
WARN_ON_ONCE(softirq_count());
local_irq_restore(flags); local_irq_restore(flags);
} }
#endif
/* /*
* Enter an interrupt context. * Enter an interrupt context.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment