Commit f60fe78f authored by Mark Rutland

arm64: use an irq stack pointer

We allocate our IRQ stacks using a percpu array. This allows us to generate our
IRQ stack pointers with adr_this_cpu, but bloats the kernel Image with the boot
CPU's IRQ stack. Additionally, these are packed with other percpu variables,
and aren't guaranteed to have guard pages.

When we enable VMAP_STACK we'll want to vmap our IRQ stacks also, in order to
provide guard pages and to permit more stringent alignment requirements. Doing
so will require that we use a percpu pointer to each IRQ stack, rather than
allocating a percpu IRQ stack in the kernel image.
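
As a rough sketch of what the indirection buys us once VMAP_STACK arrives (hypothetical follow-up code, not part of this patch: vmalloc() stands in for whatever allocator the real series uses, and the stricter stack alignment is glossed over):

    /* Only the pointer lives in percpu data inside the kernel Image; the
     * stack itself can then come from the vmalloc area, where it picks up
     * guard pages. */
    DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

    static void init_irq_stacks(void)            /* hypothetical variant */
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    per_cpu(irq_stack_ptr, cpu) = vmalloc(IRQ_STACK_SIZE);
    }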

This patch updates our IRQ stack code to use a percpu pointer to the base of
each IRQ stack. This will allow us to change the way the stack is allocated
with minimal changes elsewhere. In some cases we may try to backtrace before
the IRQ stack pointers are initialised, so on_irq_stack() is updated to account
for this.
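
To make the early-backtrace case concrete, here's a standalone C sketch of the check (plain arrays stand in for the kernel's percpu machinery; names mirror the patch, but this is illustrative userspace code, not the kernel implementation):

    #include <stdio.h>
    #include <stdbool.h>

    #define IRQ_STACK_SIZE 16384
    #define NR_CPUS 4

    /* Old scheme: storage for every CPU's stack is baked into the binary. */
    static unsigned long irq_stack[NR_CPUS][IRQ_STACK_SIZE / sizeof(long)];

    /* New scheme: one pointer per CPU, NULL until initialised. */
    static unsigned long *irq_stack_ptr[NR_CPUS];

    /* Mirrors the patched on_irq_stack(): bail out while the pointer is
     * still NULL, so a backtrace taken before init is handled safely. */
    static bool on_irq_stack(int cpu, unsigned long sp)
    {
            unsigned long low = (unsigned long)irq_stack_ptr[cpu];
            unsigned long high = low + IRQ_STACK_SIZE;

            if (!low)
                    return false;

            return (low <= sp && sp < high);
    }

    int main(void)
    {
            unsigned long sp = (unsigned long)&irq_stack[0][10];

            printf("before init: %d\n", on_irq_stack(0, sp)); /* 0 */
            irq_stack_ptr[0] = irq_stack[0];
            printf("after init:  %d\n", on_irq_stack(0, sp)); /* 1 */
            return 0;
    }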

In testing with cyclictest, there was no measurable difference between using
adr_this_cpu (for irq_stack) and ldr_this_cpu (for irq_stack_ptr) in the IRQ
entry path.
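
In C terms (an analogy only; the real change is the one-instruction swap in the entry.S hunk below), the difference is between computing a percpu address and loading a percpu pointer:

    /* adr_this_cpu: compute the address of this CPU's irq_stack;
     * pure address arithmetic, no load from memory. */
    unsigned long old_base = (unsigned long)raw_cpu_ptr(irq_stack);

    /* ldr_this_cpu: compute the address of this CPU's irq_stack_ptr,
     * then load the pointer stored there; one extra memory access. */
    unsigned long new_base = (unsigned long)raw_cpu_read(irq_stack_ptr);

That extra load sits on the IRQ entry path, hence the cyclictest comparison.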
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
parent 8ea41b11
arch/arm64/include/asm/stacktrace.h

@@ -36,13 +36,16 @@ extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			    int (*fn)(struct stackframe *, void *), void *data);
 extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
 static inline bool on_irq_stack(unsigned long sp)
 {
-	unsigned long low = (unsigned long)raw_cpu_ptr(irq_stack);
+	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
 	unsigned long high = low + IRQ_STACK_SIZE;
 
+	if (!low)
+		return false;
+
 	return (low <= sp && sp < high);
 }
arch/arm64/kernel/entry.S

@@ -276,7 +276,7 @@ alternative_else_nop_endif
 	and	x25, x25, #~(THREAD_SIZE - 1)
 	cbnz	x25, 9998f
 
-	adr_this_cpu x25, irq_stack, x26
+	ldr_this_cpu x25, irq_stack_ptr, x26
 	mov	x26, #IRQ_STACK_SIZE
 	add	x26, x25, x26
arch/arm64/kernel/irq.c

@@ -32,6 +32,7 @@ unsigned long irq_err_count;
 
 /* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
 DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
+DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
@@ -50,8 +51,17 @@ void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
 	handle_arch_irq = handle_irq;
 }
 
+static void init_irq_stacks(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
+}
+
 void __init init_IRQ(void)
 {
+	init_irq_stacks();
 	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");