Commit dd69d07a authored by Guo Ren's avatar Guo Ren Committed by Palmer Dabbelt

riscv: stack: Support HAVE_SOFTIRQ_ON_OWN_STACK

Add the HAVE_SOFTIRQ_ON_OWN_STACK feature for the IRQ_STACKS config; the
hardirq and softirq handlers share the same percpu irq_stack.
Tested-by: Jisheng Zhang <jszhang@kernel.org>
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230614013018.2168426-3-guoren@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 163e76cc
@@ -591,11 +591,13 @@ config FPU
 	  If you don't know what to do here, say Y.

 config IRQ_STACKS
-	bool "Independent irq stacks" if EXPERT
+	bool "Independent irq & softirq stacks" if EXPERT
 	default y
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	help
-	  Add independent irq stacks for percpu to prevent kernel stack overflows.
+	  Add independent irq & softirq stacks for percpu to prevent kernel stack
+	  overflows. We may save some memory footprint by disabling IRQ_STACKS.

 endmenu # "Platform type"
......
@@ -11,6 +11,9 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
+#include <asm/softirq_stack.h>
+#include <asm/stacktrace.h>

 static struct fwnode_handle *(*__get_intc_node)(void);
@@ -56,6 +59,38 @@ static void init_irq_stacks(void)
 		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
 }
 #endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
#ifdef CONFIG_IRQ_STACKS
if (on_thread_stack()) {
ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+ IRQ_STACK_SIZE/sizeof(ulong);
__asm__ __volatile(
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" ra, (sp) \n"
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" s0, (sp) \n"
"addi s0, sp, 2*"RISCV_SZPTR "\n"
"move sp, %[sp] \n"
"call __do_softirq \n"
"addi sp, s0, -2*"RISCV_SZPTR"\n"
REG_L" s0, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
REG_L" ra, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
:
: [sp] "r" (sp)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
"memory");
} else
#endif
__do_softirq();
}
#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
#else #else
static void init_irq_stacks(void) {} static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */ #endif /* CONFIG_IRQ_STACKS */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment