Commit 95f238ea authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] ia32: 4Kb stacks (and irqstacks) patch

From: Arjan van de Ven <arjanv@redhat.com>

Below is a patch to enable 4Kb stacks for x86. The goal of this is to

1) Reduce footprint per thread so that systems can run many more threads
   (for the java people)

2) Reduce the pressure on the VM for order > 0 allocations. We see real life
   workloads (granted with 2.4 but the fundamental fragmentation issue isn't
   solved in 2.6 and isn't solvable in theory) where this can be a problem.
   In addition order > 0 allocations can make the VM "stutter" and give more
   latency due to having to do much much more work trying to defragment

The first 2 bits of the patch actually affect compiler options in a generic
way: I propose to disable the -funit-at-a-time feature from gcc.  With this
enabled (and it's the default with -O2), gcc will very aggressively inline
functions, which is nice and all for userspace, but for the kernel this makes
us suffer a gcc deficiency more: gcc is extremely bad at sharing stackslots,
for example a situation like this:

if (some_condition)
	function_A();
else
	function_B();

with -funit-at-a-time, both function_A() and _B() might get inlined, however
the stack usage of the parent function grows by the stack usage of
both functions COMBINED instead of the maximum of the two.  Even
with the normal 8Kb stacks this is a danger since we see some functions grow
3Kb to 4Kb of stack use this way.  With 4Kb stacks, 4Kb of stack usage growth
obviously is deadly ;-( but even with 8Kb stacks it's pure lottery.
Disabling -funit-at-a-time also exposes another thing in the -mm tree; the
attribute always_inline is considered harmful by gcc folks in that when gcc
makes a decision to NOT inline a function marked this way, it throws an
error.  Disabling -funit-at-a-time disables some of the aggressive inlining
(eg of large functions that come later in the .c file) so this would make
your tree not compile.

The 4k stackness of the kernel is included in modversions, so people don't
load 4k-stack modules into 8k-stack kernels.

At present 4k stacks are selectable in config.  When the feature has settled
in we should remove the 8k option.  This will break the nvidia modules.  But
Fedora uses 4k stacks so a new nvidia driver is expected soon.
parent 124187e5
......@@ -1294,6 +1294,15 @@ config FRAME_POINTER
If you don't debug the kernel, you can say N, but we may not be able
to solve problems without frame pointers.
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
help
If you say Y here the kernel will use a 4Kb stack size for the
kernel stack attached to each process/thread. This facilitates
running more threads on a system and also reduces the pressure
on the VM subsystem for higher order allocations. This option
will also use IRQ stacks to compensate for the reduced stackspace.
config X86_FIND_SMP_CONFIG
bool
depends on X86_LOCAL_APIC || X86_VOYAGER
......
......@@ -56,9 +56,9 @@ cflags-$(CONFIG_X86_ELAN) += -march=i486
GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
CFLAGS += $(call check_gcc,-funit-at-a-time,)
# Disable unit-at-a-time mode, it makes gcc use a lot more stack
# due to the lack of sharing of stackslots.
CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
CFLAGS += $(cflags-y)
......
......@@ -444,4 +444,7 @@ void __init init_IRQ(void)
*/
if (boot_cpu_data.hard_math && !cpu_has_fpu)
setup_irq(FPU_IRQ, &fpu_irq);
current_thread_info()->cpu = 0;
irq_ctx_init(0);
}
......@@ -74,6 +74,14 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
static void register_irq_proc (unsigned int irq);
/*
* per-CPU IRQ handling stacks
*/
#ifdef CONFIG_4KSTACKS
union irq_ctx *hardirq_ctx[NR_CPUS];
union irq_ctx *softirq_ctx[NR_CPUS];
#endif
/*
* Special irq handlers.
*/
......@@ -209,7 +217,7 @@ inline void synchronize_irq(unsigned int irq)
* waste of time and is not what some drivers would
* prefer.
*/
int handle_IRQ_event(unsigned int irq,
asmlinkage int handle_IRQ_event(unsigned int irq,
struct pt_regs *regs, struct irqaction *action)
{
int status = 1; /* Force the "do bottom halves" bit */
......@@ -432,7 +440,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
__asm__ __volatile__("andl %%esp,%0" :
"=r" (esp) : "0" (THREAD_SIZE - 1));
if (unlikely(esp < (sizeof(struct thread_info) + 1024))) {
if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
printk("do_IRQ: stack overflow: %ld\n",
esp - sizeof(struct thread_info));
dump_stack();
......@@ -480,11 +488,68 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
* useful for irq hardware that does not mask cleanly in an
* SMP environment.
*/
#ifdef CONFIG_4KSTACKS
for (;;) {
irqreturn_t action_ret;
u32 *isp;
union irq_ctx * curctx;
union irq_ctx * irqctx;
curctx = (union irq_ctx *) current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()];
spin_unlock(&desc->lock);
/*
* this is where we switch to the IRQ stack. However, if we are already using
* the IRQ stack (because we interrupted a hardirq handler) we can't do that
* and just have to keep using the current stack (which is the irq stack already
* after all)
*/
if (curctx == irqctx)
action_ret = handle_IRQ_event(irq, &regs, action);
else {
/* build the stack frame on the IRQ stack */
isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_esp = current_stack_pointer();
*--isp = (u32) action;
*--isp = (u32) &regs;
*--isp = (u32) irq;
asm volatile(
" xchgl %%ebx,%%esp \n"
" call handle_IRQ_event \n"
" xchgl %%ebx,%%esp \n"
: "=a"(action_ret)
: "b"(isp)
: "memory", "cc", "edx", "ecx"
);
}
spin_lock(&desc->lock);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
if (curctx != irqctx)
irqctx->tinfo.task = NULL;
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
#else
for (;;) {
irqreturn_t action_ret;
spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, &regs, action);
spin_lock(&desc->lock);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
......@@ -492,6 +557,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
break;
desc->status &= ~IRQ_PENDING;
}
#endif
desc->status &= ~IRQ_INPROGRESS;
out:
......@@ -1049,3 +1115,79 @@ void init_irq_proc (void)
register_irq_proc(i);
}
#ifdef CONFIG_4KSTACKS
static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
/*
 * Set up the per-CPU hardirq and softirq handling contexts for @cpu.
 * Idempotent: a CPU whose IRQ stacks are already set up is left alone.
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *ctx;

	if (hardirq_ctx[cpu])
		return;

	/* hardirq context occupies this CPU's slice of hardirq_stack */
	ctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
	ctx->tinfo.task = NULL;
	ctx->tinfo.exec_domain = NULL;
	ctx->tinfo.cpu = cpu;
	ctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	ctx->tinfo.addr_limit = MAKE_MM_SEG(0);
	hardirq_ctx[cpu] = ctx;

	/* softirq context occupies this CPU's slice of softirq_stack */
	ctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
	ctx->tinfo.task = NULL;
	ctx->tinfo.exec_domain = NULL;
	ctx->tinfo.cpu = cpu;
	ctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
	ctx->tinfo.addr_limit = MAKE_MM_SEG(0);
	softirq_ctx[cpu] = ctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
extern asmlinkage void __do_softirq(void);
/*
 * Arch-private softirq entry point (__ARCH_HAS_DO_SOFTIRQ).
 * Switches onto this CPU's dedicated softirq stack before calling
 * __do_softirq(), so softirq processing does not consume the
 * (possibly 4Kb) per-task kernel stack.
 */
asmlinkage void do_softirq(void)
{
unsigned long flags;
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
/* nested invocation: softirqs already being handled on this CPU */
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) {
curctx = current_thread_info();
irqctx = softirq_ctx[smp_processor_id()];
/* record the interrupted context so stack walkers can chain back */
irqctx->tinfo.task = curctx->task;
irqctx->tinfo.previous_esp = current_stack_pointer();
/* build the stack frame on the softirq stack */
isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
/*
 * xchgl swaps ESP with EBX: ESP now points at the softirq stack
 * top while EBX holds the interrupted stack pointer.  EBX is
 * callee-saved, so after the call a plain movl restores the
 * original ESP.
 */
asm volatile(
" xchgl %%ebx,%%esp \n"
" call __do_softirq \n"
" movl %%ebx,%%esp \n"
: "=b"(isp)
: "0"(isp)
: "memory", "cc", "edx", "ecx", "eax"
);
}
local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif
......@@ -815,6 +815,8 @@ static int __init do_boot_cpu(int apicid)
/* Stack for startup_32 can be just as for start_secondary onwards */
stack_start.esp = (void *) idle->thread.esp;
irq_ctx_init(cpu);
/*
* This grunge runs the startup process for
* the targeted processor.
......
......@@ -105,12 +105,20 @@ void show_trace(struct task_struct *task, unsigned long * stack)
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
while (!kstack_end(stack)) {
addr = *stack++;
if (kernel_text_address(addr)) {
printk(" [<%08lx>] ", addr);
print_symbol("%s\n", addr);
while (1) {
struct thread_info *context;
context = (struct thread_info*) ((unsigned long)stack & (~(THREAD_SIZE - 1)));
while (!kstack_end(stack)) {
addr = *stack++;
if (kernel_text_address(addr)) {
printk(" [<%08lx>] ", addr);
print_symbol("%s\n", addr);
}
}
stack = (unsigned long*)context->previous_esp;
if (!stack)
break;
printk(" =======================\n");
}
printk("\n");
}
......
......@@ -93,5 +93,8 @@ extern void enable_irq(unsigned int);
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);
struct irqaction;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ALPHA_IRQ_H */
......@@ -44,5 +44,9 @@ void disable_irq_wake(unsigned int irq);
void enable_irq_wake(unsigned int irq);
int setup_irq(unsigned int, struct irqaction *);
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
......@@ -45,6 +45,8 @@ extern void enable_irq(unsigned int);
int set_irq_type(unsigned int irq, unsigned int type);
int setup_irq(unsigned int, struct irqaction *);
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
......@@ -14,6 +14,10 @@ extern void enable_irq(unsigned int);
#define disable_irq_nosync disable_irq
#define enable_irq_nosync enable_irq
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IRQ_H */
......@@ -68,4 +68,8 @@ extern void disable_irq(unsigned int);
#define enable_irq_nosync(x) enable_irq(x)
#define disable_irq_nosync(x) disable_irq(x)
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _H8300_IRQ_H_ */
......@@ -14,6 +14,7 @@
#include <linux/sched.h>
/* include comes from machine specific directory */
#include "irq_vectors.h"
#include <asm/thread_info.h>
static __inline__ int irq_canonicalize(int irq)
{
......@@ -30,4 +31,28 @@ extern int can_request_irq(unsigned int, unsigned long flags);
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
#ifdef CONFIG_4KSTACKS
/*
* per-CPU IRQ handling contexts (thread information and stack)
*/
/*
 * One THREAD_SIZE area holds both the bookkeeping and the stack:
 * the thread_info sits at the base and the stack grows down from
 * the top, mirroring the layout of a normal task kernel stack.
 */
union irq_ctx {
struct thread_info tinfo; /* at the base of the area */
u32 stack[THREAD_SIZE/sizeof(u32)]; /* the IRQ stack itself */
};
extern union irq_ctx *hardirq_ctx[NR_CPUS];
extern union irq_ctx *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(int cpu);
#define __ARCH_HAS_DO_SOFTIRQ
#else
#define irq_ctx_init(cpu) do { ; } while (0)
#endif
struct irqaction;
struct pt_regs;
asmlinkage int handle_IRQ_event(unsigned int, struct pt_regs *,
struct irqaction *);
#endif /* _ASM_IRQ_H */
......@@ -60,6 +60,12 @@ struct mod_arch_specific
#define MODULE_REGPARM ""
#endif
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM
/*
 * Tag vermagic with the configured stack size so that modules built
 * for 4k stacks cannot be loaded into an 8k-stack kernel (or vice
 * versa).
 */
#ifdef CONFIG_4KSTACKS
#define MODULE_STACKSIZE "4KSTACKS "
#else
#define MODULE_STACKSIZE ""
#endif
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
#endif /* _ASM_I386_MODULE_H */
......@@ -9,6 +9,9 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#endif
......@@ -29,12 +32,16 @@ struct thread_info {
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
struct restart_block restart_block;
unsigned long previous_esp; /* ESP of the previous stack in case
of nested (IRQ) stacks
*/
__u8 supervisor_stack[0];
};
......@@ -53,7 +60,13 @@ struct thread_info {
#endif
#define PREEMPT_ACTIVE 0x4000000
/* Kernel stack size per task: 4Kb when CONFIG_4KSTACKS (IRQs then run
 * on separate per-CPU stacks), 8Kb otherwise. */
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE (4096)
#else
#define THREAD_SIZE (8192)
#endif
/* do_IRQ warns when less than this much stack remains */
#define STACK_WARN (THREAD_SIZE/8)
/*
* macros/functions for gaining access to the thread information structure
*
......@@ -77,7 +90,6 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
#define THREAD_SIZE (2*PAGE_SIZE)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
......@@ -87,6 +99,14 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
/* how to get the current stack pointer from C: reads %esp directly;
 * used to record previous_esp when switching to an IRQ stack */
static inline unsigned long current_stack_pointer(void)
{
unsigned long ti;
__asm__("movl %%esp,%0; ":"=r" (ti) : );
return ti;
}
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info(tsk) \
......@@ -108,8 +128,6 @@ static inline struct thread_info *current_thread_info(void)
#else /* !__ASSEMBLY__ */
#define THREAD_SIZE 8192
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
movl $-THREAD_SIZE, reg; \
......
......@@ -30,4 +30,8 @@ extern void disable_irq_nosync (unsigned int);
extern void enable_irq (unsigned int);
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IA64_IRQ_H */
......@@ -124,4 +124,8 @@ extern volatile unsigned int num_spurious;
*/
extern irq_node_t *new_irq_node(void);
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _M68K_IRQ_H_ */
......@@ -121,4 +121,8 @@ extern irq_node_t *new_irq_node(void);
#define enable_irq_nosync(x) enable_irq(x)
#define disable_irq_nosync(x) disable_irq(x)
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _M68K_IRQ_H_ */
......@@ -31,4 +31,7 @@ extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
extern void init_generic_irq(void);
struct irqaction;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IRQ_H */
......@@ -96,4 +96,7 @@ extern unsigned long txn_alloc_addr(int);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
struct irqaction;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_PARISC_IRQ_H */
......@@ -211,5 +211,9 @@ extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
extern atomic_t ppc_n_lost_interrupts;
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
......@@ -75,5 +75,9 @@ static __inline__ int irq_canonicalize(int irq)
return irq;
}
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
......@@ -21,6 +21,10 @@ enum interruption_class {
#define touch_nmi_watchdog() do { } while(0)
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* __KERNEL__ */
#endif
......@@ -329,4 +329,8 @@ static inline int generic_irq_demux(int irq)
#define irq_canonicalize(irq) (irq)
#define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq))
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* __ASM_SH_IRQ_H */
......@@ -184,4 +184,8 @@ extern struct sun4m_intregs *sun4m_interrupts;
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
......@@ -150,4 +150,8 @@ static __inline__ unsigned long get_softint(void)
return retval;
}
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
......@@ -32,4 +32,9 @@ extern int um_request_irq(unsigned int irq, int fd, int type,
void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname,
void *dev_id);
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
......@@ -65,4 +65,8 @@ extern void disable_irq_nosync (unsigned int irq);
#endif /* !__ASSEMBLY__ */
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* __V850_IRQ_H__ */
......@@ -53,4 +53,8 @@ extern int can_request_irq(unsigned int, unsigned long flags);
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IRQ_H */
......@@ -3,7 +3,7 @@
/* These definitions are for GCC v3.x. */
#include <linux/compiler-gcc.h>
#if __GNUC_MINOR__ >= 1
#if __GNUC_MINOR__ >= 1 && __GNUC_MINOR__ < 4
# define inline __inline__ __attribute__((always_inline))
# define __inline__ __inline__ __attribute__((always_inline))
# define __inline __inline__ __attribute__((always_inline))
......
......@@ -71,7 +71,6 @@ extern irq_desc_t irq_desc [NR_IRQS];
#include <asm/hw_irq.h> /* the arch dependent stuff */
extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
extern int setup_irq(unsigned int , struct irqaction * );
extern hw_irq_controller no_irq_type; /* needed in every arch ? */
......
......@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <asm/irq.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
......@@ -69,53 +70,66 @@ static inline void wakeup_softirqd(void)
*/
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void do_softirq(void)
asmlinkage void __do_softirq(void)
{
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
__u32 pending;
unsigned long flags;
int max_restart = MAX_SOFTIRQ_RESTART;
if (in_interrupt())
return;
pending = local_softirq_pending();
local_irq_save(flags);
local_bh_disable();
restart:
/* Reset the pending bitmask before enabling irqs */
local_softirq_pending() = 0;
local_irq_enable();
h = softirq_vec;
do {
if (pending & 1)
h->action(h);
h++;
pending >>= 1;
} while (pending);
local_irq_disable();
pending = local_softirq_pending();
if (pending && --max_restart)
goto restart;
if (pending) {
struct softirq_action *h;
if (pending)
wakeup_softirqd();
local_bh_disable();
restart:
/* Reset the pending bitmask before enabling irqs */
local_softirq_pending() = 0;
__local_bh_enable();
}
local_irq_enable();
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void)
{
__u32 pending;
unsigned long flags;
h = softirq_vec;
if (in_interrupt())
return;
do {
if (pending & 1)
h->action(h);
h++;
pending >>= 1;
} while (pending);
local_irq_save(flags);
local_irq_disable();
pending = local_softirq_pending();
pending = local_softirq_pending();
if (pending && --max_restart)
goto restart;
if (pending)
wakeup_softirqd();
__local_bh_enable();
}
if (pending)
__do_softirq();
local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif
void local_bh_enable(void)
{
__local_bh_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment