Commit c15c5f8c authored by Paul Mundt

sh: Support kernel stacks smaller than a page.

This follows the powerpc commit f6a61680
'[POWERPC] Fix kernel stack allocation alignment'.

SH has traditionally forced the thread order to be relative to the page
size, so there were never any situations where the same bug was
triggered by slub. Regardless, the usage of > 8kB stacks for the larger
page sizes is overkill, so we switch to using slab allocations there,
as per the powerpc change.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent b817f7e0
@@ -33,20 +33,12 @@ struct thread_info {
 #define PREEMPT_ACTIVE		0x10000000
 
 #if defined(CONFIG_4KSTACKS)
-#define THREAD_SIZE_ORDER	(0)
-#elif defined(CONFIG_PAGE_SIZE_4KB)
-#define THREAD_SIZE_ORDER	(1)
-#elif defined(CONFIG_PAGE_SIZE_8KB)
-#define THREAD_SIZE_ORDER	(1)
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define THREAD_SIZE_ORDER	(0)
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define THREAD_SIZE_ORDER	(0)
+#define THREAD_SHIFT	12
 #else
-#error "Unknown thread size"
+#define THREAD_SHIFT	13
 #endif
 
-#define THREAD_SIZE	(PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SIZE	(1 << THREAD_SHIFT)
 #define STACK_WARN	(THREAD_SIZE >> 3)
 
 /*
/* /*
@@ -94,15 +86,19 @@ static inline struct thread_info *current_thread_info(void)
 	return ti;
 }
 
+/* thread information allocation */
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
+#else /* THREAD_SHIFT < PAGE_SHIFT */
+
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(ti)	kzalloc(THREAD_SIZE, GFP_KERNEL)
-#else
-#define alloc_thread_info(ti)	kmalloc(THREAD_SIZE, GFP_KERNEL)
-#endif
-#define free_thread_info(ti)	kfree(ti)
+extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern void free_thread_info(struct thread_info *ti);
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
 
 #endif /* __ASSEMBLY__ */
......
@@ -265,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
/*
 * Allocate a thread_info (and the kernel stack that lives inside it)
 * from the dedicated slab cache.  Used when THREAD_SHIFT < PAGE_SHIFT,
 * i.e. the stack is smaller than a page.  Returns NULL on failure.
 */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);

	if (unlikely(!ti))
		return NULL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the whole stack so the debug code can measure its usage. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}
/* Release a thread_info previously obtained from alloc_thread_info(). */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}
/*
 * Create the slab cache backing sub-page thread stacks.  The cache is
 * both sized and aligned to THREAD_SIZE so each stack stays naturally
 * aligned.  Allocation failure here is fatal (BUG).
 */
void thread_info_cache_init(void)
{
	struct kmem_cache *cache;

	cache = kmem_cache_create("thread_info", THREAD_SIZE, THREAD_SIZE,
				  0, NULL);
	BUG_ON(cache == NULL);
	thread_info_cache = cache;
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment