Commit 513ad84b authored by Ingo Molnar

x86: de-macro start_thread()

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1180e01d
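
This change turns the per-architecture start_thread() macros into proper out-of-line, exported functions with a single shared prototype. As a rough illustration only (not part of this commit), callers keep the same three-argument form either way; the helper name and parameters below are hypothetical:

/*
 * Illustrative sketch, not from this commit: a caller (e.g. a binfmt
 * loader) hands control to a freshly exec'd image by resetting the
 * user register state.  example_start_user(), "entry" and "user_sp"
 * are made-up names for values the caller has already computed.
 */
static void example_start_user(struct pt_regs *regs,
			       unsigned long entry, unsigned long user_sp)
{
	/* Reload user segment selectors and set the user IP/SP. */
	start_thread(regs, entry, user_sp);
}
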
@@ -512,6 +512,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
__asm__("movl %0, %%gs" :: "r"(0));
regs->fs = 0;
set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
regs->cs = __USER_CS;
regs->ip = new_ip;
regs->sp = new_sp;
}
EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
...
@@ -528,6 +528,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
load_gs_index(0);
regs->ip = new_ip;
regs->sp = new_sp;
write_pda(oldrsp, new_sp);
regs->cs = __USER_CS;
regs->ss = __USER_DS;
regs->flags = 0x200;
set_fs(USER_DS);
}
EXPORT_SYMBOL_GPL(start_thread);
/*
 * This special macro can be used to load a debugging register
 */
...
@@ -817,20 +817,6 @@ static inline void spin_lock_prefetch(const void *x)
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}
#define start_thread(regs, new_eip, new_esp) \
do { \
__asm__("movl %0,%%gs": :"r" (0)); \
regs->fs = 0; \
set_fs(USER_DS); \
regs->ds = __USER_DS; \
regs->es = __USER_DS; \
regs->ss = __USER_DS; \
regs->cs = __USER_CS; \
regs->ip = new_eip; \
regs->sp = new_esp; \
} while (0)
extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
@@ -887,18 +873,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack)	\
}
#define start_thread(regs, new_rip, new_rsp) do { \
asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
load_gs_index(0); \
(regs)->ip = (new_rip); \
(regs)->sp = (new_rsp); \
write_pda(oldrsp, (new_rsp)); \
(regs)->cs = __USER_CS; \
(regs)->ss = __USER_DS; \
(regs)->flags = 0x200; \
set_fs(USER_DS); \
} while (0)
/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
@@ -909,6 +883,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp);
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
...
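
For reference, the 0x200 written into regs->flags by the 64-bit variant is the EFLAGS interrupt-enable bit (IF). A minimal sketch of the same assignment using the symbolic constant, assuming X86_EFLAGS_IF (0x200) is in scope via the processor-flags header:

/* Illustrative only: equivalent to "regs->flags = 0x200;" above. */
static inline void example_set_user_eflags(struct pt_regs *regs)
{
	regs->flags = X86_EFLAGS_IF;	/* bit 9: interrupts enabled */
}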