Commit 2319295d authored by Al Viro, committed by Michal Simek

microblaze: switch to generic kernel_thread()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
parent fd11ff73
@@ -26,6 +26,7 @@ config MICROBLAZE
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
+	select GENERIC_KERNEL_THREAD
 
 config SWAP
 	def_bool n
......
@@ -31,6 +31,7 @@ extern const struct seq_operations cpuinfo_op;
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
 
 extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
 
 # endif /* __ASSEMBLY__ */
@@ -78,11 +79,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 
 extern unsigned long get_wchan(struct task_struct *p);
 
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 # define KSTK_EIP(tsk) (0)
 # define KSTK_ESP(tsk) (0)
@@ -131,8 +127,6 @@ extern inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 /* Free current thread data structures etc. */
 static inline void exit_thread(void)
 {
......
@@ -474,6 +474,14 @@ ENTRY(ret_from_fork)
 	brid	ret_to_user
 	nop
 
+ENTRY(ret_from_kernel_thread)
+	brlid	r15, schedule_tail
+	addk	r5, r0, r3
+	brald	r15, r20
+	addk	r5, r0, r19
+	brid	sys_exit	/* won't be returning... */
+	addk	r5, r0, r0
+
 work_pending:
 	enable_irq
......
@@ -484,6 +484,15 @@ C_ENTRY(ret_from_fork):
 	brid	ret_from_trap;	/* Do normal trap return */
 	add	r3, r0, r0;	/* Child's fork call should return 0. */
 
+C_ENTRY(ret_from_kernel_thread):
+	bralid	r15, schedule_tail;	/* ...which is schedule_tail's arg */
+	add	r5, r3, r0;	/* switch_thread returns the prev task */
+				/* ( in the delay slot ) */
+	brald	r15, r20	/* fn was left in r20 */
+	addk	r5, r0, r19	/* ... and argument - in r19 */
+	brid	sys_exit	/* won't be returning... */
+	addk	r5, r0, r0
+
 C_ENTRY(sys_vfork):
 	brid	microblaze_vfork	/* Do real work (tail-call) */
 	addik	r5, r1, 0
......
@@ -119,46 +119,38 @@ void flush_thread(void)
 }
 
 int copy_thread(unsigned long clone_flags, unsigned long usp,
-		unsigned long unused,
+		unsigned long arg,
 		struct task_struct *p, struct pt_regs *regs)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct thread_info *ti = task_thread_info(p);
 
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* if we're creating a new kernel thread then just zeroing all
+		 * the registers. That's OK for a brand new thread.*/
+		memset(childregs, 0, sizeof(struct pt_regs));
+		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+		ti->cpu_context.r1 = (unsigned long)childregs;
+		ti->cpu_context.r20 = (unsigned long)usp; /* fn */
+		ti->cpu_context.r19 = (unsigned long)arg;
+		childregs->pt_mode = 1;
+		local_save_flags(childregs->msr);
+#ifdef CONFIG_MMU
+		ti->cpu_context.msr = childregs->msr & ~MSR_IE;
+#endif
+		ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
+		return 0;
+	}
 	*childregs = *regs;
-	if (user_mode(regs))
-		childregs->r1 = usp;
-	else
-		childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
+	childregs->r1 = usp;
 
-#ifndef CONFIG_MMU
 	memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
 	ti->cpu_context.r1 = (unsigned long)childregs;
+#ifndef CONFIG_MMU
 	ti->cpu_context.msr = (unsigned long)childregs->msr;
 #else
+	childregs->msr |= MSR_UMS;
 
-	/* if creating a kernel thread then update the current reg (we don't
-	 * want to use the parent's value when restoring by POP_STATE) */
-	if (kernel_mode(regs))
-		/* save new current on stack to use POP_STATE */
-		childregs->CURRENT_TASK = (unsigned long)p;
-	/* if returning to user then use the parent's value of this register */
-
-	/* if we're creating a new kernel thread then just zeroing all
-	 * the registers. That's OK for a brand new thread.*/
-	/* Pls. note that some of them will be restored in POP_STATE */
-	if (kernel_mode(regs))
-		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
-	/* if this thread is created for fork/vfork/clone, then we want to
-	 * restore all the parent's context */
-	/* in addition to the registers which will be restored by POP_STATE */
-	else {
-		ti->cpu_context = *(struct cpu_context *)regs;
-		childregs->msr |= MSR_UMS;
-	}
-
-	/* FIXME STATE_SAVE_PT_OFFSET; */
-	ti->cpu_context.r1 = (unsigned long)childregs;
 	/* we should consider the fact that childregs is a copy of the parent
 	 * regs which were saved immediately after entering the kernel state
 	 * before enabling VM. This MSR will be restored in switch_to and
@@ -209,29 +201,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 }
 #endif
 
-static void kernel_thread_helper(int (*fn)(void *), void *arg)
-{
-	fn(arg);
-	do_exit(-1);
-}
-
-int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-	/* store them in non-volatile registers */
-	regs.r5 = (unsigned long)fn;
-	regs.r6 = (unsigned long)arg;
-	local_save_flags(regs.msr);
-	regs.pc = (unsigned long)kernel_thread_helper;
-	regs.pt_mode = 1;
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-			&regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	/* TBD (used by procfs) */
......
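Note on the resulting flow (not part of the patch itself): with GENERIC_KERNEL_THREAD selected, microblaze stops supplying its own kernel_thread(); the generic helper passes fn and arg through do_fork(), copy_thread() above sees PF_KTHREAD set, parks fn in r20 and arg in r19 with r15 pointing at ret_from_kernel_thread, and that stub runs schedule_tail(), then fn(arg), then leaves via sys_exit. A minimal C sketch of that generic helper follows; the exact do_fork() argument order is my assumption from the 3.7-era API and is not shown in this commit:

/* sketch only -- approximates the generic kernel_thread() this commit opts into */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* fn rides in the stack-start slot and arg in the stack-size slot;
	 * copy_thread() recovers them because PF_KTHREAD is set on the child */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn, NULL, (unsigned long)arg,
		       NULL, NULL);
}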