Commit c55f5158 authored by Peter Zijlstra, committed by Ingo Molnar

sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW

Kirill found that there's a subtle race in the
__ARCH_WANT_UNLOCKED_CTXSW code, and instead of fixing it, remove the
entire exception because neither arch that uses it seems to actually
still require it.

Boot tested on mips64el (qemu) only.
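
For reference, the convention this patch removes differs from the default
only in when rq->lock is dropped around switch_to(): by default the lock is
held across the switch and released in finish_lock_switch(), while
__ARCH_WANT_UNLOCKED_CTXSW dropped it in prepare_lock_switch() and
finish_lock_switch() merely re-enabled local interrupts (see the removed
hunks below). The userspace sketch below models only that ordering; it is
not kernel code, and struct rq, struct task and the function names here are
invented for the illustration.

  /* Toy model of the two lock-handover orderings, illustration only. */
  #include <pthread.h>
  #include <stdio.h>

  struct task { int on_cpu; };
  struct rq   { pthread_spinlock_t lock; };

  /* Default convention: rq->lock stays held across the "switch". */
  static void locked_ctxsw(struct rq *rq, struct task *prev, struct task *next)
  {
          pthread_spin_lock(&rq->lock);
          next->on_cpu = 1;               /* prepare_lock_switch() */
          puts("switch_to() runs with rq->lock held");
          prev->on_cpu = 0;               /* finish_lock_switch() ... */
          pthread_spin_unlock(&rq->lock); /* ... releases the lock here */
  }

  /* __ARCH_WANT_UNLOCKED_CTXSW: the lock is dropped before the "switch". */
  static void unlocked_ctxsw(struct rq *rq, struct task *prev, struct task *next)
  {
          pthread_spin_lock(&rq->lock);
          next->on_cpu = 1;               /* prepare_lock_switch() ... */
          pthread_spin_unlock(&rq->lock); /* ... already drops rq->lock */
          puts("switch_to() runs with rq->lock released");
          prev->on_cpu = 0;               /* finish_lock_switch(): the kernel
                                             only re-enabled local IRQs here */
  }

  int main(void)
  {
          struct rq rq;
          struct task a = { 0 }, b = { 0 };

          pthread_spin_init(&rq.lock, PTHREAD_PROCESS_PRIVATE);
          locked_ctxsw(&rq, &a, &b);
          unlocked_ctxsw(&rq, &b, &a);
          pthread_spin_destroy(&rq.lock);
          return 0;
  }

Build with "cc -pthread"; the only point is the ordering of the unlock
relative to the switch, which is what the removed
#else /* __ARCH_WANT_UNLOCKED_CTXSW */ branch below implemented.
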
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kirill Tkhai <tkhai@yandex.ru>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: oleg@redhat.com
Cc: linux@roeck-us.net
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20140923150641.GH3312@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5bd96ab6
@@ -19,7 +19,6 @@
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
-#define __ARCH_WANT_UNLOCKED_CTXSW
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 
 #define IA64_NUM_PHYS_STACK_REG	96
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define ARCH_HAS_PREFETCHW
 #define prefetchw(x) __builtin_prefetch((x), 1, 1)
 
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
 #endif
 
 #endif /* _ASM_PROCESSOR_H */
@@ -2331,10 +2331,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 */
 	post_schedule(rq);
 
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	/* In this case, finish_task_switch does not reenable preemption */
-	preempt_enable();
-#endif
 	if (current->set_child_tid)
 		put_user(task_pid_vnr(current), current->set_child_tid);
 }
@@ -2377,9 +2373,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
 	context_tracking_task_switch(prev, next);
 	/* Here we just switch the register state and the stack. */
@@ -975,7 +975,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -1013,35 +1012,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	raw_spin_unlock_irq(&rq->lock);
 }
 
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->on_cpu = 0;
-#endif
-	local_irq_enable();
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
 /*
  * wake flags
  */