Commit d58a247c authored by Ingo Molnar

- revert the raw spinlock usage - it's too dangerous and volatile.

parent 873cbfcf
@@ -156,12 +156,6 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
- *
- * WARNING: to squeeze out a few more cycles we do not disable preemption
- * explicitly (or implicitly), we just keep interrupts disabled. This means
- * that within task_rq_lock/unlock sections you must be careful
- * about locking/unlocking spinlocks, since they could cause an unexpected
- * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
@@ -170,9 +164,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
 	local_irq_save(*flags);
 	rq = task_rq(p);
-	_raw_spin_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		_raw_spin_unlock_irqrestore(&rq->lock, *flags);
+		spin_unlock_irqrestore(&rq->lock, *flags);
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -180,8 +174,7 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)

 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	_raw_spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_check_resched();
+	spin_unlock_irqrestore(&rq->lock, *flags);
 }

 /*
@@ -289,15 +282,8 @@ static inline void resched_task(task_t *p)
 	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);

 	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-		/*
-		 * NOTE: smp_send_reschedule() can be called from
-		 * spinlocked sections which do not have an elevated
-		 * preemption count. So the code either has to avoid
-		 * spinlocks, or has to put preempt_disable() and
-		 * preempt_enable_no_resched() around the code.
-		 */
 		smp_send_reschedule(p->thread_info->cpu);
-	preempt_enable_no_resched();
+	preempt_enable();
 #else
 	set_tsk_need_resched(p);
 #endif
@@ -348,10 +334,8 @@ void wait_task_inactive(task_t * p)
  */
 void kick_if_running(task_t * p)
 {
-	if (p == task_rq(p)->curr) {
+	if (p == task_rq(p)->curr)
 		resched_task(p);
-		preempt_check_resched();
-	}
 }
 #endif
...
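Context on why the explicit preempt_check_resched() calls can be dropped: on a CONFIG_PREEMPT kernel the regular spin_lock()/spin_unlock() primitives wrap the raw ones with preemption-count handling, and releasing the lock already rechecks for a pending reschedule. The sketch below is only an illustration of that relationship (an assumed shape, not the exact macros from this tree):

/*
 * Simplified sketch -- assumed shape of the preemption-aware wrappers,
 * not copied from this kernel tree.
 */
#define spin_lock(lock)					\
do {							\
	preempt_disable();	/* bump preempt_count */	\
	_raw_spin_lock(lock);				\
} while (0)

#define spin_unlock_irqrestore(lock, flags)		\
do {							\
	_raw_spin_unlock_irqrestore(lock, flags);	\
	preempt_enable();	/* drop preempt_count, recheck need_resched */	\
} while (0)

Because preempt_enable() performs the reschedule check when the count drops back to zero, the hand-rolled preempt_check_resched() calls in task_rq_unlock() and kick_if_running() become redundant once the regular primitives are restored.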