Commit 31cb1bc0 authored by Rodrigo Siqueira, committed by Ingo Molnar

sched/core: Rework and clarify prepare_lock_switch()

The prepare_lock_switch() function has an unused parameter, and its name does
not describe what it does. To improve readability and drop the extra
parameter, make the following changes:

* Move prepare_lock_switch() from kernel/sched/sched.h to
  kernel/sched/core.c, rename it to prepare_task(), and remove the
  unused parameter.

* Split the smp_store_release() out of finish_lock_switch() into a new
  function named finish_task().

* Comment adjustments.
Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171215140603.gxe5i2y6fg5ojfpp@smtp.gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cb1f34dd
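
Before the diff itself, here is a minimal standalone C sketch (not part of the commit) of the call order the patch establishes around a context switch: prepare_task() claims the next task before the switch, while the old two-argument finish_lock_switch() is split into finish_task(), which releases prev->on_cpu, and finish_lock_switch(), which drops the runqueue lock. The stub types, stub bodies, and the wrapper name context_switch_tail() are illustrative assumptions, not kernel code.

/*
 * Standalone model (not kernel code) of the call ordering after this patch.
 * Helper names mirror the patch; everything else is stubbed for illustration.
 */
#include <stdio.h>

struct task_struct { int on_cpu; };
struct rq { int lock_held; };

/* next->on_cpu = 1 before the switch, so a running task always has it set */
static void prepare_task(struct task_struct *next) { next->on_cpu = 1; }
/* stands in for the smp_store_release(&prev->on_cpu, 0) in the real finish_task() */
static void finish_task(struct task_struct *prev) { prev->on_cpu = 0; }
/* stands in for the lockdep fixup + raw_spin_unlock_irq() in the real finish_lock_switch() */
static void finish_lock_switch(struct rq *rq) { rq->lock_held = 0; }

/* Illustrative wrapper: the order the scheduler path uses after the patch. */
static void context_switch_tail(struct rq *rq, struct task_struct *prev,
                                struct task_struct *next)
{
        prepare_task(next);     /* claim next as running (prepare_task_switch) */
        /* ... the actual switch_to(prev, next) would happen here ... */
        finish_task(prev);      /* release prev: wakers may now migrate it */
        finish_lock_switch(rq); /* drop the runqueue lock */
}

int main(void)
{
        struct task_struct prev = { .on_cpu = 1 }, next = { .on_cpu = 0 };
        struct rq rq = { .lock_held = 1 };

        context_switch_tail(&rq, &prev, &next);
        printf("prev.on_cpu=%d next.on_cpu=%d rq.lock_held=%d\n",
               prev.on_cpu, next.on_cpu, rq.lock_held);
        return 0;
}

Splitting the helpers this way keeps the SMP-only ->on_cpu handling separate from the rq->lock bookkeeping, which is exactly the separation the commit message describes.
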
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * If the owning (remote) CPU is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
 	 *
-	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 * Pairs with the smp_store_release() in finish_task().
 	 *
 	 * This ensures that tasks getting woken will be fully ordered against
 	 * their previous state and preserve Program Order.
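
The pairing referenced in the comment above is ordinary release/acquire message passing. The following standalone userspace sketch (an illustrative model, not kernel code) mimics it with C11 atomics: finish_task_side() plays the role of the smp_store_release() in finish_task(), and waker_side() plays the role of the smp_cond_load_acquire() loop in try_to_wake_up(). The struct, the thread function names, and the state value 42 are assumptions made for the example.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task { atomic_int on_cpu; int state; };

static struct task prev = { .on_cpu = 1, .state = 0 };

static void *finish_task_side(void *arg)
{
        (void)arg;
        prev.state = 42;                           /* published before clearing on_cpu */
        atomic_store_explicit(&prev.on_cpu, 0,
                              memory_order_release); /* models smp_store_release() in finish_task() */
        return NULL;
}

static void *waker_side(void *arg)
{
        (void)arg;
        /* models smp_cond_load_acquire(&p->on_cpu, !VAL) in try_to_wake_up() */
        while (atomic_load_explicit(&prev.on_cpu, memory_order_acquire))
                ;
        /* the acquire load that sees on_cpu == 0 also sees state == 42 */
        printf("waker sees state=%d\n", prev.state);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, waker_side, NULL);
        pthread_create(&b, NULL, finish_task_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

Built with -pthread, the waker always prints state=42: everything written before the release store is visible to the acquire load that observes on_cpu == 0, which is the ordering the kernel comment relies on.
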
@@ -2571,6 +2571,50 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 #endif /* CONFIG_PREEMPT_NOTIFIERS */
 
+static inline void prepare_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * Claim the task as running, we do this before switching to it
+	 * such that any running task will have this set.
+	 */
+	next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+	 */
+	smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
 /**
  * prepare_task_switch - prepare to switch tasks
  * @rq: the runqueue preparing to switch
@@ -2591,7 +2635,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
-	prepare_lock_switch(rq, next);
+	prepare_task(next);
 	prepare_arch_switch(next);
 }
@@ -2646,7 +2690,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 * the scheduled task must drop that reference.
 	 *
 	 * We must observe prev->state before clearing prev->on_cpu (in
-	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * finish_task), otherwise a concurrent wakeup can get prev
 	 * running on another CPU and we could race with its RUNNING -> DEAD
 	 * transition, resulting in a double drop.
 	 */
@@ -2663,7 +2707,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 * to use.
 	 */
 	smp_mb__after_unlock_lock();
-	finish_lock_switch(rq, prev);
+	finish_task(prev);
+	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();
 	fire_sched_in_preempt_notifiers(current);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
 
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 *
-	 * In particular, the load of prev->state in finish_task_switch() must
-	 * happen before this.
-	 *
-	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-	 */
-	smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
-#endif
-	/*
-	 * If we are tracking spinlock dependencies then we have to
-	 * fix up the runqueue lock - which gets 'carried over' from
-	 * prev into current:
-	 */
-	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-	raw_spin_unlock_irq(&rq->lock);
-}
-
 /*
  * wake flags
  */