Commit 5d299eab authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Add tmp_alone_branch assertion

The magic in list_add_leaf_cfs_rq() requires that at the end of
enqueue_task_fair():

  rq->tmp_alone_branch == &rq->leaf_cfs_rq_list

If this is violated, list integrity is compromised for the affected
entries and the tmp_alone_branch pointer may be left dangling.

Also, reflow list_add_leaf_cfs_rq() while we are there. This loses one
indentation level and yields a form that is convenient for the next
patch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c546951d
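
To make the invariant concrete before the diff, here is a minimal userspace sketch (illustrative only, not kernel code) of the list_add_leaf_cfs_rq() logic: it models struct list_head, toy rq/cfs_rq structures, and the three insertion cases, then checks the same condition that the new assert_list_leaf_cfs_rq() warns on. The toy_ names, the simplified parent pointer and the absence of RCU and per-CPU handling are assumptions made for brevity.

	#include <assert.h>
	#include <stdio.h>

	struct list_head { struct list_head *prev, *next; };

	static void INIT_LIST_HEAD(struct list_head *head)
	{
		head->prev = head->next = head;
	}

	/* insert @entry between @prev and @next (same idea as include/linux/list.h) */
	static void __list_add(struct list_head *entry,
			       struct list_head *prev, struct list_head *next)
	{
		next->prev = entry;
		entry->next = next;
		entry->prev = prev;
		prev->next = entry;
	}

	static void list_add(struct list_head *entry, struct list_head *head)
	{
		__list_add(entry, head, head->next);
	}

	static void list_add_tail(struct list_head *entry, struct list_head *head)
	{
		__list_add(entry, head->prev, head);
	}

	/* toy stand-ins for the scheduler structures involved */
	struct toy_rq {
		struct list_head leaf_cfs_rq_list;
		struct list_head *tmp_alone_branch;
	};

	struct toy_cfs_rq {
		struct toy_rq *rq;
		struct toy_cfs_rq *parent;	/* stands in for tg->parent->cfs_rq[cpu] */
		int on_list;
		struct list_head leaf_cfs_rq_list;
	};

	/* the same three cases as list_add_leaf_cfs_rq(), minus RCU and per-CPU */
	static void toy_list_add_leaf_cfs_rq(struct toy_cfs_rq *cfs_rq)
	{
		struct toy_rq *rq = cfs_rq->rq;

		if (cfs_rq->on_list)
			return;
		cfs_rq->on_list = 1;

		if (cfs_rq->parent && cfs_rq->parent->on_list) {
			/* parent already listed: insert just before it, branch is connected */
			list_add_tail(&cfs_rq->leaf_cfs_rq_list,
				      &cfs_rq->parent->leaf_cfs_rq_list);
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
			return;
		}

		if (!cfs_rq->parent) {
			/* root cfs_rq: we reached the top of the tree, branch is connected */
			list_add_tail(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
			return;
		}

		/*
		 * Parent not listed yet: insert after tmp_alone_branch and move it
		 * forward, so the parent ends up after us on a later call.
		 */
		list_add(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
		rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
	}

	int main(void)
	{
		struct toy_rq rq;
		struct toy_cfs_rq root = { .rq = &rq };
		struct toy_cfs_rq child = { .rq = &rq, .parent = &root };

		INIT_LIST_HEAD(&rq.leaf_cfs_rq_list);
		rq.tmp_alone_branch = &rq.leaf_cfs_rq_list;

		/* bottom-up enqueue: the child starts an "alone branch" ... */
		toy_list_add_leaf_cfs_rq(&child);
		/* ... and adding the root reconnects it and resets tmp_alone_branch */
		toy_list_add_leaf_cfs_rq(&root);

		/* the invariant enqueue_task_fair() now asserts */
		assert(rq.tmp_alone_branch == &rq.leaf_cfs_rq_list);
		printf("tmp_alone_branch points back at rq->leaf_cfs_rq_list\n");
		return 0;
	}

If the sequence stopped after the child, tmp_alone_branch would still point into the half-built branch; that is exactly the state the assertion is meant to catch.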
kernel/sched/fair.c

@@ -277,64 +277,69 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
-	if (!cfs_rq->on_list) {
-		struct rq *rq = rq_of(cfs_rq);
-		int cpu = cpu_of(rq);
+	struct rq *rq = rq_of(cfs_rq);
+	int cpu = cpu_of(rq);
+
+	if (cfs_rq->on_list)
+		return;
+
+	cfs_rq->on_list = 1;
 
-		/*
-		 * Ensure we either appear before our parent (if already
-		 * enqueued) or force our parent to appear after us when it is
-		 * enqueued. The fact that we always enqueue bottom-up
-		 * reduces this to two cases and a special case for the root
-		 * cfs_rq. Furthermore, it also means that we will always reset
-		 * tmp_alone_branch either when the branch is connected
-		 * to a tree or when we reach the beg of the tree
-		 */
-		if (cfs_rq->tg->parent &&
-		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
-			/*
-			 * If parent is already on the list, we add the child
-			 * just before. Thanks to circular linked property of
-			 * the list, this means to put the child at the tail
-			 * of the list that starts by parent.
-			 */
-			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
-			/*
-			 * The branch is now connected to its tree so we can
-			 * reset tmp_alone_branch to the beginning of the
-			 * list.
-			 */
-			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
-		} else if (!cfs_rq->tg->parent) {
-			/*
-			 * cfs rq without parent should be put
-			 * at the tail of the list.
-			 */
-			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&rq->leaf_cfs_rq_list);
-			/*
-			 * We have reach the beg of a tree so we can reset
-			 * tmp_alone_branch to the beginning of the list.
-			 */
-			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
-		} else {
-			/*
-			 * The parent has not already been added so we want to
-			 * make sure that it will be put after us.
-			 * tmp_alone_branch points to the beg of the branch
-			 * where we will add parent.
-			 */
-			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
-				rq->tmp_alone_branch);
-			/*
-			 * update tmp_alone_branch to points to the new beg
-			 * of the branch
-			 */
-			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
-		}
+	/*
+	 * Ensure we either appear before our parent (if already
+	 * enqueued) or force our parent to appear after us when it is
+	 * enqueued. The fact that we always enqueue bottom-up
+	 * reduces this to two cases and a special case for the root
+	 * cfs_rq. Furthermore, it also means that we will always reset
+	 * tmp_alone_branch either when the branch is connected
+	 * to a tree or when we reach the top of the tree
+	 */
+	if (cfs_rq->tg->parent &&
+	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+		/*
+		 * If parent is already on the list, we add the child
+		 * just before. Thanks to circular linked property of
+		 * the list, this means to put the child at the tail
+		 * of the list that starts by parent.
+		 */
+		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+		/*
+		 * The branch is now connected to its tree so we can
+		 * reset tmp_alone_branch to the beginning of the
+		 * list.
+		 */
+		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		return;
+	}
 
-		cfs_rq->on_list = 1;
+	if (!cfs_rq->tg->parent) {
+		/*
+		 * cfs rq without parent should be put
+		 * at the tail of the list.
+		 */
+		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+			&rq->leaf_cfs_rq_list);
+		/*
+		 * We have reach the top of a tree so we can reset
+		 * tmp_alone_branch to the beginning of the list.
+		 */
+		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		return;
 	}
+
+	/*
+	 * The parent has not already been added so we want to
+	 * make sure that it will be put after us.
+	 * tmp_alone_branch points to the begin of the branch
+	 * where we will add parent.
+	 */
+	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
+	/*
+	 * update tmp_alone_branch to points to the new begin
+	 * of the branch
+	 */
+	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
 }
 
 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
@@ -345,7 +350,12 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 }
 
-/* Iterate through all leaf cfs_rq's on a runqueue: */
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
+}
+
+/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
@@ -433,6 +443,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 }
 
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+}
+
 #define for_each_leaf_cfs_rq(rq, cfs_rq)	\
 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
@@ -5172,6 +5186,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	}
 
+	assert_list_leaf_cfs_rq(rq);
+
 	hrtick_update(rq);
 }