Commit 5d299eab authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched/fair: Add tmp_alone_branch assertion

The magic in list_add_leaf_cfs_rq() requires that at the end of
enqueue_task_fair():

  rq->tmp_alone_branch == &rq->leaf_cfs_rq_list

If this is violated, list integrity is compromised for list entries
and the tmp_alone_branch pointer might dangle.

Also, reflow list_add_leaf_cfs_rq() while there. This loses one
indentation level and generates a form that's convenient for the next
patch.
Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parent c546951d
...@@ -277,9 +277,14 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) ...@@ -277,9 +277,14 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{ {
if (!cfs_rq->on_list) {
struct rq *rq = rq_of(cfs_rq); struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq); int cpu = cpu_of(rq);
if (cfs_rq->on_list)
return;
cfs_rq->on_list = 1;
/* /*
* Ensure we either appear before our parent (if already * Ensure we either appear before our parent (if already
* enqueued) or force our parent to appear after us when it is * enqueued) or force our parent to appear after us when it is
...@@ -287,7 +292,7 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -287,7 +292,7 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
* reduces this to two cases and a special case for the root * reduces this to two cases and a special case for the root
* cfs_rq. Furthermore, it also means that we will always reset * cfs_rq. Furthermore, it also means that we will always reset
* tmp_alone_branch either when the branch is connected * tmp_alone_branch either when the branch is connected
* to a tree or when we reach the beg of the tree * to a tree or when we reach the top of the tree
*/ */
if (cfs_rq->tg->parent && if (cfs_rq->tg->parent &&
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
...@@ -305,7 +310,10 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -305,7 +310,10 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
* list. * list.
*/ */
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
} else if (!cfs_rq->tg->parent) { return;
}
if (!cfs_rq->tg->parent) {
/* /*
* cfs rq without parent should be put * cfs rq without parent should be put
* at the tail of the list. * at the tail of the list.
...@@ -313,28 +321,25 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -313,28 +321,25 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
&rq->leaf_cfs_rq_list); &rq->leaf_cfs_rq_list);
/* /*
* We have reach the beg of a tree so we can reset * We have reach the top of a tree so we can reset
* tmp_alone_branch to the beginning of the list. * tmp_alone_branch to the beginning of the list.
*/ */
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
} else { return;
}
/* /*
* The parent has not already been added so we want to * The parent has not already been added so we want to
* make sure that it will be put after us. * make sure that it will be put after us.
* tmp_alone_branch points to the beg of the branch * tmp_alone_branch points to the begin of the branch
* where we will add parent. * where we will add parent.
*/ */
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
rq->tmp_alone_branch);
/* /*
* update tmp_alone_branch to points to the new beg * update tmp_alone_branch to points to the new begin
* of the branch * of the branch
*/ */
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
}
cfs_rq->on_list = 1;
}
} }
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
...@@ -345,7 +350,12 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -345,7 +350,12 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
} }
} }
/* Iterate through all leaf cfs_rq's on a runqueue: */ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}
/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
...@@ -433,6 +443,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -433,6 +443,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{ {
} }
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}
#define for_each_leaf_cfs_rq(rq, cfs_rq) \ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
...@@ -5172,6 +5186,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -5172,6 +5186,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
} }
assert_list_leaf_cfs_rq(rq);
hrtick_update(rq); hrtick_update(rq);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment