Commit f664a58c authored by Peter Zijlstra, committed by Greg Kroah-Hartman

sched,rt: Remove return value from pull_rt_task()

commit 8046d680 upstream.

In order to be able to use pull_rt_task() from a callback, we need to
do away with the return value.

Since the return value indicates whether we should reschedule, do this
inside the function. Since not all callers currently do this, this can
increase the number of reschedules due to rt balancing.

Too many reschedules is not a correctness issue; too few are.
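
The shape of the change is the classic "push the decision into the
callee" refactor: a balance function that used to report "reschedule
needed" through its return value now issues the reschedule itself, so
it can take on a void signature and be installed as a callback. Below
is a minimal userspace sketch of the before/after pattern; the names
(pull_work_old/pull_work_new, steal_task, reschedule, struct runqueue)
are hypothetical stand-ins, not the kernel's actual API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for the rq/task machinery. */
	struct runqueue { int curr; };

	static bool steal_task(struct runqueue *rq)
	{
		(void)rq;
		return true;	/* pretend we pulled a task over */
	}

	static void reschedule(int task)
	{
		printf("reschedule task %d\n", task);
	}

	/* Before: report via return value; every caller must act on it. */
	static int pull_work_old(struct runqueue *rq)
	{
		return steal_task(rq) ? 1 : 0;
	}

	/* After: act internally; the void signature fits a callback slot. */
	static void pull_work_new(struct runqueue *rq)
	{
		bool resched = steal_task(rq);

		if (resched)
			reschedule(rq->curr);
	}

	int main(void)
	{
		struct runqueue rq = { .curr = 42 };

		if (pull_work_old(&rq))		/* old: caller decides */
			reschedule(rq.curr);
		pull_work_new(&rq);		/* new: self-contained */
		return 0;
	}

The cost noted above follows directly from this shape: once the
reschedule lives inside the function, every call site pays for it,
including callers that previously ignored the return value.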
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ad11cd87
@@ -1652,14 +1652,15 @@ static void push_rt_tasks(struct rq *rq)
 		;
 }
 
-static int pull_rt_task(struct rq *this_rq)
+static void pull_rt_task(struct rq *this_rq)
 {
-	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	int this_cpu = this_rq->cpu, cpu;
+	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
-		return 0;
+		return;
 
 	/*
 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1716,7 +1717,7 @@ static int pull_rt_task(struct rq *this_rq)
 			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
-			ret = 1;
+			resched = true;
 
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
@@ -1732,7 +1733,8 @@ static int pull_rt_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 	}
 
-	return ret;
+	if (resched)
+		resched_task(this_rq->curr);
 }
 
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
@@ -1835,8 +1837,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!p->on_rq || rq->rt.rt_nr_running)
 		return;
 
-	if (pull_rt_task(rq))
-		resched_task(rq->curr);
+	pull_rt_task(rq);
 }
 
 void init_sched_rt_class(void)