Commit b85c8b71 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Optimize ttwu_stat()

The whole of ttwu_stat() is guarded by a single schedstat_enabled();
there is absolutely no point in then issuing another static_branch test
for every single schedstat_inc() in there.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 460e8c33
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1630,16 +1630,16 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 #ifdef CONFIG_SMP
 	if (cpu == rq->cpu) {
-		schedstat_inc(rq->ttwu_local);
-		schedstat_inc(p->se.statistics.nr_wakeups_local);
+		__schedstat_inc(rq->ttwu_local);
+		__schedstat_inc(p->se.statistics.nr_wakeups_local);
 	} else {
 		struct sched_domain *sd;
 
-		schedstat_inc(p->se.statistics.nr_wakeups_remote);
+		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
 		rcu_read_lock();
 		for_each_domain(rq->cpu, sd) {
 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-				schedstat_inc(sd->ttwu_wake_remote);
+				__schedstat_inc(sd->ttwu_wake_remote);
 				break;
 			}
 		}
@@ -1647,14 +1647,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	}
 
 	if (wake_flags & WF_MIGRATED)
-		schedstat_inc(p->se.statistics.nr_wakeups_migrate);
+		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
 #endif /* CONFIG_SMP */
 
-	schedstat_inc(rq->ttwu_count);
-	schedstat_inc(p->se.statistics.nr_wakeups);
+	__schedstat_inc(rq->ttwu_count);
+	__schedstat_inc(p->se.statistics.nr_wakeups);
 
 	if (wake_flags & WF_SYNC)
-		schedstat_inc(p->se.statistics.nr_wakeups_sync);
+		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
 }
 
 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -31,6 +31,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 	rq->rq_sched_info.run_delay += delta;
 }
 
 #define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
+#define __schedstat_inc(var)		do { var++; } while (0)
 #define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
 #define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
 #define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
@@ -48,6 +49,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
 #define schedstat_enabled()		0
+#define __schedstat_inc(var)		do { } while (0)
 #define schedstat_inc(var)		do { } while (0)
 #define schedstat_add(var, amt)		do { } while (0)
 #define schedstat_set(var, val)		do { } while (0)
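For readers who want to see the pattern outside the kernel tree, the sketch below re-creates it in plain userspace C. The struct, field, and flag names (wakeup_stats, ttwu_count, ttwu_local, schedstats_on) are simplified stand-ins, not the kernel's actual definitions; the point is only that schedstat_inc() re-tests the enable check on every use, while __schedstat_inc() relies on the single guard at the top of the function.

/*
 * Minimal userspace sketch of the pattern (illustrative only; the
 * identifiers below are stand-ins, not the kernel's definitions).
 */
#include <stdbool.h>
#include <stdio.h>

static bool schedstats_on;	/* stand-in for the sched_schedstats static key */

#define schedstat_enabled()	(schedstats_on)
/* Guarded helper: re-tests the enable check on every increment. */
#define schedstat_inc(var)	do { if (schedstat_enabled()) { (var)++; } } while (0)
/* New helper: plain increment; the caller must already hold the guard. */
#define __schedstat_inc(var)	do { (var)++; } while (0)

struct wakeup_stats {
	unsigned long ttwu_count;
	unsigned long ttwu_local;
};

/* One schedstat_enabled() test up front, unconditional increments inside. */
static void ttwu_stat_sketch(struct wakeup_stats *st, bool local)
{
	if (!schedstat_enabled())
		return;

	__schedstat_inc(st->ttwu_count);
	if (local)
		__schedstat_inc(st->ttwu_local);
}

int main(void)
{
	struct wakeup_stats st = { 0, 0 };

	schedstats_on = true;
	ttwu_stat_sketch(&st, true);
	printf("ttwu_count=%lu ttwu_local=%lu\n", st.ttwu_count, st.ttwu_local);
	return 0;
}

In the kernel itself the saving comes from not re-evaluating the static-branch jump label once schedstat_enabled() has already been checked; the double-underscore prefix follows the usual kernel convention that the caller has already done the necessary check.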