Commit 317f3941 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Move the second half of ttwu() to the remote cpu

Now that we've removed the rq->lock requirement from the first part of
ttwu() and can compute placement without holding any rq->lock, ensure
we execute the second half of ttwu() on the actual cpu we want the
task to run on.

This avoids having to take the remote rq->lock and do the task enqueue
from the waking CPU, which saves a lot of cacheline transfers.
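
Concretely, the waking CPU pushes the task onto a lock-free, per-runqueue
wake list and sends a reschedule IPI only if the list was empty; the target
CPU's scheduler_ipi() later detaches the whole list with xchg() and activates
the tasks under its own rq->lock. Below is a rough standalone sketch of that
scheme, not the kernel code itself: the names (struct task, queue_remote,
drain_pending) are hypothetical and GCC __atomic builtins stand in for the
kernel's cmpxchg()/xchg().

/* Toy model of the per-runqueue wake-list scheme; not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct task {
        const char *name;
        struct task *wake_entry;        /* next pending wakeup */
};

static struct task *wake_list;          /* one of these per runqueue in the kernel */

/* Waker side: push @p; return nonzero if the list was empty (caller sends the IPI). */
static int queue_remote(struct task *p)
{
        struct task *head = __atomic_load_n(&wake_list, __ATOMIC_RELAXED);

        for (;;) {
                p->wake_entry = head;
                if (__atomic_compare_exchange_n(&wake_list, &head, p, 0,
                                                __ATOMIC_RELEASE, __ATOMIC_RELAXED))
                        break;          /* published; head still holds the previous value */
                /* head was refreshed by the failed CAS; retry */
        }
        return head == NULL;
}

/* Target side (the scheduler IPI): detach everything at once, then activate each task. */
static void drain_pending(void)
{
        struct task *list = __atomic_exchange_n(&wake_list, NULL, __ATOMIC_ACQUIRE);

        while (list) {
                struct task *p = list;

                list = list->wake_entry;
                printf("activating %s on the local cpu\n", p->name);  /* ttwu_do_activate() */
        }
}

int main(void)
{
        struct task a = { .name = "a" }, b = { .name = "b" };

        if (queue_remote(&a))
                printf("list was empty: would smp_send_reschedule()\n");
        queue_remote(&b);               /* second waker piggybacks on the pending IPI */
        drain_pending();
        return 0;
}

Only the waker that finds the list empty pays for an IPI; later wakers just
link themselves in, and the target CPU takes its own rq->lock once per batch
instead of every waker bouncing that lock across the machine.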

As measured using: http://oss.oracle.com/~mason/sembench.c

  $ for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $i; done
  $ echo 4096 32000 64 128 > /proc/sys/kernel/sem
  $ ./sembench -t 2048 -w 1900 -o 0

  unpatched: run time 30 seconds 647278 worker burns per second
  patched:   run time 30 seconds 816715 worker burns per second
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152729.515897185@chello.nl
parent c05fbafb
@@ -1203,6 +1203,7 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
+	struct task_struct *wake_entry;
 	int on_cpu;
 #endif
 	int on_rq;
@@ -2192,7 +2193,7 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-static inline void scheduler_ipi(void) { }
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
 static inline void scheduler_ipi(void) { }
...
@@ -827,6 +827,11 @@ config SCHED_AUTOGROUP
 	  desktop applications. Task group autogeneration is currently based
 	  upon task session.
 
+config SCHED_TTWU_QUEUE
+	bool
+	depends on !SPARC32
+	default y
+
 config MM_OWNER
 	bool
...
@@ -556,6 +556,10 @@ struct rq {
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
 #endif
+
+#ifdef CONFIG_SMP
+	struct task_struct *wake_list;
+#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2516,10 +2520,61 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+static void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	raw_spin_lock(&rq->lock);
+
+	while (list) {
+		struct task_struct *p = list;
+		list = list->wake_entry;
+		ttwu_do_activate(rq, p, 0);
+	}
+
+	raw_spin_unlock(&rq->lock);
+}
+
+void scheduler_ipi(void)
+{
+	sched_ttwu_pending();
+}
+
+static void ttwu_queue_remote(struct task_struct *p, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *next = rq->wake_list;
+
+	for (;;) {
+		struct task_struct *old = next;
+
+		p->wake_entry = next;
+		next = cmpxchg(&rq->wake_list, old, p);
+		if (next == old)
+			break;
+	}
+
+	if (!next)
+		smp_send_reschedule(cpu);
+}
+#endif
+
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE)
+	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+		ttwu_queue_remote(p, cpu);
+		return;
+	}
+#endif
+
 	raw_spin_lock(&rq->lock);
 	ttwu_do_activate(rq, p, 0);
 	raw_spin_unlock(&rq->lock);
@@ -6331,6 +6386,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DYING:
+		sched_ttwu_pending();
 		/* Update our root-domain */
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
...
@@ -64,3 +64,9 @@ SCHED_FEAT(OWNER_SPIN, 1)
  * Decrement CPU power based on irq activity
  */
 SCHED_FEAT(NONIRQ_POWER, 1)
+
+/*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+SCHED_FEAT(TTWU_QUEUE, 1)
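
The TTWU_QUEUE feature added above defaults to enabled. On a kernel built
with CONFIG_SCHED_DEBUG it should also be toggleable at runtime through the
sched_features debugfs file (assuming debugfs is mounted at /sys/kernel/debug),
which makes it easy to compare the queued and direct wakeup paths on a single
kernel, e.g. with the sembench run above:

  $ echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
  $ echo TTWU_QUEUE > /sys/kernel/debug/sched_features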