Commit bb795e33 authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] Speed up oprofile buffer drain code

I noticed a large machine was doing about 400,000 context switches per
second when oprofile was enabled.  Upon closer inspection it looks like we
were rearming the buffer sync timer without modifying the expire time.
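
The reason a stale expiry turns into a storm: add_timer_on() arms the timer
for whatever b->timer.expires already holds, and once that instant is in the
past the timer fires again on the very next tick.  A minimal sketch of the
pre-patch rearm pattern (field names as in the old cpu_buffer code;
simplified):

	/* expires was set once, at buffer-allocation time: */
	b->timer.expires = jiffies + DEFAULT_TIMER_EXPIRE;

	/* ...but the periodic rearm never refreshed it: */
	del_timer_sync(&b->timer);
	add_timer_on(&b->timer, b->cpu);  /* already expired -> fires next tick */

	/* a timer-based fix would have had to refresh the expiry first: */
	b->timer.expires = jiffies + DEFAULT_TIMER_EXPIRE;
	add_timer_on(&b->timer, b->cpu);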

Now that we have schedule_delayed_work_on I believe we can remove the timer
completely.  Each cpu should be offset by 1 jiffy so they don't all fire at
the same time.  I bumped DEFAULT_TIMER_EXPIRE from 2 to 10 times a second
to be sure we reap cpu buffers.
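
Taken together, this is the self-rearming per-cpu delayed work pattern: each
cpu's work item is kicked once with schedule_delayed_work_on(), staggered by
one jiffy, and the handler then requeues itself with schedule_delayed_work().
Since the requeue happens from that cpu's own worker thread, the sync work
stays pinned to its cpu with no timer needed.  In outline (condensed from the
patch below):

	/* start: one delayed work item per cpu, offset by i jiffies */
	for_each_online_cpu(i)
		schedule_delayed_work_on(i, &cpu_buffer[i].work,
					 DEFAULT_TIMER_EXPIRE + i);

	/* handler: drain this cpu's buffer, then requeue itself */
	static void wq_sync_buffer(void *data)
	{
		struct oprofile_cpu_buffer *b = (struct oprofile_cpu_buffer *)data;

		sync_buffer(b->cpu);
		if (timers_enabled)
			schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
	}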

With the following patch the same large machine gets about 4000 context
switches per second.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 24668b77
drivers/oprofile/cpu_buffer.c
@@ -28,8 +28,8 @@
 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 static void wq_sync_buffer(void *);
-static void timer_ping(unsigned long data);
-#define DEFAULT_TIMER_EXPIRE (HZ / 2)
+#define DEFAULT_TIMER_EXPIRE (HZ / 10)
 int timers_enabled;
 static void __free_cpu_buffers(int num)
@@ -64,10 +64,6 @@ int alloc_cpu_buffers(void)
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->cpu = i;
-		init_timer(&b->timer);
-		b->timer.function = timer_ping;
-		b->timer.data = i;
-		b->timer.expires = jiffies + DEFAULT_TIMER_EXPIRE;
 		INIT_WORK(&b->work, wq_sync_buffer, b);
 	}
 	return 0;
@@ -93,7 +89,11 @@ void start_cpu_timers(void)
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
-		add_timer_on(&b->timer, i);
+		/*
+		 * Spread the work by 1 jiffy per cpu so they dont all
+		 * fire at once.
+		 */
+		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
 	}
 }
@@ -107,7 +107,7 @@ void end_cpu_timers(void)
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
-		del_timer_sync(&b->timer);
+		cancel_delayed_work(&b->work);
 	}
 	flush_scheduled_work();
@@ -203,7 +203,13 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
 }
-/* FIXME: not guaranteed to be on our CPU */
+/*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+ *
+ * By using schedule_delayed_work_on and then schedule_delayed_work
+ * we guarantee this will stay on the correct cpu
+ */
 static void wq_sync_buffer(void * data)
 {
 	struct oprofile_cpu_buffer * b = (struct oprofile_cpu_buffer *)data;
@@ -213,24 +219,7 @@ static void wq_sync_buffer(void * data)
 	}
 	sync_buffer(b->cpu);
-	/* don't re-add the timer if we're shutting down */
-	if (timers_enabled) {
-		del_timer_sync(&b->timer);
-		add_timer_on(&b->timer, b->cpu);
-	}
-}
-/* This serves to avoid cpu buffer overflow, and makes sure
- * the task mortuary progresses
- */
-static void timer_ping(unsigned long data)
-{
-	struct oprofile_cpu_buffer * b = &cpu_buffer[data];
-	if (b->cpu != smp_processor_id()) {
-		printk("Timer on CPU%d, prefer CPU%d\n",
-			smp_processor_id(), b->cpu);
-	}
-	schedule_work(&b->work);
-	/* work will re-enable our timer */
+	/* don't re-add the work if we're shutting down */
+	if (timers_enabled)
+		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
 }
drivers/oprofile/cpu_buffer.h
@@ -12,7 +12,6 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/cache.h>
@@ -42,7 +41,6 @@ struct oprofile_cpu_buffer {
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	int cpu;
-	struct timer_list timer;
 	struct work_struct work;
 } ____cacheline_aligned;
kernel/workqueue.c
@@ -525,5 +525,5 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
 EXPORT_SYMBOL(schedule_work);
 EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(schedule_delayed_work_on);
 EXPORT_SYMBOL(flush_scheduled_work);
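
One subtlety the hunks above imply: shutdown must stop the self-rearming loop
before waiting for it.  end_cpu_timers() presumably clears timers_enabled
first, then cancels any not-yet-running work, then flushes; a sketch of that
ordering (reconstructed, not quoted from the patch):

	timers_enabled = 0;		/* handlers stop requeueing */
	for_each_online_cpu(i)
		cancel_delayed_work(&cpu_buffer[i].work);
	flush_scheduled_work();		/* wait out any handler still running */

cancel_delayed_work() only removes work that has not started yet, so the
final flush is what guarantees no wq_sync_buffer() is still in flight when
profiling stops.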