Commit f6853ce5 authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] consolidate hit count increments in profile_tick()

With prof_cpu_mask and profile_pc() in hand, the core is now able to perform
all the profile accounting work on behalf of arches.  Consolidate the profile
accounting and convert all arches to call the core function.
Signed-off-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f3549b15
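
Before the per-arch hunks, a condensed sketch of the consolidated core this patch introduces — lifted from the core-profiler hunk near the end of the diff, with the include lines the patch adds there pulled together here for readability (grouping them this way is an editorial assumption, not patch text):

	#include <linux/profile.h>	/* CPU_PROFILING, prof_buffer, prof_shift, prof_len */
	#include <asm/sections.h>	/* _stext */
	#include <asm/ptrace.h>		/* struct pt_regs, user_mode() */

	void profile_hit(int type, void *__pc)
	{
		unsigned long pc;

		if (prof_on != type || !prof_buffer)
			return;
		/* Slot index: PC offset from _stext, scaled by prof_shift;
		 * out-of-bounds values are clamped into the last slot so
		 * they show up as a sharp peak rather than vanishing. */
		pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
		atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len - 1)]);
	}

	void profile_tick(int type, struct pt_regs *regs)
	{
		if (type == CPU_PROFILING)
			profile_hook(regs);
		/* Honor /proc/irq/prof_cpu_mask and skip user-mode samples. */
		if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
			profile_hit(type, (void *)profile_pc(regs));
	}

With this in place, each arch's tick handler below collapses to a single profile_tick(CPU_PROFILING, regs) call.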
@@ -40,30 +40,3 @@ extern struct hw_interrupt_type i8259a_irq_type;
extern void init_i8259a_irqs(void);
extern void handle_irq(int irq, struct pt_regs * regs);
static inline void
alpha_do_profile(unsigned long pc)
{
extern char _stext;
if (!prof_buffer)
return;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <asm/hwrpb.h>
#include <asm/ptrace.h>
@@ -599,8 +600,7 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
struct cpuinfo_alpha *data = &cpu_data[cpu];
/* Record kernel PC. */
if (!user)
alpha_do_profile(regs->pc);
profile_tick(CPU_PROFILING, regs);
if (!--data->prof_counter) {
/* We need to make like a normal interrupt -- otherwise
......
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -118,8 +119,7 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
#ifndef CONFIG_SMP
/* Not SMP, do kernel PC profiling here. */
if (!user_mode(regs))
alpha_do_profile(regs->pc);
profile_tick(CPU_PROFILING, regs);
#endif
write_seqlock(&xtime_lock);
......
@@ -79,31 +79,6 @@ unsigned long long __attribute__((weak)) sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ);
}
/*
* Handle kernel profile stuff...
*/
static inline void do_profile(struct pt_regs *regs)
{
profile_hook(regs);
if (!user_mode(regs) &&
prof_buffer &&
current->pid) {
unsigned long pc = instruction_pointer(regs);
extern int _stext;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
if (pc >= prof_len)
pc = prof_len - 1;
prof_buffer[pc] += 1;
}
}
static unsigned long next_rtc_update;
/*
@@ -317,7 +292,7 @@ EXPORT_SYMBOL(do_settimeofday);
void timer_tick(struct pt_regs *regs)
{
do_profile(regs);
profile_tick(CPU_PROFILING, regs);
do_leds();
do_set_rtc();
do_timer(regs);
......
@@ -67,28 +67,6 @@ static unsigned long dummy_gettimeoffset(void)
*/
unsigned long (*gettimeoffset)(void) = dummy_gettimeoffset;
/*
* Handle kernel profile stuff...
*/
static inline void do_profile(struct pt_regs *regs)
{
if (!user_mode(regs) &&
prof_buffer &&
current->pid) {
unsigned long pc = instruction_pointer(regs);
extern int _stext;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
if (pc >= prof_len)
pc = prof_len - 1;
prof_buffer[pc] += 1;
}
}
static unsigned long next_rtc_update;
/*
@@ -189,7 +167,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
do_set_rtc(); //FIXME - EVERY timer IRQ?
do_profile(regs);
profile_tick(CPU_PROFILING, regs);
return IRQ_HANDLED; //FIXME - is this right?
}
......
@@ -36,24 +36,6 @@ u64 jiffies_64;
EXPORT_SYMBOL(jiffies_64);
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -64,10 +46,7 @@ static void timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
platform_timer_eoi();
do_timer(regs);
if (!user_mode(regs))
do_profile(regs->pc);
profile_tick(CPU_PROFILING, regs);
}
void time_init(void)
......
@@ -1071,8 +1071,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
int cpu = smp_processor_id();
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
......
@@ -1287,8 +1287,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
int cpu = smp_processor_id();
long weight;
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
......
@@ -186,49 +186,6 @@ do_gettimeofday (struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday);
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
ip = profile_pc(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (ip > prof_len-1)
ip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[ip]);
}
static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
@@ -246,7 +203,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
ia64_get_itc(), new_itm);
ia64_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
while (1) {
#ifdef CONFIG_SMP
......
@@ -38,24 +38,6 @@ static inline int set_rtc_mmss(unsigned long nowtime)
return -1;
}
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -63,9 +45,7 @@ static inline void do_profile (unsigned long pc)
static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
{
do_timer(regs);
if (!user_mode(regs))
do_profile(regs->pc);
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
/* use power LED as a heartbeat instead -- much more useful
......
@@ -41,24 +41,6 @@ static inline int set_rtc_mmss(unsigned long nowtime)
return -1;
}
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -75,9 +57,8 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
write_seqlock(&xtime_lock);
do_timer(regs);
if (!user_mode(regs))
do_profile(regs->pc);
if (current->pid)
profile_tick(CPU_PROFILING, regs);
/*
* If we have an externally synchronized Linux clock, then update
......
@@ -110,17 +110,8 @@ void coldfire_profile_tick(int irq, void *dummy, struct pt_regs *regs)
{
/* Reset ColdFire timer2 */
mcf_proftp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF;
if (!user_mode(regs)) {
if (prof_buffer && current->pid) {
extern int _stext;
unsigned long ip = instruction_pointer(regs);
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
if (ip < prof_len)
prof_buffer[ip]++;
}
}
if (current->pid)
profile_tick(CPU_PROFILING, regs);
}
/***************************************************************************/
......
@@ -417,23 +417,8 @@ static long last_rtc_update;
*/
void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (prof_buffer && current->pid) {
unsigned long pc = regs->cp0_epc;
pc -= (unsigned long) _stext;
pc >>= prof_shift;
/*
* Dont ignore out-of-bounds pc values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
}
if (current->pid)
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_SMP
/* in UP mode, update_process_times() is invoked by do_timer() */
update_process_times(user_mode(regs));
......
@@ -47,41 +47,6 @@ static long halftick;
extern void smp_do_timer(struct pt_regs *regs);
#endif
static inline void
parisc_do_profile(struct pt_regs *regs)
{
unsigned long pc = regs->iaoq[0];
extern char _stext;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
#if 0
/* FIXME: when we have irq affinity to cpu, we need to
* only look at the cpus specified in this mask
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
#endif
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
long now;
@@ -89,7 +54,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
int nticks;
int cpu = smp_processor_id();
parisc_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
now = mfctl(16);
/* initialize next_tick to time at last clocktick */
......
@@ -108,41 +108,6 @@ static inline int tb_delta(unsigned *jiffy_stamp) {
return delta;
}
extern char _stext;
static inline void ppc_do_profile (struct pt_regs *regs)
{
unsigned long nip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
nip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
nip -= (unsigned long) &_stext;
nip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (nip > prof_len-1)
nip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[nip]);
}
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
@@ -163,7 +128,7 @@ void timer_interrupt(struct pt_regs * regs)
while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
jiffy_stamp += tb_ticks_per_jiffy;
ppc_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
if (smp_processor_id())
continue;
......
@@ -108,45 +108,6 @@ void ppc_adjtimex(void);
static unsigned adjusting_time = 0;
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void ppc64_do_profile(struct pt_regs *regs)
{
unsigned long nip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
nip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
nip -= (unsigned long)_stext;
nip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (nip > prof_len-1)
nip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[nip]);
}
static __inline__ void timer_check_rtc(void)
{
/*
@@ -277,7 +238,7 @@ int timer_interrupt(struct pt_regs * regs)
irq_enter();
#ifndef CONFIG_PPC_ISERIES
ppc64_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#endif
lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
......
@@ -174,46 +174,7 @@ __calculate_ticks(__u64 elapsed)
#ifdef CONFIG_PROFILING
extern char _stext, _etext;
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void s390_do_profile(struct pt_regs * regs)
{
unsigned long eip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
eip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
eip -= (unsigned long) &_stext;
eip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (eip > prof_len-1)
eip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[eip]);
}
#define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs)
#else
#define s390_do_profile(regs) do { ; } while(0)
#endif /* CONFIG_PROFILING */
......
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -250,32 +251,6 @@ EXPORT_SYMBOL(do_settimeofday);
/* last time the RTC clock got updated */
static long last_rtc_update;
/* Profiling definitions */
extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;
extern char _stext;
static inline void sh_do_profile(unsigned long pc)
{
/* Don't profile cpu_idle.. */
if (!prof_buffer || !current->pid)
return;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -283,9 +258,7 @@ static inline void sh_do_profile(unsigned long pc)
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
if (!user_mode(regs))
sh_do_profile(profile_pc(regs));
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
if (sh_mv.mv_heartbeat != NULL)
......
@@ -298,37 +298,6 @@ static int set_rtc_time(unsigned long nowtime)
/* last time the RTC clock got updated */
static long last_rtc_update = 0;
static inline void sh64_do_profile(struct pt_regs *regs)
{
extern int _stext;
unsigned long pc;
profile_hook(regs);
if (user_mode(regs))
return;
/* Don't profile cpu_idle.. */
if (!prof_buffer || !current->pid)
return;
pc = instruction_pointer(regs);
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently, put them into the
* last histogram slot, so if present, they will show up as a sharp
* peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
/* We could just be sloppy and not lock against a re-entry on this
increment, but the profiling code won't always be linked in anyway. */
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -340,8 +309,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
ctc_last_interrupt = (unsigned long) current_ctc;
do_timer(regs);
sh64_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
{
......
@@ -410,8 +410,6 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
panic("Bogon SMP message pass.");
}
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = hard_smp4d_processor_id();
@@ -429,8 +427,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
show_leds(cpu);
}
if(!user_mode(regs))
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
profile_tick(CPU_PROFILING, regs);
if(!--prof_counter(cpu)) {
int user = user_mode(regs);
......
@@ -392,16 +392,13 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
clear_profile_irq(cpu);
if(!user_mode(regs))
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
profile_tick(CPU_PROFILING, regs);
if(!--prof_counter(cpu)) {
int user = user_mode(regs);
......
@@ -99,26 +99,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return pc;
}
static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
/* 32-bit Sparc specific profiling function. */
void sparc_do_profile(unsigned long pc, unsigned long o7)
{
if(prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
spin_lock(&ticker_lock);
if(pc < prof_len)
prof_buffer[pc]++;
else
prof_buffer[prof_len - 1]++;
spin_unlock(&ticker_lock);
}
}
__volatile__ unsigned int *master_l10_counter;
__volatile__ unsigned int *master_l10_limit;
@@ -135,8 +115,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
static long last_rtc_update;
#ifndef CONFIG_SMP
if(!user_mode(regs))
sparc_do_profile(profile_pc(regs));
profile_tick(CPU_PROFILING, regs);
#endif
/* Protect counter clear so that do_gettimeoffset works */
......
@@ -975,8 +975,6 @@ void smp_promstop_others(void)
smp_cross_call(&xcall_promstop, 0, 0, 0);
}
extern void sparc64_do_profile(struct pt_regs *regs);
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
@@ -1002,7 +1000,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
}
do {
sparc64_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
if (!--prof_counter(cpu)) {
irq_enter();
......
@@ -442,22 +442,6 @@ static inline void timer_check_rtc(void)
}
}
void sparc64_do_profile(struct pt_regs *regs)
{
unsigned long pc;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
pc = (profile_pc(regs) - (unsigned long)_stext) >> prof_shift;
atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len-1)]);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
unsigned long ticks, pstate;
@@ -466,7 +450,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
do {
#ifndef CONFIG_SMP
sparc64_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#endif
do_timer(regs);
......
@@ -40,24 +40,6 @@ unsigned long long sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ);
}
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -74,10 +56,7 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
mach_tick ();
do_timer (regs);
if (! user_mode (regs))
do_profile (regs->pc);
profile_tick(CPU_PROFILING, regs);
#if 0
/*
* If we have an externally synchronized Linux clock, then update
......
@@ -836,8 +836,7 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
......
@@ -396,7 +396,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
#ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#else
if (!using_apic_timer)
smp_local_timer_interrupt(regs);
......
@@ -68,48 +68,6 @@ extern atomic_t irq_mis_count;
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
static inline void __do_profile(unsigned long eip)
{
if (!prof_buffer)
return;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
eip -= (unsigned long)_stext;
eip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (eip > prof_len-1)
eip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[eip]);
}
#define kern_profile(eip) __do_profile(eip)
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void x86_do_profile(struct pt_regs * regs)
{
profile_hook(regs);
if (prof_on != 1 || user_mode(regs))
return;
__do_profile(regs->eip);
}
#if defined(CONFIG_X86_IO_APIC)
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
......
@@ -22,7 +22,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler.
*/
#ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#else
if (!using_apic_timer)
smp_local_timer_interrupt(regs);
......
@@ -15,7 +15,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler.
*/
#ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs);
profile_tick(CPU_PROFILING, regs);
#else
if (!using_apic_timer)
smp_local_timer_interrupt(regs);
......
@@ -129,39 +129,6 @@ __asm__( \
"push $" #nr "-256 ; " \
"jmp common_interrupt");
static inline void x86_do_profile (struct pt_regs *regs)
{
unsigned long rip;
extern char _stext[];
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
rip = regs->rip;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
rip -= (unsigned long) &_stext;
rip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (rip > prof_len-1)
rip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[rip]);
}
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP)
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
if (IO_APIC_IRQ(i))
......
@@ -9,7 +9,11 @@
#include <linux/cpumask.h>
#include <asm/errno.h>
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
struct proc_dir_entry;
struct pt_regs;
/* parse command line */
int __init profile_setup(char * str);
@@ -17,6 +21,8 @@ int __init profile_setup(char * str);
/* init basic kernel profiler */
void __init profile_init(void);
void create_prof_cpu_mask(struct proc_dir_entry *);
void profile_tick(int, struct pt_regs *);
void profile_hit(int, void *);
extern unsigned int * prof_buffer;
extern unsigned long prof_len;
......
@@ -9,6 +9,7 @@
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <asm/sections.h>
unsigned int * prof_buffer;
@@ -169,6 +170,25 @@ EXPORT_SYMBOL_GPL(profile_event_unregister);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
void profile_hit(int type, void *__pc)
{
unsigned long pc;
if (prof_on != type || !prof_buffer)
return;
pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len - 1)]);
}
void profile_tick(int type, struct pt_regs *regs)
{
if (type == CPU_PROFILING)
profile_hook(regs);
if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
profile_hit(type, (void *)profile_pc(regs));
}
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data)
......
@@ -31,6 +31,7 @@
#include <linux/kernel_stat.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -3220,10 +3221,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
policy != SCHED_NORMAL)
goto out_unlock;
}
#ifdef kern_profile
if (unlikely(prof_on == 2))
__do_profile((unsigned long)__builtin_return_address(0));
#endif
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
......