Commit f6853ce5 authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] consolidate hit count increments in profile_tick()

With prof_cpu_mask and profile_pc() in hand, the core is now able to perform
all the profile accounting work on behalf of arches.  Consolidate the profile
accounting and convert all arches to call the core function.
Signed-off-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f3549b15
...@@ -40,30 +40,3 @@ extern struct hw_interrupt_type i8259a_irq_type; ...@@ -40,30 +40,3 @@ extern struct hw_interrupt_type i8259a_irq_type;
extern void init_i8259a_irqs(void); extern void init_i8259a_irqs(void);
extern void handle_irq(int irq, struct pt_regs * regs); extern void handle_irq(int irq, struct pt_regs * regs);
static inline void
alpha_do_profile(unsigned long pc)
{
extern char _stext;
if (!prof_buffer)
return;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/profile.h>
#include <asm/hwrpb.h> #include <asm/hwrpb.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
...@@ -599,8 +600,7 @@ smp_percpu_timer_interrupt(struct pt_regs *regs) ...@@ -599,8 +600,7 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
struct cpuinfo_alpha *data = &cpu_data[cpu]; struct cpuinfo_alpha *data = &cpu_data[cpu];
/* Record kernel PC. */ /* Record kernel PC. */
if (!user) profile_tick(CPU_PROFILING, regs);
alpha_do_profile(regs->pc);
if (!--data->prof_counter) { if (!--data->prof_counter) {
/* We need to make like a normal interrupt -- otherwise /* We need to make like a normal interrupt -- otherwise
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/bcd.h> #include <linux/bcd.h>
#include <linux/profile.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -118,8 +119,7 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs) ...@@ -118,8 +119,7 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* Not SMP, do kernel PC profiling here. */ /* Not SMP, do kernel PC profiling here. */
if (!user_mode(regs)) profile_tick(CPU_PROFILING, regs);
alpha_do_profile(regs->pc);
#endif #endif
write_seqlock(&xtime_lock); write_seqlock(&xtime_lock);
......
...@@ -79,31 +79,6 @@ unsigned long long __attribute__((weak)) sched_clock(void) ...@@ -79,31 +79,6 @@ unsigned long long __attribute__((weak)) sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ); return (unsigned long long)jiffies * (1000000000 / HZ);
} }
/*
* Handle kernel profile stuff...
*/
static inline void do_profile(struct pt_regs *regs)
{
profile_hook(regs);
if (!user_mode(regs) &&
prof_buffer &&
current->pid) {
unsigned long pc = instruction_pointer(regs);
extern int _stext;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
if (pc >= prof_len)
pc = prof_len - 1;
prof_buffer[pc] += 1;
}
}
static unsigned long next_rtc_update; static unsigned long next_rtc_update;
/* /*
...@@ -317,7 +292,7 @@ EXPORT_SYMBOL(do_settimeofday); ...@@ -317,7 +292,7 @@ EXPORT_SYMBOL(do_settimeofday);
void timer_tick(struct pt_regs *regs) void timer_tick(struct pt_regs *regs)
{ {
do_profile(regs); profile_tick(CPU_PROFILING, regs);
do_leds(); do_leds();
do_set_rtc(); do_set_rtc();
do_timer(regs); do_timer(regs);
......
...@@ -67,28 +67,6 @@ static unsigned long dummy_gettimeoffset(void) ...@@ -67,28 +67,6 @@ static unsigned long dummy_gettimeoffset(void)
*/ */
unsigned long (*gettimeoffset)(void) = dummy_gettimeoffset; unsigned long (*gettimeoffset)(void) = dummy_gettimeoffset;
/*
* Handle kernel profile stuff...
*/
static inline void do_profile(struct pt_regs *regs)
{
if (!user_mode(regs) &&
prof_buffer &&
current->pid) {
unsigned long pc = instruction_pointer(regs);
extern int _stext;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
if (pc >= prof_len)
pc = prof_len - 1;
prof_buffer[pc] += 1;
}
}
static unsigned long next_rtc_update; static unsigned long next_rtc_update;
/* /*
...@@ -189,7 +167,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -189,7 +167,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
do_timer(regs); do_timer(regs);
do_set_rtc(); //FIME - EVERY timer IRQ? do_set_rtc(); //FIME - EVERY timer IRQ?
do_profile(regs); profile_tick(CPU_PROFILING, regs);
return IRQ_HANDLED; //FIXME - is this right? return IRQ_HANDLED; //FIXME - is this right?
} }
......
...@@ -36,24 +36,6 @@ u64 jiffies_64; ...@@ -36,24 +36,6 @@ u64 jiffies_64;
EXPORT_SYMBOL(jiffies_64); EXPORT_SYMBOL(jiffies_64);
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -64,10 +46,7 @@ static void timer_interrupt(int irq, void *dummy, struct pt_regs * regs) ...@@ -64,10 +46,7 @@ static void timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
platform_timer_eoi(); platform_timer_eoi();
do_timer(regs); do_timer(regs);
profile_tick(CPU_PROFILING, regs);
if (!user_mode(regs))
do_profile(regs->pc);
} }
void time_init(void) void time_init(void)
......
...@@ -1071,8 +1071,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs) ...@@ -1071,8 +1071,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) { if (--per_cpu(prof_counter, cpu) <= 0) {
/* /*
* The multiplier may have changed since the last time we got * The multiplier may have changed since the last time we got
......
...@@ -1287,8 +1287,7 @@ smp_local_timer_interrupt(struct pt_regs * regs) ...@@ -1287,8 +1287,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
long weight; long weight;
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) { if (--per_cpu(prof_counter, cpu) <= 0) {
/* /*
* The multiplier may have changed since the last time we got * The multiplier may have changed since the last time we got
......
...@@ -186,49 +186,6 @@ do_gettimeofday (struct timeval *tv) ...@@ -186,49 +186,6 @@ do_gettimeofday (struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday); EXPORT_SYMBOL(do_gettimeofday);
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
ip = profile_pc(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (ip > prof_len-1)
ip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[ip]);
}
static irqreturn_t static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{ {
...@@ -246,7 +203,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ...@@ -246,7 +203,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
ia64_get_itc(), new_itm); ia64_get_itc(), new_itm);
ia64_do_profile(regs); profile_tick(CPU_PROFILING, regs);
while (1) { while (1) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -38,24 +38,6 @@ static inline int set_rtc_mmss(unsigned long nowtime) ...@@ -38,24 +38,6 @@ static inline int set_rtc_mmss(unsigned long nowtime)
return -1; return -1;
} }
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -63,9 +45,7 @@ static inline void do_profile (unsigned long pc) ...@@ -63,9 +45,7 @@ static inline void do_profile (unsigned long pc)
static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs) static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
{ {
do_timer(regs); do_timer(regs);
profile_tick(CPU_PROFILING, regs);
if (!user_mode(regs))
do_profile(regs->pc);
#ifdef CONFIG_HEARTBEAT #ifdef CONFIG_HEARTBEAT
/* use power LED as a heartbeat instead -- much more useful /* use power LED as a heartbeat instead -- much more useful
......
...@@ -41,24 +41,6 @@ static inline int set_rtc_mmss(unsigned long nowtime) ...@@ -41,24 +41,6 @@ static inline int set_rtc_mmss(unsigned long nowtime)
return -1; return -1;
} }
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -75,9 +57,8 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs) ...@@ -75,9 +57,8 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
write_seqlock(&xtime_lock); write_seqlock(&xtime_lock);
do_timer(regs); do_timer(regs);
if (current->pid)
if (!user_mode(regs)) profile_tick(CPU_PROFILING, regs);
do_profile(regs->pc);
/* /*
* If we have an externally synchronized Linux clock, then update * If we have an externally synchronized Linux clock, then update
......
...@@ -110,17 +110,8 @@ void coldfire_profile_tick(int irq, void *dummy, struct pt_regs *regs) ...@@ -110,17 +110,8 @@ void coldfire_profile_tick(int irq, void *dummy, struct pt_regs *regs)
{ {
/* Reset ColdFire timer2 */ /* Reset ColdFire timer2 */
mcf_proftp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF; mcf_proftp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF;
if (current->pid)
if (!user_mode(regs)) { profile_tick(CPU_PROFILING, regs);
if (prof_buffer && current->pid) {
extern int _stext;
unsigned long ip = instruction_pointer(regs);
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
if (ip < prof_len)
prof_buffer[ip]++;
}
}
} }
/***************************************************************************/ /***************************************************************************/
......
...@@ -417,23 +417,8 @@ static long last_rtc_update; ...@@ -417,23 +417,8 @@ static long last_rtc_update;
*/ */
void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
if (!user_mode(regs)) { if (current->pid)
if (prof_buffer && current->pid) { profile_tick(CPU_PROFILING, regs);
unsigned long pc = regs->cp0_epc;
pc -= (unsigned long) _stext;
pc >>= prof_shift;
/*
* Dont ignore out-of-bounds pc values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* in UP mode, update_process_times() is invoked by do_timer() */ /* in UP mode, update_process_times() is invoked by do_timer() */
update_process_times(user_mode(regs)); update_process_times(user_mode(regs));
......
...@@ -47,41 +47,6 @@ static long halftick; ...@@ -47,41 +47,6 @@ static long halftick;
extern void smp_do_timer(struct pt_regs *regs); extern void smp_do_timer(struct pt_regs *regs);
#endif #endif
static inline void
parisc_do_profile(struct pt_regs *regs)
{
unsigned long pc = regs->iaoq[0];
extern char _stext;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
#if 0
/* FIXME: when we have irq affinity to cpu, we need to
* only look at the cpus specified in this mask
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
#endif
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
long now; long now;
...@@ -89,7 +54,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -89,7 +54,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
int nticks; int nticks;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
parisc_do_profile(regs); profile_tick(CPU_PROFILING, regs);
now = mfctl(16); now = mfctl(16);
/* initialize next_tick to time at last clocktick */ /* initialize next_tick to time at last clocktick */
......
...@@ -108,41 +108,6 @@ static inline int tb_delta(unsigned *jiffy_stamp) { ...@@ -108,41 +108,6 @@ static inline int tb_delta(unsigned *jiffy_stamp) {
return delta; return delta;
} }
extern char _stext;
static inline void ppc_do_profile (struct pt_regs *regs)
{
unsigned long nip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
nip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
nip -= (unsigned long) &_stext;
nip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (nip > prof_len-1)
nip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[nip]);
}
/* /*
* timer_interrupt - gets called when the decrementer overflows, * timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled. * with interrupts disabled.
...@@ -163,7 +128,7 @@ void timer_interrupt(struct pt_regs * regs) ...@@ -163,7 +128,7 @@ void timer_interrupt(struct pt_regs * regs)
while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) { while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
jiffy_stamp += tb_ticks_per_jiffy; jiffy_stamp += tb_ticks_per_jiffy;
ppc_do_profile(regs); profile_tick(CPU_PROFILING, regs);
if (smp_processor_id()) if (smp_processor_id())
continue; continue;
......
...@@ -108,45 +108,6 @@ void ppc_adjtimex(void); ...@@ -108,45 +108,6 @@ void ppc_adjtimex(void);
static unsigned adjusting_time = 0; static unsigned adjusting_time = 0;
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void ppc64_do_profile(struct pt_regs *regs)
{
unsigned long nip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
nip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
nip -= (unsigned long)_stext;
nip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (nip > prof_len-1)
nip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[nip]);
}
static __inline__ void timer_check_rtc(void) static __inline__ void timer_check_rtc(void)
{ {
/* /*
...@@ -277,7 +238,7 @@ int timer_interrupt(struct pt_regs * regs) ...@@ -277,7 +238,7 @@ int timer_interrupt(struct pt_regs * regs)
irq_enter(); irq_enter();
#ifndef CONFIG_PPC_ISERIES #ifndef CONFIG_PPC_ISERIES
ppc64_do_profile(regs); profile_tick(CPU_PROFILING, regs);
#endif #endif
lpaca->lppaca.xIntDword.xFields.xDecrInt = 0; lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
......
...@@ -174,46 +174,7 @@ __calculate_ticks(__u64 elapsed) ...@@ -174,46 +174,7 @@ __calculate_ticks(__u64 elapsed)
#ifdef CONFIG_PROFILING #ifdef CONFIG_PROFILING
extern char _stext, _etext; #define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs)
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void s390_do_profile(struct pt_regs * regs)
{
unsigned long eip;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
eip = instruction_pointer(regs);
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
eip -= (unsigned long) &_stext;
eip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (eip > prof_len-1)
eip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[eip]);
}
#else #else
#define s390_do_profile(regs) do { ; } while(0) #define s390_do_profile(regs) do { ; } while(0)
#endif /* CONFIG_PROFILING */ #endif /* CONFIG_PROFILING */
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/profile.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -250,32 +251,6 @@ EXPORT_SYMBOL(do_settimeofday); ...@@ -250,32 +251,6 @@ EXPORT_SYMBOL(do_settimeofday);
/* last time the RTC clock got updated */ /* last time the RTC clock got updated */
static long last_rtc_update; static long last_rtc_update;
/* Profiling definitions */
extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;
extern char _stext;
static inline void sh_do_profile(unsigned long pc)
{
/* Don't profile cpu_idle.. */
if (!prof_buffer || !current->pid)
return;
pc -= (unsigned long)&_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -283,9 +258,7 @@ static inline void sh_do_profile(unsigned long pc) ...@@ -283,9 +258,7 @@ static inline void sh_do_profile(unsigned long pc)
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
do_timer(regs); do_timer(regs);
profile_tick(CPU_PROFILING, regs);
if (!user_mode(regs))
sh_do_profile(profile_pc(regs));
#ifdef CONFIG_HEARTBEAT #ifdef CONFIG_HEARTBEAT
if (sh_mv.mv_heartbeat != NULL) if (sh_mv.mv_heartbeat != NULL)
......
...@@ -298,37 +298,6 @@ static int set_rtc_time(unsigned long nowtime) ...@@ -298,37 +298,6 @@ static int set_rtc_time(unsigned long nowtime)
/* last time the RTC clock got updated */ /* last time the RTC clock got updated */
static long last_rtc_update = 0; static long last_rtc_update = 0;
static inline void sh64_do_profile(struct pt_regs *regs)
{
extern int _stext;
unsigned long pc;
profile_hook(regs);
if (user_mode(regs))
return;
/* Don't profile cpu_idle.. */
if (!prof_buffer || !current->pid)
return;
pc = instruction_pointer(regs);
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
* Don't ignore out-of-bounds PC values silently, put them into the
* last histogram slot, so if present, they will show up as a sharp
* peak.
*/
if (pc > prof_len - 1)
pc = prof_len - 1;
/* We could just be sloppy and not lock against a re-entry on this
increment, but the profiling code won't always be linked in anyway. */
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -340,8 +309,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg ...@@ -340,8 +309,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
ctc_last_interrupt = (unsigned long) current_ctc; ctc_last_interrupt = (unsigned long) current_ctc;
do_timer(regs); do_timer(regs);
profile_tick(CPU_PROFILING, regs);
sh64_do_profile(regs);
#ifdef CONFIG_HEARTBEAT #ifdef CONFIG_HEARTBEAT
{ {
......
...@@ -410,8 +410,6 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait) ...@@ -410,8 +410,6 @@ void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
panic("Bogon SMP message pass."); panic("Bogon SMP message pass.");
} }
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
void smp4d_percpu_timer_interrupt(struct pt_regs *regs) void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{ {
int cpu = hard_smp4d_processor_id(); int cpu = hard_smp4d_processor_id();
...@@ -429,8 +427,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs) ...@@ -429,8 +427,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
show_leds(cpu); show_leds(cpu);
} }
if(!user_mode(regs)) profile_tick(CPU_PROFILING, regs);
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
if(!--prof_counter(cpu)) { if(!--prof_counter(cpu)) {
int user = user_mode(regs); int user = user_mode(regs);
......
...@@ -392,16 +392,13 @@ void smp4m_cross_call_irq(void) ...@@ -392,16 +392,13 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1; ccall_info.processors_out[i] = 1;
} }
extern void sparc_do_profile(unsigned long pc, unsigned long o7);
void smp4m_percpu_timer_interrupt(struct pt_regs *regs) void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
clear_profile_irq(cpu); clear_profile_irq(cpu);
if(!user_mode(regs)) profile_tick(CPU_PROFILING, regs);
sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
if(!--prof_counter(cpu)) { if(!--prof_counter(cpu)) {
int user = user_mode(regs); int user = user_mode(regs);
......
...@@ -99,26 +99,6 @@ unsigned long profile_pc(struct pt_regs *regs) ...@@ -99,26 +99,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return pc; return pc;
} }
static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
/* 32-bit Sparc specific profiling function. */
void sparc_do_profile(unsigned long pc, unsigned long o7)
{
if(prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
spin_lock(&ticker_lock);
if(pc < prof_len)
prof_buffer[pc]++;
else
prof_buffer[prof_len - 1]++;
spin_unlock(&ticker_lock);
}
}
__volatile__ unsigned int *master_l10_counter; __volatile__ unsigned int *master_l10_counter;
__volatile__ unsigned int *master_l10_limit; __volatile__ unsigned int *master_l10_limit;
...@@ -135,8 +115,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) ...@@ -135,8 +115,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
static long last_rtc_update; static long last_rtc_update;
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
if(!user_mode(regs)) profile_tick(CPU_PROFILING, regs);
sparc_do_profile(profile_pc(regs));
#endif #endif
/* Protect counter clear so that do_gettimeoffset works */ /* Protect counter clear so that do_gettimeoffset works */
......
...@@ -975,8 +975,6 @@ void smp_promstop_others(void) ...@@ -975,8 +975,6 @@ void smp_promstop_others(void)
smp_cross_call(&xcall_promstop, 0, 0, 0); smp_cross_call(&xcall_promstop, 0, 0, 0);
} }
extern void sparc64_do_profile(struct pt_regs *regs);
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter #define prof_counter(__cpu) cpu_data(__cpu).counter
...@@ -1002,7 +1000,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs) ...@@ -1002,7 +1000,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
} }
do { do {
sparc64_do_profile(regs); profile_tick(CPU_PROFILING, regs);
if (!--prof_counter(cpu)) { if (!--prof_counter(cpu)) {
irq_enter(); irq_enter();
......
...@@ -442,22 +442,6 @@ static inline void timer_check_rtc(void) ...@@ -442,22 +442,6 @@ static inline void timer_check_rtc(void)
} }
} }
void sparc64_do_profile(struct pt_regs *regs)
{
unsigned long pc;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
pc = (profile_pc(regs) - (unsigned long)_stext) >> prof_shift;
atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len-1)]);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{ {
unsigned long ticks, pstate; unsigned long ticks, pstate;
...@@ -466,7 +450,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) ...@@ -466,7 +450,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
do { do {
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
sparc64_do_profile(regs); profile_tick(CPU_PROFILING, regs);
#endif #endif
do_timer(regs); do_timer(regs);
......
...@@ -40,24 +40,6 @@ unsigned long long sched_clock(void) ...@@ -40,24 +40,6 @@ unsigned long long sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ); return (unsigned long long)jiffies * (1000000000 / HZ);
} }
static inline void do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
if (pc < prof_len)
++prof_buffer[pc];
else
/*
* Don't ignore out-of-bounds PC values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
++prof_buffer[prof_len-1];
}
}
/* /*
* timer_interrupt() needs to keep up the real-time clock, * timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick * as well as call the "do_timer()" routine every clocktick
...@@ -74,10 +56,7 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs) ...@@ -74,10 +56,7 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
mach_tick (); mach_tick ();
do_timer (regs); do_timer (regs);
profile_tick(CPU_PROFILING, regs);
if (! user_mode (regs))
do_profile (regs->pc);
#if 0 #if 0
/* /*
* If we have an externally synchronized Linux clock, then update * If we have an externally synchronized Linux clock, then update
......
...@@ -836,8 +836,7 @@ void smp_local_timer_interrupt(struct pt_regs *regs) ...@@ -836,8 +836,7 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
if (--per_cpu(prof_counter, cpu) <= 0) { if (--per_cpu(prof_counter, cpu) <= 0) {
/* /*
* The multiplier may have changed since the last time we got * The multiplier may have changed since the last time we got
......
...@@ -396,7 +396,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -396,7 +396,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/ */
#ifndef CONFIG_X86_LOCAL_APIC #ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
#else #else
if (!using_apic_timer) if (!using_apic_timer)
smp_local_timer_interrupt(regs); smp_local_timer_interrupt(regs);
......
...@@ -68,48 +68,6 @@ extern atomic_t irq_mis_count; ...@@ -68,48 +68,6 @@ extern atomic_t irq_mis_count;
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
static inline void __do_profile(unsigned long eip)
{
if (!prof_buffer)
return;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
eip -= (unsigned long)_stext;
eip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (eip > prof_len-1)
eip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[eip]);
}
#define kern_profile(eip) __do_profile(eip)
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void x86_do_profile(struct pt_regs * regs)
{
profile_hook(regs);
if (prof_on != 1 || user_mode(regs))
return;
__do_profile(regs->eip);
}
#if defined(CONFIG_X86_IO_APIC) #if defined(CONFIG_X86_IO_APIC)
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{ {
......
...@@ -22,7 +22,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) ...@@ -22,7 +22,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler. * system, in that case we have to call the local interrupt handler.
*/ */
#ifndef CONFIG_X86_LOCAL_APIC #ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
#else #else
if (!using_apic_timer) if (!using_apic_timer)
smp_local_timer_interrupt(regs); smp_local_timer_interrupt(regs);
......
...@@ -15,7 +15,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) ...@@ -15,7 +15,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler. * system, in that case we have to call the local interrupt handler.
*/ */
#ifndef CONFIG_X86_LOCAL_APIC #ifndef CONFIG_X86_LOCAL_APIC
x86_do_profile(regs); profile_tick(CPU_PROFILING, regs);
#else #else
if (!using_apic_timer) if (!using_apic_timer)
smp_local_timer_interrupt(regs); smp_local_timer_interrupt(regs);
......
...@@ -129,39 +129,6 @@ __asm__( \ ...@@ -129,39 +129,6 @@ __asm__( \
"push $" #nr "-256 ; " \ "push $" #nr "-256 ; " \
"jmp common_interrupt"); "jmp common_interrupt");
static inline void x86_do_profile (struct pt_regs *regs)
{
unsigned long rip;
extern char _stext[];
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
rip = regs->rip;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
rip -= (unsigned long) &_stext;
rip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (rip > prof_len-1)
rip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[rip]);
}
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP)
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
if (IO_APIC_IRQ(i)) if (IO_APIC_IRQ(i))
......
...@@ -9,7 +9,11 @@ ...@@ -9,7 +9,11 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/errno.h> #include <asm/errno.h>
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
struct proc_dir_entry; struct proc_dir_entry;
struct pt_regs;
/* parse command line */ /* parse command line */
int __init profile_setup(char * str); int __init profile_setup(char * str);
...@@ -17,6 +21,8 @@ int __init profile_setup(char * str); ...@@ -17,6 +21,8 @@ int __init profile_setup(char * str);
/* init basic kernel profiler */ /* init basic kernel profiler */
void __init profile_init(void); void __init profile_init(void);
void create_prof_cpu_mask(struct proc_dir_entry *); void create_prof_cpu_mask(struct proc_dir_entry *);
void profile_tick(int, struct pt_regs *);
void profile_hit(int, void *);
extern unsigned int * prof_buffer; extern unsigned int * prof_buffer;
extern unsigned long prof_len; extern unsigned long prof_len;
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/profile.h>
#include <asm/sections.h> #include <asm/sections.h>
unsigned int * prof_buffer; unsigned int * prof_buffer;
...@@ -169,6 +170,25 @@ EXPORT_SYMBOL_GPL(profile_event_unregister); ...@@ -169,6 +170,25 @@ EXPORT_SYMBOL_GPL(profile_event_unregister);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/ptrace.h>
void profile_hit(int type, void *__pc)
{
unsigned long pc;
if (prof_on != type || !prof_buffer)
return;
pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len - 1)]);
}
/*
 * Per-tick profiling entry point called by the arch timer code.
 * Runs the profile hook for CPU profiling, then accounts a hit for
 * kernel-mode samples on CPUs enabled in prof_cpu_mask.
 */
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING)
		profile_hook(regs);

	/* User-mode samples are never histogrammed. */
	if (user_mode(regs))
		return;
	if (cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off, static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data) int count, int *eof, void *data)
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -3220,10 +3221,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param) ...@@ -3220,10 +3221,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
policy != SCHED_NORMAL) policy != SCHED_NORMAL)
goto out_unlock; goto out_unlock;
} }
#ifdef kern_profile profile_hit(SCHED_PROFILING, __builtin_return_address(0));
if (unlikely(prof_on == 2))
__do_profile((unsigned long)__builtin_return_address(0));
#endif
/* /*
* Valid priorities for SCHED_FIFO and SCHED_RR are * Valid priorities for SCHED_FIFO and SCHED_RR are
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment