Commit 4ce5f241 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (31 commits)
  powerpc/oprofile: fix whitespaces in op_model_cell.c
  powerpc/oprofile: IBM CELL: add SPU event profiling support
  powerpc/oprofile: fix cell/pr_util.h
  powerpc/oprofile: IBM CELL: cleanup and restructuring
  oprofile: make new cpu buffer functions part of the api
  oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
  ring_buffer: fix ring_buffer_event_length()
  oprofile: use new data sample format for ibs
  oprofile: add op_cpu_buffer_get_data()
  oprofile: add op_cpu_buffer_add_data()
  oprofile: rework implementation of cpu buffer events
  oprofile: modify op_cpu_buffer_read_entry()
  oprofile: add op_cpu_buffer_write_reserve()
  oprofile: rename variables in add_ibs_begin()
  oprofile: rename add_sample() in cpu_buffer.c
  oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c
  oprofile: making add_sample_entry() inline
  oprofile: remove backtrace code for ibs
  oprofile: remove unused ibs macro
  oprofile: remove unused components in struct oprofile_cpu_buffer
  ...
parents 7c51d57e a076aa4f
@@ -37,9 +37,11 @@
 #define CBE_PM_STOP_AT_MAX			0x40000000
 #define CBE_PM_TRACE_MODE_GET(pm_control)	(((pm_control) >> 28) & 0x3)
 #define CBE_PM_TRACE_MODE_SET(mode)		(((mode)  & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit)		(((bit) & 0x1) << 17)
 #define CBE_PM_COUNT_MODE_SET(count)		(((count)  & 0x3) << 18)
 #define CBE_PM_FREEZE_ALL_CTRS			0x00100000
 #define CBE_PM_ENABLE_EXT_TRACE			0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk)		(((msk) & 0x3) << 9)
 
 /* Macros for the trace_address register. */
 #define CBE_PM_TRACE_BUF_FULL			0x00000800
...
@@ -32,6 +32,12 @@ struct op_system_config {
 	unsigned long mmcr0;
 	unsigned long mmcr1;
 	unsigned long mmcra;
+#ifdef CONFIG_OPROFILE_CELL
+	/* Register for oprofile user tool to check cell kernel profiling
+	 * support.
+	 */
+	unsigned long cell_support;
+#endif
 #endif
 	unsigned long enable_kernel;
 	unsigned long enable_user;
...
@@ -30,6 +30,10 @@
 extern struct delayed_work spu_work;
 extern int spu_prof_running;
 
+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
+
 struct spu_overlay_info {	/* map of sections within an SPU overlay */
 	unsigned int vma;	/* SPU virtual memory address from elf */
 	unsigned int size;	/* size of section from elf */
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map);
 /*
  * Entry point for SPU profiling.
  * cycles_reset is the SPU_CYCLES count value specified by the user.
  */
-int start_spu_profiling(unsigned int cycles_reset);
-
-void stop_spu_profiling(void);
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);
 
+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);
 
 /* add the necessary profiling hooks */
 int spu_sync_start(void);
...
@@ -18,11 +18,21 @@
 #include <asm/cell-pmu.h>
 #include "pr_util.h"
 
-#define TRACE_ARRAY_SIZE 1024
 #define SCALE_SHIFT 14
 
 static u32 *samples;
 
+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+ * or not.  It is set by the routines start_spu_profiling_cycles() and
+ * start_spu_profiling_events().  The flag is cleared by the routines
+ * stop_spu_profiling_cycles() and stop_spu_profiling_events().  These
+ * routines are called via global_start() and global_stop() which are called in
+ * op_powerpc_start() and op_powerpc_stop().  These routines are called once
+ * per system as a result of the user starting/stopping oprofile.  Hence, only
+ * one CPU per user at a time will be changing the value of spu_prof_running.
+ * In general, OProfile does not protect against multiple users trying to run
+ * OProfile at a time.
+ */
 int spu_prof_running;
 static unsigned int profiling_interval;
@@ -31,8 +41,8 @@ static unsigned int profiling_interval;
 
 #define SPU_PC_MASK	     0xFFFF
 
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
 
 void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
 {
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 		 * sample array must be loaded and then processed for a given
 		 * cpu.  The sample array is not per cpu.
 		 */
-		spin_lock_irqsave(&sample_array_lock,
-				  sample_array_lock_flags);
+		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+				  oprof_spu_smpl_arry_lck_flags);
 		num_samples = cell_spu_pc_collection(cpu);
 
 		if (num_samples == 0) {
-			spin_unlock_irqrestore(&sample_array_lock,
-					       sample_array_lock_flags);
+			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+					       oprof_spu_smpl_arry_lck_flags);
 			continue;
 		}
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 					num_samples);
 		}
 
-		spin_unlock_irqrestore(&sample_array_lock,
-				       sample_array_lock_flags);
+		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+				       oprof_spu_smpl_arry_lck_flags);
 	}
 	smp_wmb();	/* ensure spu event buffer updates are written */
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 
 static struct hrtimer timer;
 /*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
  * NOTE: SPU profiling is done system-wide, not per-CPU.
  *
  * cycles_reset is the count value specified by the user when
  * setting up OProfile to count SPU_CYCLES.
  */
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
 {
 	ktime_t kt;
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset)
 	return 0;
 }
 
-void stop_spu_profiling(void)
+/*
+ * Entry point for SPU event profiling.
+ * NOTE: SPU profiling is done system-wide, not per-CPU.
+ *
+ * No cycles_reset value is needed here; event sampling is driven by the
+ * spu_work delayed work item rather than the hrtimer used for cycles.
+ */
+void start_spu_profiling_events(void)
+{
+	spu_prof_running = 1;
+	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+	return;
+}
+
+void stop_spu_profiling_cycles(void)
 {
 	spu_prof_running = 0;
 	hrtimer_cancel(&timer);
 	kfree(samples);
-	pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+	pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+	spu_prof_running = 0;
 }
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 	oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
 	oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
 	oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#ifdef CONFIG_OPROFILE_CELL
+	/* create a file the user tool can check to see what level of profiling
+	 * support exists with this kernel.  Initialize the bit mask to indicate
+	 * what support the kernel has:
+	 *   bit 0      - Supports SPU event profiling in addition to PPU
+	 *                event and cycles; and SPU cycle profiling
+	 *   bits 1-31  - Currently unused.
+	 *
+	 * If the file does not exist, then the kernel only supports SPU
+	 * cycle profiling, PPU event and cycle profiling.
+	 */
+	oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+	sys.cell_support = 0x1; /* Note, the user OProfile tool must check
+				 * that this bit is set before attempting to
+				 * use SPU event profiling.  Older kernels
+				 * will not have this file, hence the user
+				 * tool is not allowed to do SPU event
+				 * profiling on older kernels.  Older kernels
+				 * will accept SPU events but collected data
+				 * is garbage.
+				 */
+#endif
 #endif
 
 	for (i = 0; i < model->num_counters; ++i) {
...
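An illustrative sketch of how a user-space tool could consume this flag (editor's addition, not part of the commit; it assumes oprofilefs is mounted at the conventional /dev/oprofile and that only bit 0 is defined, as documented above):

/* Hypothetical user-space check for SPU event profiling support. */
#include <stdio.h>

static int spu_event_profiling_supported(void)
{
	unsigned long mask = 0;
	FILE *f = fopen("/dev/oprofile/cell_support", "r");

	if (!f)
		return 0;	/* file absent: older kernel, SPU cycles only */
	if (fscanf(f, "%lu", &mask) != 1)
		mask = 0;
	fclose(f);
	return mask & 0x1;	/* bit 0: SPU event profiling is supported */
}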
This diff is collapsed.
@@ -2,7 +2,7 @@
  * @file op_model_amd.c
  * athlon / K7 / K8 / Family 10h model-specific MSR operations
  *
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon
@@ -10,7 +10,7 @@
  * @author Graydon Hoare
  * @author Robert Richter <robert.richter@amd.com>
  * @author Barry Kasindorf
 */
 
 #include <linux/oprofile.h>
 #include <linux/device.h>
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS];
 #define IBS_OP_LOW_VALID_BIT		(1ULL<<18)	/* bit 18 */
 #define IBS_OP_LOW_ENABLE		(1ULL<<17)	/* bit 17 */
 
-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN    4
-
-/*
- * The function interface needs to be fixed, something like add
- * data. Should then be added to linux/oprofile.h.
- */
-extern void
-oprofile_add_ibs_sample(struct pt_regs * const regs,
-			unsigned int * const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
-	/* MSRC001_1031 IBS Fetch Linear Address Register */
-	unsigned int ibs_fetch_lin_addr_low;
-	unsigned int ibs_fetch_lin_addr_high;
-	/* MSRC001_1030 IBS Fetch Control Register */
-	unsigned int ibs_fetch_ctl_low;
-	unsigned int ibs_fetch_ctl_high;
-	/* MSRC001_1032 IBS Fetch Physical Address Register */
-	unsigned int ibs_fetch_phys_addr_low;
-	unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
-	/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
-	unsigned int ibs_op_rip_low;
-	unsigned int ibs_op_rip_high;
-	/* MSRC001_1035 IBS Op Data Register */
-	unsigned int ibs_op_data1_low;
-	unsigned int ibs_op_data1_high;
-	/* MSRC001_1036 IBS Op Data 2 Register */
-	unsigned int ibs_op_data2_low;
-	unsigned int ibs_op_data2_high;
-	/* MSRC001_1037 IBS Op Data 3 Register */
-	unsigned int ibs_op_data3_low;
-	unsigned int ibs_op_data3_high;
-	/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
-	unsigned int ibs_dc_linear_low;
-	unsigned int ibs_dc_linear_high;
-	/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
-	unsigned int ibs_dc_phys_low;
-	unsigned int ibs_dc_phys_high;
-};
+#define IBS_FETCH_SIZE	6
+#define IBS_OP_SIZE	12
 
-static int ibs_allowed;	/* AMD Family10h and later */
+static int has_ibs;	/* AMD Family10h and later */
 
 struct op_ibs_config {
 	unsigned long op_enabled;
@@ -197,31 +154,29 @@ static inline int
 op_amd_handle_ibs(struct pt_regs * const regs,
 		  struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
-	struct ibs_fetch_sample ibs_fetch;
-	struct ibs_op_sample ibs_op;
+	u32 low, high;
+	u64 msr;
+	struct op_entry entry;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return 1;
 
 	if (ibs_config.fetch_enabled) {
 		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 		if (high & IBS_FETCH_HIGH_VALID_BIT) {
-			ibs_fetch.ibs_fetch_ctl_high = high;
-			ibs_fetch.ibs_fetch_ctl_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
-			ibs_fetch.ibs_fetch_lin_addr_high = high;
-			ibs_fetch.ibs_fetch_lin_addr_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
-			ibs_fetch.ibs_fetch_phys_addr_high = high;
-			ibs_fetch.ibs_fetch_phys_addr_low = low;
-
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_fetch,
-						IBS_FETCH_BEGIN);
+			rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_add_data(&entry, low);
+			oprofile_add_data(&entry, high);
+			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 			high &= ~IBS_FETCH_HIGH_VALID_BIT;
 			high |= IBS_FETCH_HIGH_ENABLE;
 			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 	if (ibs_config.op_enabled) {
 		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 		if (low & IBS_OP_LOW_VALID_BIT) {
-			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
-			ibs_op.ibs_op_rip_low = low;
-			ibs_op.ibs_op_rip_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
-			ibs_op.ibs_op_data1_low = low;
-			ibs_op.ibs_op_data1_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
-			ibs_op.ibs_op_data2_low = low;
-			ibs_op.ibs_op_data2_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
-			ibs_op.ibs_op_data3_low = low;
-			ibs_op.ibs_op_data3_high = high;
-			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
-			ibs_op.ibs_dc_linear_low = low;
-			ibs_op.ibs_dc_linear_high = high;
-			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
-			ibs_op.ibs_dc_phys_low = low;
-			ibs_op.ibs_dc_phys_high = high;
+			rdmsrl(MSR_AMD64_IBSOPRIP, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_OP_CODE, IBS_OP_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_op,
-						IBS_OP_BEGIN);
-			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 			high = 0;
 			low &= ~IBS_OP_LOW_VALID_BIT;
 			low |= IBS_OP_LOW_ENABLE;
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
 		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
 			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
 			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
 			+ IBS_OP_LOW_ENABLE;
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void)
 				| IBSCTL_LVTOFFSETVAL);
 		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
 		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+			pci_dev_put(cpu_cfg);
 			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
 				"IBSCTL = 0x%08x", value);
 			return 1;
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void)
 /* uninitialize the APIC for the IBS interrupts if needed */
 static void clear_ibs_nmi(void)
 {
-	if (ibs_allowed)
+	if (has_ibs)
 		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
 }
 
 /* initialize the APIC for the IBS interrupts if available */
 static void ibs_init(void)
 {
-	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+	has_ibs = boot_cpu_has(X86_FEATURE_IBS);
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	if (init_ibs_nmi()) {
-		ibs_allowed = 0;
+		has_ibs = 0;
 		return;
 	}
@@ -458,7 +413,7 @@ static void ibs_init(void)
 
 static void ibs_exit(void)
 {
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	clear_ibs_nmi();
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 	if (ret)
 		return ret;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return ret;
 
 	/* model specific files */
...
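Editor's note: each 64-bit MSR value above is pushed as two 32-bit data words, low half first. A hypothetical helper in the same style (not part of the patch) makes that pattern explicit; like the handlers above, it relies on the size passed to oprofile_write_reserve() covering all the words:

/* Illustrative only: add one 64-bit MSR value to an op_entry as two
 * 32-bit data words, low half first. */
static inline void ibs_add_msr64(struct op_entry *entry, u64 val)
{
	oprofile_add_data(entry, (u32)val);
	oprofile_add_data(entry, (u32)(val >> 32));
}

With such a helper, the fetch path above would reduce to two ibs_add_msr64() calls plus the two IBSFETCHCTL words, and the op path to six ibs_add_msr64() calls (12 words, matching IBS_OP_SIZE).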
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -315,88 +316,73 @@ static void add_trace_begin(void)
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
-	struct op_sample *sample;
 
-	sample = cpu_buffer_read_entry(cpu);
-	if (!sample)
-		goto Error;
-	rip = sample->eip;
-
-#ifdef __LP64__
-	rip += sample->event << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(sample->eip);
-	add_event_entry(sample->event);
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		sample = cpu_buffer_read_entry(cpu);
-		if (!sample)
-			goto Error;
-		add_event_entry(sample->eip);
-		add_event_entry(sample->event);
-	}
-
-	return;
-
-Error:
-	return;
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 }
 
-
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -526,68 +493,71 @@ void sync_buffer(int cpu)
 {
 	struct mm_struct *mm = NULL;
 	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
 	unsigned int i;
 	unsigned long available;
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	cpu_buffer_reset(cpu);
-	available = cpu_buffer_entries(cpu);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-		struct op_sample *s = cpu_buffer_read_entry(cpu);
-		if (!s)
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
 			break;
 
-		if (is_code(s->eip)) {
-			switch (s->event) {
-			case 0:
-			case CPU_IS_KERNEL:
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
+				state = sb_bt_start;
+				add_trace_begin();
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
 				/* kernel/userspace switch */
-				in_kernel = s->event;
+				in_kernel = flags & IS_KERNEL;
 				if (state == sb_buffer_start)
 					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-				break;
-			case CPU_TRACE_BEGIN:
-				state = sb_bt_start;
-				add_trace_begin();
-				break;
-#ifdef CONFIG_OPROFILE_IBS
-			case IBS_FETCH_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
-				break;
-			case IBS_OP_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_OP_CODE, mm);
-				break;
-#endif
-			default:
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
 				/* userspace context switch */
+				new = (struct task_struct *)val;
 				oldmm = mm;
-				new = (struct task_struct *)s->event;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
 					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
-				break;
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
+		}
+
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 		}
 	}
 	release_mm(mm);
 
 	mark_done(cpu);
...
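Editor's illustration (derived from add_data() above and the IBS fetch handler in op_model_amd.c) of the record that one IBS fetch sample produces in the event buffer; the labels on the right are descriptive only, not kernel identifiers:

/*
 *   ESCAPE_CODE
 *   IBS_FETCH_CODE
 *   offset               dcookie offset derived from the sampled pc
 *   lin_addr_low         MSR_AMD64_IBSFETCHLINAD, low 32 bits
 *   lin_addr_high        MSR_AMD64_IBSFETCHLINAD, high 32 bits
 *   fetch_ctl_low        MSR_AMD64_IBSFETCHCTL, low 32 bits
 *   fetch_ctl_high       MSR_AMD64_IBSFETCHCTL, high 32 bits
 *   phys_addr_low        MSR_AMD64_IBSFETCHPHYSAD, low 32 bits
 *   phys_addr_high       MSR_AMD64_IBSFETCHPHYSAD, high 32 bits
 */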
This diff is collapsed.
 /**
  * @file cpu_buffer.h
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -31,17 +32,12 @@ void end_cpu_work(void);
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
-struct op_entry {
-	struct ring_buffer_event *event;
-	struct op_sample *sample;
-	unsigned long irq_flags;
-};
+struct op_entry;
 
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
@@ -54,8 +50,6 @@ struct oprofile_cpu_buffer {
 	struct delayed_work work;
 };
 
-extern struct ring_buffer *op_ring_buffer_read;
-extern struct ring_buffer *op_ring_buffer_write;
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 /*
@@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
  * reset these to invalid values; the next sample collected will
  * populate the buffer with proper values to initialize the buffer
  */
-static inline void cpu_buffer_reset(int cpu)
+static inline void op_cpu_buffer_reset(int cpu)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
@@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu)
 	cpu_buf->last_task = NULL;
 }
 
-static inline int cpu_buffer_write_entry(struct op_entry *entry)
-{
-	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
-						sizeof(struct op_sample),
-						&entry->irq_flags);
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
-		return -ENOMEM;
-
-	return 0;
-}
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
 
-static inline int cpu_buffer_write_commit(struct op_entry *entry)
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-					 entry->irq_flags);
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
 }
 
-static inline struct op_sample *cpu_buffer_read_entry(int cpu)
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
 {
-	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
-		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	return NULL;
+	return entry->size;
 }
 
-/* "acquire" as many cpu buffer slots as we can */
-static inline unsigned long cpu_buffer_entries(int cpu)
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
 {
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
 }
 
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN    4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
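Editor's sketch of the intended use of this API (not taken from the patch): the producer reserves an op_sample plus extra data words and commits it, and the consumer drains the words again. It assumes the full struct op_entry definition from <linux/oprofile.h> is visible, as it is in cpu_buffer.c:

/* Producer side: reserve room for a sample plus two extra data words. */
static void sketch_write(unsigned long pc, unsigned long event,
			 unsigned long d0, unsigned long d1)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (!sample)
		return;				/* ring buffer full, sample dropped */
	sample->eip = pc;
	sample->event = event;
	op_cpu_buffer_add_data(&entry, d0);	/* return value is the room left */
	op_cpu_buffer_add_data(&entry, d1);
	op_cpu_buffer_write_commit(&entry);
}

/* Consumer side: read one entry and drain its attached data words. */
static void sketch_read(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;

	sample = op_cpu_buffer_read_entry(&entry, cpu);
	if (!sample)
		return;
	pr_debug("sample: eip 0x%lx, event 0x%lx\n", sample->eip, sample->event);
	while (op_cpu_buffer_get_data(&entry, &val))
		pr_debug("  data word 0x%lx\n", val);
}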
@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
...
@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
 
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);
...
@@ -21,12 +21,12 @@ void oprofile_stop(void);
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;
...
@@ -14,17 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-#define FS_BUFFER_SIZE_DEFAULT		131072
-#define FS_CPU_BUFFER_SIZE_DEFAULT	8192
-#define FS_BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
 
-unsigned long fs_buffer_size;
-unsigned long fs_cpu_buffer_size;
-unsigned long fs_buffer_watershed;
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
 
@@ -125,16 +126,16 @@ static const struct file_operations dump_fops = {
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
 	/* reinitialize default values */
-	fs_buffer_size = FS_BUFFER_SIZE_DEFAULT;
-	fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT;
-	fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT;
+	oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
 
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
...
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
 
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
@@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
...
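Editor's sketch of what the fix buys a caller such as op_cpu_buffer_write_reserve() above: ring_buffer_event_length() now reports the usable payload rather than the raw event length including the header. It assumes the three-argument ring_buffer_lock_reserve()/ring_buffer_unlock_commit() used elsewhere in this tree:

/* Sketch only: reserve a data event and size work on it with
 * ring_buffer_event_length(). */
static void length_roundtrip_sketch(struct ring_buffer *buffer,
				    unsigned long len)
{
	struct ring_buffer_event *event;
	unsigned long flags;

	event = ring_buffer_lock_reserve(buffer, len, &flags);
	if (!event)
		return;
	/* Before the fix this length included the event header, so using it
	 * to size the payload could overrun; now it reflects the data area. */
	memset(ring_buffer_event_data(event), 0,
	       ring_buffer_event_length(event));
	ring_buffer_unlock_commit(buffer, event, flags);
}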