Commit d2852b93 authored by Robert Richter

Merge branch 'oprofile/ring_buffer' into oprofile/oprofile-for-tip

parents 4a6908a3 14f0ca8e
@@ -6,6 +6,8 @@ config OPROFILE
 	tristate "OProfile system profiling (EXPERIMENTAL)"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	select TRACING
+	select RING_BUFFER
 	help
 	  OProfile is a profiling system capable of profiling the
 	  whole system, include the kernel, kernel modules, libraries,
......
This diff is collapsed.
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -268,18 +269,6 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 	return cookie;
 }
 
-static void increment_tail(struct oprofile_cpu_buffer *b)
-{
-	unsigned long new_tail = b->tail_pos + 1;
-
-	rmb();	/* be sure fifo pointers are synchromized */
-
-	if (new_tail < b->buffer_size)
-		b->tail_pos = new_tail;
-	else
-		b->tail_pos = 0;
-}
-
 static unsigned long last_cookie = INVALID_COOKIE;
 
 static void add_cpu_switch(int i)
@@ -327,84 +316,73 @@ static void add_trace_begin(void)
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-#define IBS_EIP(offset)				\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
-#define IBS_EVENT(offset)			\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-			  struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
 
-	increment_tail(cpu_buf);	/* move to RIP entry */
-
-	rip = IBS_EIP(cpu_buf->tail_pos);
-
-#ifdef __LP64__
-	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		increment_tail(cpu_buf);
-		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-	}
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -423,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 }
 
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -466,33 +425,6 @@ static inline int is_code(unsigned long val)
 }
 
-/* "acquire" as many cpu buffer slots as we can */
-static unsigned long get_slots(struct oprofile_cpu_buffer *b)
-{
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
-
-	/*
-	 * Subtle. This resets the persistent last_task
-	 * and in_kernel values used for switching notes.
-	 * BUT, there is a small window between reading
-	 * head_pos, and this call, that means samples
-	 * can appear at the new head position, but not
-	 * be prefixed with the notes for switching
-	 * kernel mode or a task switch. This small hole
-	 * can lead to mis-attribution or samples where
-	 * we don't know if it's in the kernel or not,
-	 * at the start of an event buffer.
-	 */
-	cpu_buffer_reset(b);
-
-	if (head >= tail)
-		return head - tail;
-
-	return head + (b->buffer_size - tail);
-}
-
 /* Move tasks along towards death. Any tasks on dead_tasks
  * will definitely have no remaining references in any
  * CPU buffers at this point, because we use two lists,
@@ -559,72 +491,73 @@ typedef enum {
  */
 void sync_buffer(int cpu)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 	struct mm_struct *mm = NULL;
+	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
-#ifndef CONFIG_OPROFILE_IBS
 	unsigned int i;
 	unsigned long available;
-#endif
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	/* Remember, only we can modify tail_pos */
-
-#ifndef CONFIG_OPROFILE_IBS
-	available = get_slots(cpu_buf);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-#else
-	while (get_slots(cpu_buf)) {
-#endif
-		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
+			break;
 
-		if (is_code(s->eip)) {
-			if (s->event <= CPU_IS_KERNEL) {
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
+				state = sb_bt_start;
+				add_trace_begin();
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
 				/* kernel/userspace switch */
-				in_kernel = s->event;
+				in_kernel = flags & IS_KERNEL;
 				if (state == sb_buffer_start)
 					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-			} else if (s->event == CPU_TRACE_BEGIN) {
-				state = sb_bt_start;
-				add_trace_begin();
-#ifdef CONFIG_OPROFILE_IBS
-			} else if (s->event == IBS_FETCH_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
-			} else if (s->event == IBS_OP_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
-#endif
-			} else {
-				struct mm_struct *oldmm = mm;
-
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
 				/* userspace context switch */
-				new = (struct task_struct *)s->event;
-
+				new = (struct task_struct *)val;
+				oldmm = mm;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
 					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
-		}
-
-		increment_tail(cpu_buf);
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
+		}
+
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+		}
 	}
 	release_mm(mm);
 
 	mark_done(cpu);
......
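Note on the entry format consumed above: sync_buffer() no longer walks head_pos/tail_pos itself, it pulls complete entries out of the per-cpu ring buffer and decodes them by flag bits. The writer side lives in the collapsed cpu_buffer.c diff, so the following is only an illustrative sketch of how a task-switch note could be packed so that the flags-based decoding above works; it is not the actual implementation, and it assumes is_code() still matches ESCAPE_CODE. The helpers used here are the ones declared in the cpu_buffer.h hunks further down.

/* Illustrative sketch only; names and semantics as noted above. */
static int sketch_note_task_switch(struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;

	/* reserve a "code" sample with room for one payload word */
	sample = op_cpu_buffer_write_reserve(&entry, 1);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;		/* assumed: makes is_code() true */
	sample->event = USER_CTX_SWITCH;	/* decoded by sync_buffer() above */

	/* payload word later read back via op_cpu_buffer_get_data(&entry, &val) */
	op_cpu_buffer_add_data(&entry, (unsigned long)task);

	return op_cpu_buffer_write_commit(&entry);
}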
This diff is collapsed.
 /**
  * @file cpu_buffer.h
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -15,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/cache.h>
 #include <linux/sched.h>
+#include <linux/ring_buffer.h>
 
 struct task_struct;
 
@@ -30,16 +32,16 @@ void end_cpu_work(void);
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
+struct op_entry;
+
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -50,12 +52,62 @@ struct oprofile_cpu_buffer {
 
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
+/*
+ * Resets the cpu buffer to a sane state.
+ *
+ * reset these to invalid values; the next sample collected will
+ * populate the buffer with proper values to initialize the buffer
+ */
+static inline void op_cpu_buffer_reset(int cpu)
+{
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+
+	cpu_buf->last_is_kernel = -1;
+	cpu_buf->last_task = NULL;
+}
+
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
+{
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
+}
+
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
+{
+	return entry->size;
+}
+
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
+{
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
+}
+
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
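For reference, a small usage sketch of the read-side accessors added above, mirroring how sync_buffer() drains an entry's payload. It only uses the helpers shown in this header and assumes the entry came from op_cpu_buffer_read_entry(); the printk is purely illustrative.

/* Usage sketch for the op_entry accessors above (read side). */
static void sketch_dump_entry_payload(struct op_entry *entry)
{
	unsigned long val;

	/* op_cpu_buffer_get_data() pops one payload word per call and
	 * returns 0 once the entry is drained */
	while (op_cpu_buffer_get_data(entry, &val))
		printk(KERN_DEBUG "oprofile: payload word 0x%lx\n", val);

	/* nothing left: op_cpu_buffer_get_size() now reports 0 */
	WARN_ON(op_cpu_buffer_get_size(entry) != 0);
}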
@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
......
@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
 
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);
......
@@ -21,12 +21,12 @@ void oprofile_stop(void);
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;
......
@@ -14,13 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-unsigned long fs_buffer_size = 131072;
-unsigned long fs_cpu_buffer_size = 8192;
-unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
 
@@ -120,12 +125,17 @@ static const struct file_operations dump_fops = {
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
+	/* reinitialize default values */
+	oprofile_buffer_size =		BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size =	CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed =	BUFFER_WATERSHED_DEFAULT;
+
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
......
@@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
 void oprofile_arch_exit(void);
 
 /**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
+ * Add a sample. This may be called from any context.
  */
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
 
@@ -165,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
 
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
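The three declarations added above form the new public write API that architecture code can use to store a sample together with a variable amount of extra data. A minimal sketch of a caller follows; SKETCH_CODE, the payload layout, and the meaning of the size argument ("room for two extra words") are assumptions made for illustration, only the function signatures come from this header.

#define SKETCH_CODE	13	/* hypothetical sample code, not a real constant */

static void sketch_log_extended_sample(struct pt_regs * const regs,
				       unsigned long pc,
				       unsigned long data0, unsigned long data1)
{
	struct op_entry entry;

	/* reserve a sample with room for two extra data words
	 * (assumed meaning of the size argument) */
	oprofile_write_reserve(&entry, regs, pc, SKETCH_CODE, 2);
	oprofile_add_data(&entry, data0);
	oprofile_add_data(&entry, data1);
	oprofile_write_commit(&entry);
}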
@@ -116,6 +116,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
......
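The two per-cpu accessors declared above let a ring buffer user ask how many entries a single CPU currently holds, which is presumably what lets oprofile size its per-cpu sync loop. The sketch below is only a guess at how op_cpu_buffer_entries() could sit on top of this; the real code is in the collapsed cpu_buffer.c diff and may well differ (for instance it may sum more than one buffer), and op_ring_buffer is an assumed name.

/* Hypothetical backing for op_cpu_buffer_entries(); names are assumed. */
static struct ring_buffer *op_ring_buffer;	/* assumed oprofile-owned buffer */

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}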
@@ -31,6 +31,7 @@ void tracing_on(void)
 {
 	ring_buffers_off = 0;
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
 {
 	ring_buffers_off = 1;
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -113,8 +117,15 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -136,6 +147,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
 	for_each_cpu_mask(cpu, buffer->cpumask)
@@ -381,7 +393,7 @@ extern int ring_buffer_page_too_big(void);
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +456,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -459,6 +472,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
@@ -620,6 +634,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1220,6 +1235,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	preempt_enable_notrace();
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1269,6 +1285,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1351,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1360,6 +1378,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1391,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1413,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1433,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1450,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1467,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1490,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,6 +1513,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 /**
  * ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1539,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1554,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1825,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 /**
  * ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1896,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_consume - return an event and consume it
@@ -1894,6 +1924,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1934,6 +1965,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +1982,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +2004,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2014,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2057,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2070,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2089,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2106,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2156,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
......
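All of the EXPORT_SYMBOL_GPL additions above exist so that the generic ring buffer can be driven from a module such as oprofile. As a reference point, a minimal consuming loop built only from symbols exported in this patch might look like the sketch below; the signatures are taken from the hunk headers above, and the pr_debug line is purely illustrative.

/* Minimal consumer sketch; producer side and error handling omitted. */
static void sketch_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *body = ring_buffer_event_data(event);
		/* with the rb_event_length() change above this is the payload
		 * size of a data event, not the raw aligned event length */
		unsigned len = ring_buffer_event_length(event);

		pr_debug("event at %llu: %u bytes at %p\n",
			 (unsigned long long)ts, len, body);
	}
}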
@@ -914,7 +914,7 @@ enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 
 	return iter->ent ? iter : NULL;
 }
......