Commit 910e94dd authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://github.com/rostedt/linux into perf/core

parents 177e2163 d696b58c
@@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	put_online_cpus();
 }
-void arch_jump_label_text_poke_early(jump_label_t addr)
+void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
 {
 	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
 			JUMP_LABEL_NOP_SIZE);
......
@@ -580,9 +580,6 @@ int unregister_module_notifier(struct notifier_block * nb);
 extern void print_modules(void);
-extern void module_update_tracepoints(void);
-extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
 #else /* !CONFIG_MODULES... */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
@@ -698,15 +695,6 @@ static inline int unregister_module_notifier(struct notifier_block * nb)
 static inline void print_modules(void)
 {
 }
-static inline void module_update_tracepoints(void)
-{
-}
-static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-	return 0;
-}
 #endif /* CONFIG_MODULES */
 #ifdef CONFIG_SYSFS
......
@@ -154,6 +154,8 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
......
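For orientation, a minimal sketch (not part of this commit) of how a caller might consume the two new per-cpu accessors declared above; the helper name and the pr_info() reporting are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Hypothetical helper: report the new per-cpu statistics for one CPU. */
static void report_rb_cpu_stats(struct ring_buffer *buffer, int cpu)
{
	unsigned long bytes  = ring_buffer_bytes_cpu(buffer, cpu);
	unsigned long oldest = ring_buffer_oldest_event_ts(buffer, cpu);

	/* oldest is a buffer timestamp in ns; both calls return 0 when the
	 * CPU is not in the buffer's cpumask */
	pr_info("cpu%d: %lu bytes pending, oldest event ts %lu\n",
		cpu, bytes, oldest);
}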
@@ -15,5 +15,6 @@
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
+extern u64 notrace trace_clock_counter(void);
 #endif /* _LINUX_TRACE_CLOCK_H */
@@ -54,8 +54,18 @@ extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
 				void *data);
 extern void tracepoint_probe_update_all(void);
+#ifdef CONFIG_MODULES
+struct tp_module {
+	struct list_head list;
+	unsigned int num_tracepoints;
+	struct tracepoint * const *tracepoints_ptrs;
+};
+#endif /* CONFIG_MODULES */
 struct tracepoint_iter {
-	struct module *module;
+#ifdef CONFIG_MODULES
+	struct tp_module *module;
+#endif /* CONFIG_MODULES */
 	struct tracepoint * const *tracepoint;
 };
@@ -63,8 +73,6 @@ extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
-	struct tracepoint * const *begin, struct tracepoint * const *end);
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -78,17 +86,6 @@ static inline void tracepoint_synchronize_unregister(void)
 #define PARAMS(args...) args
-#ifdef CONFIG_TRACEPOINTS
-extern
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-	struct tracepoint * const *end);
-#else
-static inline
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-	struct tracepoint * const *end)
-{ }
-#endif /* CONFIG_TRACEPOINTS */
 #endif /* _LINUX_TRACEPOINT_H */
 /*
......
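The iterator above now walks the core tracepoint section and, when CONFIG_MODULES is set, the per-module tp_module entries. A hedged sketch of driving it (the reset-before-start ordering mirrors the kernel's own iterator users; the printk reporting is illustrative):

#include <linux/kernel.h>
#include <linux/tracepoint.h>

/* Illustrative only: print the name of every known tracepoint,
 * core kernel first, then each loaded module's tracepoints. */
static void dump_all_tracepoints(void)
{
	struct tracepoint_iter iter;

	tracepoint_iter_reset(&iter);	/* clear module and tracepoint cursors */
	tracepoint_iter_start(&iter);	/* position on the first tracepoint */
	while (iter.tracepoint) {
		pr_info("tracepoint: %s\n", (*iter.tracepoint)->name);
		tracepoint_iter_next(&iter);
	}
	tracepoint_iter_stop(&iter);
}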
@@ -3487,50 +3487,3 @@ void module_layout(struct module *mod,
 }
 EXPORT_SYMBOL(module_layout);
 #endif
-#ifdef CONFIG_TRACEPOINTS
-void module_update_tracepoints(void)
-{
-	struct module *mod;
-	mutex_lock(&module_mutex);
-	list_for_each_entry(mod, &modules, list)
-		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints_ptrs,
-				mod->tracepoints_ptrs + mod->num_tracepoints);
-	mutex_unlock(&module_mutex);
-}
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- */
-int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-	struct module *iter_mod;
-	int found = 0;
-	mutex_lock(&module_mutex);
-	list_for_each_entry(iter_mod, &modules, list) {
-		if (!iter_mod->taints) {
-			/*
-			 * Sorted module list
-			 */
-			if (iter_mod < iter->module)
-				continue;
-			else if (iter_mod > iter->module)
-				iter->tracepoint = NULL;
-			found = tracepoint_get_iter_range(&iter->tracepoint,
-				iter_mod->tracepoints_ptrs,
-				iter_mod->tracepoints_ptrs
-					+ iter_mod->num_tracepoints);
-			if (found) {
-				iter->module = iter_mod;
-				break;
-			}
-		}
-	}
-	mutex_unlock(&module_mutex);
-	return found;
-}
-#endif
@@ -15,6 +15,8 @@ ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif
+CFLAGS_trace_events_filter.o := -I$(src)
 #
 # Make the trace clocks available generally: it's infrastructure
 # relied on by ptrace for example:
......
@@ -3862,6 +3862,14 @@ void ftrace_kill(void)
 	clear_ftrace_function();
 }
+/**
+ * Test if ftrace is dead or not.
+ */
+int ftrace_is_dead(void)
+{
+	return ftrace_disabled;
+}
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
......
...@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu { ...@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
struct buffer_page *reader_page; struct buffer_page *reader_page;
unsigned long lost_events; unsigned long lost_events;
unsigned long last_overrun; unsigned long last_overrun;
local_t entries_bytes;
local_t commit_overrun; local_t commit_overrun;
local_t overrun; local_t overrun;
local_t entries; local_t entries;
local_t committing; local_t committing;
local_t commits; local_t commits;
unsigned long read; unsigned long read;
unsigned long read_bytes;
u64 write_stamp; u64 write_stamp;
u64 read_stamp; u64 read_stamp;
}; };
...@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* the counters. * the counters.
*/ */
local_add(entries, &cpu_buffer->overrun); local_add(entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
/* /*
* The entries will be zeroed out when we move the * The entries will be zeroed out when we move the
...@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail); event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield); kmemcheck_annotate_bitfield(event, bitfield);
/* account for padding bytes */
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
/* /*
* Save the original length to the meta data. * Save the original length to the meta data.
* This will be used by the reader to add lost event * This will be used by the reader to add lost event
...@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (!tail) if (!tail)
tail_page->page->time_stamp = ts; tail_page->page->time_stamp = ts;
/* account for these added bytes */
local_add(length, &cpu_buffer->entries_bytes);
return event; return event;
} }
@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2085,9 +2095,12 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		old_index += write_mask;
 		new_index += write_mask;
 		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
+		if (index == old_index) {
+			/* update counters */
+			local_sub(event_length, &cpu_buffer->entries_bytes);
 			return 1;
+		}
 	}
 	/* could not discard */
 	return 0;
...@@ -2660,6 +2673,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -2660,6 +2673,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
(local_read(&cpu_buffer->overrun) + cpu_buffer->read); (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
} }
/**
* ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* if the tail is on reader_page, oldest time stamp is on the reader
* page
*/
if (cpu_buffer->tail_page == cpu_buffer->reader_page)
bpage = cpu_buffer->reader_page;
else
bpage = rb_set_head_page(cpu_buffer);
ret = bpage->page->time_stamp;
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
* ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
/** /**
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
* @buffer: The ring buffer * @buffer: The ring buffer
...@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read = 0; cpu_buffer->reader_page->read = 0;
local_set(&cpu_buffer->commit_overrun, 0); local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0); local_set(&cpu_buffer->overrun, 0);
local_set(&cpu_buffer->entries, 0); local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0); local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0); local_set(&cpu_buffer->commits, 0);
cpu_buffer->read = 0; cpu_buffer->read = 0;
cpu_buffer->read_bytes = 0;
cpu_buffer->write_stamp = 0; cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0; cpu_buffer->read_stamp = 0;
...@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, ...@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
} else { } else {
/* update the entry counter */ /* update the entry counter */
cpu_buffer->read += rb_page_entries(reader); cpu_buffer->read += rb_page_entries(reader);
cpu_buffer->read_bytes += BUF_PAGE_SIZE;
/* swap the pages */ /* swap the pages */
rb_init_page(bpage); rb_init_page(bpage);
......
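Taken together, the entries_bytes/read_bytes updates above maintain one simple invariant, which is exactly what the new ring_buffer_bytes_cpu() reports. A sketch of the bookkeeping (illustrative, not literal kernel code):

/*
 * Per-cpu byte accounting added in this series (sketch):
 *
 *   entries_bytes += length          when an event is reserved
 *   entries_bytes += page padding    when a tail page is closed early
 *   entries_bytes -= event length    when a just-reserved event is discarded
 *   entries_bytes -= BUF_PAGE_SIZE   when a full page is overwritten (overrun)
 *   read_bytes    += BUF_PAGE_SIZE   when a full page is handed to a reader
 */
static inline unsigned long rb_pending_bytes(unsigned long entries_bytes,
					     unsigned long read_bytes)
{
	/* what ring_buffer_bytes_cpu() returns for one CPU */
	return entries_bytes - read_bytes;
}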
...@@ -435,6 +435,7 @@ static struct { ...@@ -435,6 +435,7 @@ static struct {
} trace_clocks[] = { } trace_clocks[] = {
{ trace_clock_local, "local" }, { trace_clock_local, "local" },
{ trace_clock_global, "global" }, { trace_clock_global, "global" },
{ trace_clock_counter, "counter" },
}; };
int trace_clock_id; int trace_clock_id;
...@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m) ...@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m)
} }
} }
static void test_ftrace_alive(struct seq_file *m)
{
if (!ftrace_is_dead())
return;
seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
static int s_show(struct seq_file *m, void *v) static int s_show(struct seq_file *m, void *v)
{ {
struct trace_iterator *iter = v; struct trace_iterator *iter = v;
...@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v) ...@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v)
if (iter->tr) { if (iter->tr) {
seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_printf(m, "# tracer: %s\n", iter->trace->name);
seq_puts(m, "#\n"); seq_puts(m, "#\n");
test_ftrace_alive(m);
} }
if (iter->trace && iter->trace->print_header) if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m); iter->trace->print_header(m);
...@@ -3568,6 +3578,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, ...@@ -3568,6 +3578,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
return cnt; return cnt;
} }
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
char buf[64];
int r, cpu;
unsigned long size = 0, expanded_size = 0;
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
size += tr->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
if (ring_buffer_expanded)
r = sprintf(buf, "%lu\n", size);
else
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf, tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos) size_t cnt, loff_t *ppos)
@@ -3594,22 +3628,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	return 0;
 }
-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	char *buf;
-	size_t written;
+	unsigned long addr = (unsigned long)ubuf;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+	struct page *pages[2];
+	int nr_pages = 1;
+	ssize_t written;
+	void *page1;
+	void *page2;
+	int offset;
+	int size;
+	int len;
+	int ret;
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3617,28 +3653,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
-	buf = kmalloc(cnt + 2, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	/*
+	 * Userspace is injecting traces into the kernel trace buffer.
+	 * We want to be as non intrusive as possible.
+	 * To do so, we do not want to allocate any special buffers
+	 * or take any locks, but instead write the userspace data
+	 * straight into the ring buffer.
+	 *
+	 * First we need to pin the userspace buffer into memory,
+	 * which, most likely it is, because it just referenced it.
+	 * But there's no guarantee that it is. By using get_user_pages_fast()
+	 * and kmap_atomic/kunmap_atomic() we can get access to the
+	 * pages directly. We then write the data directly into the
+	 * ring buffer.
+	 */
+	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
-	if (copy_from_user(buf, ubuf, cnt)) {
-		kfree(buf);
-		return -EFAULT;
+	/* check if we cross pages */
+	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+		nr_pages = 2;
+
+	offset = addr & (PAGE_SIZE - 1);
+	addr &= PAGE_MASK;
+
+	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+	if (ret < nr_pages) {
+		while (--ret >= 0)
+			put_page(pages[ret]);
+		written = -EFAULT;
+		goto out;
 	}
-	if (buf[cnt-1] != '\n') {
-		buf[cnt] = '\n';
-		buf[cnt+1] = '\0';
+
+	page1 = kmap_atomic(pages[0]);
+	if (nr_pages == 2)
+		page2 = kmap_atomic(pages[1]);
+
+	local_save_flags(irq_flags);
+	size = sizeof(*entry) + cnt + 2; /* possible \n added */
+	buffer = global_trace.buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, preempt_count());
+	if (!event) {
+		/* Ring buffer disabled, return as if not open for write */
+		written = -EBADF;
+		goto out_unlock;
+	}
+
+	entry = ring_buffer_event_data(event);
+	entry->ip = _THIS_IP_;
+
+	if (nr_pages == 2) {
+		len = PAGE_SIZE - offset;
+		memcpy(&entry->buf, page1 + offset, len);
+		memcpy(&entry->buf[len], page2, cnt - len);
 	} else
-		buf[cnt] = '\0';
+		memcpy(&entry->buf, page1 + offset, cnt);
-	written = mark_printk("%s", buf);
-	kfree(buf);
-	*fpos += written;
+	if (entry->buf[cnt - 1] != '\n') {
+		entry->buf[cnt] = '\n';
+		entry->buf[cnt + 1] = '\0';
+	} else
+		entry->buf[cnt] = '\0';
+
+	ring_buffer_unlock_commit(buffer, event);
-	/* don't tell userspace we wrote more - it might confuse them */
-	if (written > cnt)
-		written = cnt;
+	written = cnt;
+	*fpos += written;
+
+ out_unlock:
+	if (nr_pages == 2)
+		kunmap_atomic(page2);
+	kunmap_atomic(page1);
+	while (nr_pages > 0)
+		put_page(pages[--nr_pages]);
+ out:
 	return written;
 }
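The comment above describes a general pattern: pin the user pages with get_user_pages_fast(), map them briefly with kmap_atomic(), and copy without allocating. A stripped-down, hypothetical sketch of that pattern outside the ring-buffer context, kept to a single page for simplicity (the helper name is an illustrative assumption):

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Illustrative only: copy up to one page of user data without kmalloc,
 * by pinning the user page and mapping it temporarily. */
static ssize_t peek_user_bytes(const char __user *ubuf, size_t cnt, char *dst)
{
	unsigned long addr = (unsigned long)ubuf;
	unsigned long offset = addr & (PAGE_SIZE - 1);
	struct page *page;
	void *kaddr;
	int ret;

	if (offset + cnt > PAGE_SIZE)		/* keep the sketch to one page */
		return -EINVAL;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &page);
	if (ret < 1)
		return -EFAULT;

	kaddr = kmap_atomic(page);		/* short, non-sleeping mapping */
	memcpy(dst, kaddr + offset, cnt);
	kunmap_atomic(kaddr);

	put_page(page);				/* drop the pin taken above */
	return cnt;
}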
...@@ -3739,6 +3828,12 @@ static const struct file_operations tracing_entries_fops = { ...@@ -3739,6 +3828,12 @@ static const struct file_operations tracing_entries_fops = {
.llseek = generic_file_llseek, .llseek = generic_file_llseek,
}; };
static const struct file_operations tracing_total_entries_fops = {
.open = tracing_open_generic,
.read = tracing_total_entries_read,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_free_buffer_fops = { static const struct file_operations tracing_free_buffer_fops = {
.write = tracing_free_buffer_write, .write = tracing_free_buffer_write,
.release = tracing_free_buffer_release, .release = tracing_free_buffer_release,
...@@ -4026,6 +4121,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf, ...@@ -4026,6 +4121,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
struct trace_array *tr = &global_trace; struct trace_array *tr = &global_trace;
struct trace_seq *s; struct trace_seq *s;
unsigned long cnt; unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
s = kmalloc(sizeof(*s), GFP_KERNEL); s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) if (!s)
...@@ -4042,6 +4139,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf, ...@@ -4042,6 +4139,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "commit overrun: %ld\n", cnt); trace_seq_printf(s, "commit overrun: %ld\n", cnt);
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
kfree(s); kfree(s);
...@@ -4450,6 +4558,9 @@ static __init int tracer_init_debugfs(void) ...@@ -4450,6 +4558,9 @@ static __init int tracer_init_debugfs(void)
trace_create_file("buffer_size_kb", 0644, d_tracer, trace_create_file("buffer_size_kb", 0644, d_tracer,
&global_trace, &tracing_entries_fops); &global_trace, &tracing_entries_fops);
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
&global_trace, &tracing_total_entries_fops);
trace_create_file("free_buffer", 0644, d_tracer, trace_create_file("free_buffer", 0644, d_tracer,
&global_trace, &tracing_free_buffer_fops); &global_trace, &tracing_free_buffer_fops);
...@@ -4566,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) ...@@ -4566,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
tracing_off(); tracing_off();
/* Did function tracer already get disabled? */
if (ftrace_is_dead()) {
printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
printk("# MAY BE MISSING FUNCTION EVENTS\n");
}
if (disable_tracing) if (disable_tracing)
ftrace_kill(); ftrace_kill();
......
@@ -579,11 +579,13 @@ static inline int ftrace_trace_task(struct task_struct *task)
 	return test_tsk_trace_trace(task);
 }
+extern int ftrace_is_dead(void);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
+static inline int ftrace_is_dead(void) { return 0; }
 #endif
 /*
@@ -761,16 +763,10 @@ struct filter_pred {
 	filter_pred_fn_t	fn;
 	u64			val;
 	struct regex		regex;
-	/*
-	 * Leaf nodes use field_name, ops is used by AND and OR
-	 * nodes. The field_name is always freed when freeing a pred.
-	 * We can overload field_name for ops and have it freed
-	 * as well.
-	 */
-	union {
-		char		*field_name;
-		unsigned short	*ops;
-	};
+	unsigned short		*ops;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	struct ftrace_event_field *field;
+#endif
 	int			offset;
 	int			not;
 	int			op;
......
...@@ -113,3 +113,15 @@ u64 notrace trace_clock_global(void) ...@@ -113,3 +113,15 @@ u64 notrace trace_clock_global(void)
return now; return now;
} }
static atomic64_t trace_counter;
/*
* trace_clock_counter(): simply an atomic counter.
* Use the trace_counter "counter" for cases where you do not care
* about timings, but are interested in strict ordering.
*/
u64 notrace trace_clock_counter(void)
{
return atomic64_add_return(1, &trace_counter);
}
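For illustration, the same idea in isolation: an atomic counter trades timing information for a gapless, strictly ordered sequence that is consistent across CPUs (names here are illustrative, not part of the patch):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t demo_event_seq;

/* Tag events when only their relative order matters: two calls, even on
 * different CPUs, never return the same value and never appear reordered
 * relative to the returned tags. */
static u64 demo_next_tag(void)
{
	return atomic64_add_return(1, &demo_event_seq);
}

With the clock registered in trace_clocks[] as above, writing "counter" to the tracing trace_clock file selects it for buffer timestamps.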
#undef TRACE_SYSTEM
#define TRACE_SYSTEM test
#if !defined(_TRACE_TEST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TEST_H
#include <linux/tracepoint.h>
TRACE_EVENT(ftrace_test_filter,
TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
TP_ARGS(a, b, c, d, e, f, g, h),
TP_STRUCT__entry(
__field(int, a)
__field(int, b)
__field(int, c)
__field(int, d)
__field(int, e)
__field(int, f)
__field(int, g)
__field(int, h)
),
TP_fast_assign(
__entry->a = a;
__entry->b = b;
__entry->c = c;
__entry->d = d;
__entry->e = e;
__entry->f = f;
__entry->g = g;
__entry->h = h;
),
TP_printk("a %d, b %d, c %d, d %d, e %d, f %d, g %d, h %d",
__entry->a, __entry->b, __entry->c, __entry->d,
__entry->e, __entry->f, __entry->g, __entry->h)
);
#endif /* _TRACE_TEST_H || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_events_filter_test
/* This part must be outside protection */
#include <trace/define_trace.h>
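The event above exists only so that a filter string can be exercised against known field values; a hypothetical call site (the real self-test lives in the collapsed trace_events_filter.c diff) would define the tracepoints and fire the event with fixed arguments:

#define CREATE_TRACE_POINTS
#include "trace_events_filter_test.h"

/* Hypothetical: record one event with known values so a filter such as
 * "a == 1 && g >= 7" can be checked against what was (or was not) logged. */
static void fire_filter_test_event(void)
{
	trace_ftrace_test_filter(1, 2, 3, 4, 5, 6, 7, 8);
}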
@@ -505,13 +505,13 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
......
@@ -59,18 +59,19 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 			continue;
 		}
+		fmt = NULL;
 		tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
-		if (tb_fmt)
+		if (tb_fmt) {
 			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
-		if (tb_fmt && fmt) {
-			list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
-			strcpy(fmt, *iter);
-			tb_fmt->fmt = fmt;
-			*iter = tb_fmt->fmt;
-		} else {
+			if (fmt) {
+				list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
+				strcpy(fmt, *iter);
+				tb_fmt->fmt = fmt;
+			} else
 				kfree(tb_fmt);
-			*iter = NULL;
 		}
+		*iter = fmt;
 	}
 	mutex_unlock(&btrace_mutex);
 }
......
@@ -34,11 +34,16 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
 static const int tracepoint_debug;
 /*
- * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
- * builtin and module tracepoints and the hash table.
+ * Tracepoints mutex protects the builtin and module tracepoints and the hash
+ * table, as well as the local module list.
  */
 static DEFINE_MUTEX(tracepoints_mutex);
+
+#ifdef CONFIG_MODULES
+/* Local list of struct module */
+static LIST_HEAD(tracepoint_module_list);
+#endif /* CONFIG_MODULES */
 /*
  * Tracepoint hash table, containing the active tracepoints.
  * Protected by tracepoints_mutex.
@@ -292,8 +297,9 @@ static void disable_tracepoint(struct tracepoint *elem)
  * @end: end of the range
  *
  * Updates the probe callback corresponding to a range of tracepoints.
+ * Called with tracepoints_mutex held.
  */
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-				   struct tracepoint * const *end)
+static void tracepoint_update_probe_range(struct tracepoint * const *begin,
+					  struct tracepoint * const *end)
 {
 	struct tracepoint * const *iter;
@@ -302,7 +308,6 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 	if (!begin)
 		return;
-	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
@@ -312,11 +317,27 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 			disable_tracepoint(*iter);
 		}
 	}
-	mutex_unlock(&tracepoints_mutex);
 }
+
+#ifdef CONFIG_MODULES
+void module_update_tracepoints(void)
+{
+	struct tp_module *tp_mod;
+
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
+			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
+}
+#else /* CONFIG_MODULES */
+void module_update_tracepoints(void)
+{
+}
+#endif /* CONFIG_MODULES */
+
 /*
  * Update probes, removing the faulty probes.
+ * Called with tracepoints_mutex held.
  */
 static void tracepoint_update_probes(void)
 {
@@ -359,11 +380,12 @@ int tracepoint_probe_register(const char *name, void *probe, void *data)
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_add_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -402,11 +424,12 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_remove_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -489,9 +512,8 @@ void tracepoint_probe_update_all(void)
 	if (!list_empty(&old_probes))
 		list_replace_init(&old_probes, &release_probes);
 	need_update = 0;
-	mutex_unlock(&tracepoints_mutex);
 	tracepoint_update_probes();
+	mutex_unlock(&tracepoints_mutex);
 	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
 		list_del(&pos->u.list);
 		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
...@@ -509,7 +531,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); ...@@ -509,7 +531,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
* Will return the first tracepoint in the range if the input tracepoint is * Will return the first tracepoint in the range if the input tracepoint is
* NULL. * NULL.
*/ */
int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
struct tracepoint * const *begin, struct tracepoint * const *end) struct tracepoint * const *begin, struct tracepoint * const *end)
{ {
if (!*tracepoint && begin != end) { if (!*tracepoint && begin != end) {
...@@ -520,11 +542,12 @@ int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, ...@@ -520,11 +542,12 @@ int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
return 1; return 1;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
#ifdef CONFIG_MODULES
static void tracepoint_get_iter(struct tracepoint_iter *iter) static void tracepoint_get_iter(struct tracepoint_iter *iter)
{ {
int found = 0; int found = 0;
struct tp_module *iter_mod;
/* Core kernel tracepoints */ /* Core kernel tracepoints */
if (!iter->module) { if (!iter->module) {
...@@ -534,12 +557,43 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter) ...@@ -534,12 +557,43 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
if (found) if (found)
goto end; goto end;
} }
/* tracepoints in modules. */ /* Tracepoints in modules */
found = module_get_iter_tracepoints(iter); mutex_lock(&tracepoints_mutex);
list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
/*
* Sorted module list
*/
if (iter_mod < iter->module)
continue;
else if (iter_mod > iter->module)
iter->tracepoint = NULL;
found = tracepoint_get_iter_range(&iter->tracepoint,
iter_mod->tracepoints_ptrs,
iter_mod->tracepoints_ptrs
+ iter_mod->num_tracepoints);
if (found) {
iter->module = iter_mod;
break;
}
}
mutex_unlock(&tracepoints_mutex);
end: end:
if (!found) if (!found)
tracepoint_iter_reset(iter); tracepoint_iter_reset(iter);
} }
#else /* CONFIG_MODULES */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
int found = 0;
/* Core kernel tracepoints */
found = tracepoint_get_iter_range(&iter->tracepoint,
__start___tracepoints_ptrs,
__stop___tracepoints_ptrs);
if (!found)
tracepoint_iter_reset(iter);
}
#endif /* CONFIG_MODULES */
void tracepoint_iter_start(struct tracepoint_iter *iter) void tracepoint_iter_start(struct tracepoint_iter *iter)
{ {
...@@ -566,26 +620,98 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_stop); ...@@ -566,26 +620,98 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
void tracepoint_iter_reset(struct tracepoint_iter *iter) void tracepoint_iter_reset(struct tracepoint_iter *iter)
{ {
#ifdef CONFIG_MODULES
iter->module = NULL; iter->module = NULL;
#endif /* CONFIG_MODULES */
iter->tracepoint = NULL; iter->tracepoint = NULL;
} }
EXPORT_SYMBOL_GPL(tracepoint_iter_reset); EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
static int tracepoint_module_coming(struct module *mod)
{
struct tp_module *tp_mod, *iter;
int ret = 0;
/*
* We skip modules that taint the kernel, especially those with different
* module header (for forced load), to make sure we don't cause a crash.
*/
if (mod->taints)
return 0;
mutex_lock(&tracepoints_mutex);
tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
if (!tp_mod) {
ret = -ENOMEM;
goto end;
}
tp_mod->num_tracepoints = mod->num_tracepoints;
tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
/*
* tracepoint_module_list is kept sorted by struct module pointer
* address for iteration on tracepoints from a seq_file that can release
* the mutex between calls.
*/
list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
BUG_ON(iter == tp_mod); /* Should never be in the list twice */
if (iter < tp_mod) {
/* We belong to the location right after iter. */
list_add(&tp_mod->list, &iter->list);
goto module_added;
}
}
/* We belong to the beginning of the list */
list_add(&tp_mod->list, &tracepoint_module_list);
module_added:
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
end:
mutex_unlock(&tracepoints_mutex);
return ret;
}
static int tracepoint_module_going(struct module *mod)
{
struct tp_module *pos;
mutex_lock(&tracepoints_mutex);
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
list_for_each_entry(pos, &tracepoint_module_list, list) {
if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
list_del(&pos->list);
kfree(pos);
break;
}
}
/*
* In the case of modules that were tainted at "coming", we'll simply
* walk through the list without finding it. We cannot use the "tainted"
* flag on "going", in case a module taints the kernel only after being
* loaded.
*/
mutex_unlock(&tracepoints_mutex);
return 0;
}
 int tracepoint_module_notify(struct notifier_block *self,
 			     unsigned long val, void *data)
 {
 	struct module *mod = data;
+	int ret = 0;
 	switch (val) {
 	case MODULE_STATE_COMING:
+		ret = tracepoint_module_coming(mod);
+		break;
+	case MODULE_STATE_LIVE:
+		break;
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints_ptrs,
-			mod->tracepoints_ptrs + mod->num_tracepoints);
+		ret = tracepoint_module_going(mod);
 		break;
 	}
-	return 0;
+	return ret;
 }
struct notifier_block tracepoint_module_nb = { struct notifier_block tracepoint_module_nb = {
...@@ -598,7 +724,6 @@ static int init_tracepoints(void) ...@@ -598,7 +724,6 @@ static int init_tracepoints(void)
return register_module_notifier(&tracepoint_module_nb); return register_module_notifier(&tracepoint_module_nb);
} }
__initcall(init_tracepoints); __initcall(init_tracepoints);
#endif /* CONFIG_MODULES */ #endif /* CONFIG_MODULES */
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
......