Commit 26b840ae authored by Linus Torvalds

Merge tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing/kprobes update from Steven Rostedt:
 "The majority of these changes are from Masami Hiramatsu bringing
  kprobes up to par with the latest changes to ftrace (multi buffering
  and the new function probes).

  He also discovered and fixed some bugs in doing so.  When pulling in
  his patches, I also found a few minor bugs as well and fixed them.

  This also includes a compile fix for some archs that select the ring
  buffer but not tracing.

  I based this off of the last patch you took from me that fixed the
  merge conflict error, as that was the commit that had all the changes
  I needed for this set of changes."

* tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/kprobes: Support soft-mode disabling
  tracing/kprobes: Support ftrace_event_file base multibuffer
  tracing/kprobes: Pass trace_probe directly from dispatcher
  tracing/kprobes: Increment probe hit-count even if it is used by perf
  tracing/kprobes: Use bool for retprobe checker
  ftrace: Fix function probe when more than one probe is added
  ftrace: Fix the output of enabled_functions debug file
  ftrace: Fix locking in register_ftrace_function_probe()
  tracing: Add helper function trace_create_new_event() to remove duplicate code
  tracing: Modify soft-mode only if there's no other referrer
  tracing: Indicate enabled soft-mode in enable file
  tracing/kprobes: Fix to increment return event probe hit-count
  ftrace: Cleanup regex_lock and ftrace_lock around hash updating
  ftrace, kprobes: Fix a deadlock on ftrace_regex_lock
  ftrace: Have ftrace_regex_write() return either read or error
  tracing: Return error if register_ftrace_function_probe() fails for event_enable_func()
  tracing: Don't succeed if event_enable_func did not register anything
  ring-buffer: Select IRQ_WORK
parents 607eeb0b b8820084
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * not set this, then the ftrace infrastructure will add recursion
  * protection for the caller.
  * STUB - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called, it will initialize the ops)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +102,7 @@ enum {
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
 	FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
 	FTRACE_OPS_FL_STUB = 1 << 7,
+	FTRACE_OPS_FL_INITIALIZED = 1 << 8,
 };

 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash *notrace_hash;
 	struct ftrace_hash *filter_hash;
+	struct mutex regex_lock;
 #endif
 };
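The new INITIALIZED flag and regex_lock work as a pair: a statically declared ftrace_ops cannot have its mutex set up at compile time, so the first registration has to initialize it lazily. One of the collapsed diffs below (presumably kernel/trace/ftrace.c) would carry a helper along these lines; this is a sketch based on the flag and field above, not the verbatim patch:

```c
/*
 * Sketch: lazily initialize an ftrace_ops the first time it is used.
 * Every entry point that takes the ops checks the INITIALIZED flag,
 * so regex_lock is valid before any filter-hash update grabs it.
 */
static void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->regex_lock);
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
```

Serializing hash updates on a per-ops regex_lock instead of one global mutex is presumably what lets the "Fix a deadlock on ftrace_regex_lock" change in the series keep a kprobe handler and a set_ftrace_filter write from blocking each other.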
@@ -293,6 +293,7 @@ struct ftrace_event_file {
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long flags;
+	atomic_t sm_ref;	/* soft-mode reference counter */
 };

 #define __TRACE_EVENT_FLAGS(name, value) \
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
 	bool
 	select TRACE_CLOCK
+	select IRQ_WORK

 config FTRACE_NMI_ENTER
 	bool
@@ -107,7 +108,6 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
-	select IRQ_WORK

 config GENERIC_TRACER
 	bool
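Moving select IRQ_WORK from TRACING down to RING_BUFFER is the compile fix mentioned in the pull message: the ring buffer itself uses irq_work to defer reader wake-ups out of the write path, so an arch that selects RING_BUFFER without the rest of tracing previously failed to link against the irq_work symbols. A simplified sketch of that dependency, assuming the shape of the existing ring-buffer wakeup code:

```c
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/wait.h>

/*
 * The writer may run in NMI or hard-irq context, so it cannot call
 * wake_up() directly; it queues an irq_work and the wakeup runs as
 * soon as interrupts can be serviced again.
 */
struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
};

static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
}

/* writer side, after committing an event:
 *	irq_work_queue(&rbwork->work);
 */
```

Since RING_BUFFER can be selected on its own, the symbol it links against (irq_work_queue) has to be pulled in by RING_BUFFER itself, not by TRACING.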
This diff is collapsed.
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
 		 */
 		if (soft_disable) {
+			if (atomic_dec_return(&file->sm_ref) > 0)
+				break;
 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 		} else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}

 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
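The inc/dec pair above is what lets several users share soft mode: atomic_inc_return() == 1 is the only transition that sets SOFT_MODE, and atomic_dec_return() == 0 is the only one that clears it, so e.g. perf and an enable_event trigger on the same event can attach and detach in any order. A minimal userspace model of that counting rule (illustrative only; names are hypothetical, not kernel API):

```c
#include <stdatomic.h>
#include <stdio.h>

#define SOFT_MODE (1u << 0)

struct file_model {
	atomic_int sm_ref;	/* models ftrace_event_file::sm_ref */
	unsigned int flags;	/* models the SOFT_MODE flag bit */
};

static void soft_enable(struct file_model *f)
{
	/* Only the 0 -> 1 transition sets the flag (inc_return == 1). */
	if (atomic_fetch_add(&f->sm_ref, 1) == 0)
		f->flags |= SOFT_MODE;
}

static void soft_disable(struct file_model *f)
{
	/* Only the 1 -> 0 transition clears it (dec_return == 0). */
	if (atomic_fetch_sub(&f->sm_ref, 1) == 1)
		f->flags &= ~SOFT_MODE;
}

int main(void)
{
	struct file_model f = { .sm_ref = 0, .flags = 0 };

	soft_enable(&f);	/* first user, e.g. perf */
	soft_enable(&f);	/* second user, e.g. a trigger */
	soft_disable(&f);	/* second user gone; flag must survive */
	printf("SOFT_MODE=%u\n", !!(f.flags & SOFT_MODE));	/* prints 1 */
	soft_disable(&f);	/* last user gone; flag clears */
	printf("SOFT_MODE=%u\n", !!(f.flags & SOFT_MODE));	/* prints 0 */
	return 0;
}
```

Without the counter, the second user's disable would clear SOFT_MODE while the first user still depends on it, which is the bug the "Modify soft-mode only if there's no other referrer" change fixes.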
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
 		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 			buf = "0*\n";
+		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+			buf = "1*\n";
 		else
 			buf = "1\n";
 	} else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
 	return 0;
 }

+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+		       struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	if (!file)
+		return NULL;
+
+	file->event_call = call;
+	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
+	list_add(&file->list, &tr->events);
+
+	return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;

-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;

-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;

-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;

-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return 0;
 }
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
 	if (ret < 0)
 		goto out_put;

 	ret = register_ftrace_function_probe(glob, ops, data);
-	if (!ret)
+	/*
+	 * The above returns on success the # of functions enabled,
+	 * but if it didn't find any functions it returns zero.
+	 * Consider no functions a failure too.
+	 */
+	if (!ret) {
+		ret = -ENOENT;
+		goto out_disable;
+	} else if (ret < 0)
 		goto out_disable;
+	/* Just return zero, not the number of enabled functions */
+	ret = 0;
  out:
 	mutex_unlock(&event_mutex);
 	return ret;
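For context on what soft mode buys: the event's registration with the tracepoint stays live, and the per-record path just tests the SOFT_DISABLED bit that the function probe registered above flips. The consumer side looks roughly like this (a sketch of the check in the generated event handler; the event name is hypothetical, not the verbatim macro output):

```c
/*
 * Sketch: a soft-disabled event is still registered, so its handler
 * runs, but it bails out before reserving ring-buffer space. Enabling
 * the event from a function probe is then a single clear_bit(), cheap
 * enough to do from within the traced function itself.
 */
static void ftrace_raw_event_sample(void *data)
{
	struct ftrace_event_file *ftrace_file = data;

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	/* ... reserve space in the ring buffer and record the event ... */
}
```

This is also why register_ftrace_function_probe() returning zero has to be treated as an error above: if the glob matched no functions, nothing will ever flip that bit, and the write into set_ftrace_filter should fail instead of silently succeeding.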
This diff is collapsed.