Commit 2816c551 authored by Oleg Nesterov, committed by Steven Rostedt

tracing: trace_remove_event_call() should fail if call/file is in use

Change trace_remove_event_call(call) to return an error if this
call is active. This is what the callers assume but cannot verify
outside of the tracing locks. Both trace_kprobe.c and trace_uprobe.c
need additional changes: unregister_trace_probe() should abort
if trace_remove_event_call() fails.

The caller is going to free this call/file so we must ensure that
nobody can use them after trace_remove_event_call() succeeds.
debugfs should be fine after the previous changes and event_remove()
does TRACE_REG_UNREGISTER, but there are still two reasons why we need
the additional checks:

- There could be perf_events attached to this tp_event, so the
  patch checks ->perf_refcount.

- TRACE_REG_UNREGISTER can be suppressed by FTRACE_EVENT_FL_SOFT_MODE,
  so we simply check FTRACE_EVENT_FL_ENABLED protected by event_mutex.

Link: http://lkml.kernel.org/r/20130729175033.GB26284@redhat.com
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 776164c1
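
The trace_kprobe.c/trace_uprobe.c side mentioned in the message is not part of this excerpt. As a minimal sketch only, assuming the kprobe-tracer names of this era (struct trace_probe, tp->call, __unregister_trace_probe), the caller-side pattern is to abort the unregister when the new return value reports the event is still in use:

    /* Sketch, not the actual trace_kprobe.c hunk. */
    static int unregister_trace_probe(struct trace_probe *tp)
    {
            /*
             * trace_remove_event_call() now returns an error if the call
             * still has perf events attached or is enabled, so abort the
             * unregister instead of freeing an event that is in use.
             */
            if (trace_remove_event_call(&tp->call) < 0)
                    return -EBUSY;

            __unregister_trace_probe(tp);
            list_del(&tp->list);

            return 0;
    }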
@@ -332,7 +332,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
 extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
@@ -1713,16 +1713,47 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 	destroy_preds(call);
 }
 
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+	struct trace_array *tr;
+	struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+	if (call->perf_refcount)
+		return -EBUSY;
+#endif
+	do_for_each_event_file(tr, file) {
+		if (file->event_call != call)
+			continue;
+		/*
+		 * We can't rely on ftrace_event_enable_disable(enable => 0)
+		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+		 * TRACE_REG_UNREGISTER.
+		 */
+		if (file->flags & FTRACE_EVENT_FL_ENABLED)
+			return -EBUSY;
+		break;
+	} while_for_each_event_file();
+
+	__trace_remove_event_call(call);
+
+	return 0;
+}
+
 /* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
 {
+	int ret;
+
 	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	down_write(&trace_event_sem);
-	__trace_remove_event_call(call);
+	ret = probe_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
+
+	return ret;
 }
 
 #define for_each_event(event, start, end)			\