Commit 2a6c24af, authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

tracing: Fix race between deleting buffer and setting events

While analyzing the code, I discovered that there's a potential race between
deleting a trace instance and setting events. There are a few races that can
occur if events are being traced as the buffer is being deleted. Mostly the
problem comes with freeing the descriptor used by the trace event callback.
To prevent problems like this, the events are disabled before the buffer is
deleted. The problem with the current solution is that the event_mutex is
released between disabling the events and freeing the files, which means that
the events could be re-enabled while the freeing takes place.

Cc: stable@vger.kernel.org # 3.10
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 8e2e2fa4
...@@ -441,14 +441,14 @@ static int tracing_release_generic_file(struct inode *inode, struct file *filp) ...@@ -441,14 +441,14 @@ static int tracing_release_generic_file(struct inode *inode, struct file *filp)
/* /*
* __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
*/ */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, static int
const char *sub, const char *event, int set) __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
const char *sub, const char *event, int set)
{ {
struct ftrace_event_file *file; struct ftrace_event_file *file;
struct ftrace_event_call *call; struct ftrace_event_call *call;
int ret = -EINVAL; int ret = -EINVAL;
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) { list_for_each_entry(file, &tr->events, list) {
call = file->event_call; call = file->event_call;
...@@ -474,6 +474,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, ...@@ -474,6 +474,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
ret = 0; ret = 0;
} }
return ret;
}
/*
 * Locked wrapper around __ftrace_set_clr_event_nolock().
 *
 * Holds event_mutex across the set/clear so the per-instance event
 * files cannot be enabled/disabled concurrently with code that already
 * holds the mutex (e.g. event_trace_del_tracer() tearing down a trace
 * instance, which calls the _nolock variant under the same mutex).
 *
 * Returns the _nolock worker's result: 0 on success, otherwise a
 * negative error (-EINVAL when nothing matched, per the worker's
 * initial value of ret).
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
...@@ -2408,11 +2419,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr) ...@@ -2408,11 +2419,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
int event_trace_del_tracer(struct trace_array *tr) int event_trace_del_tracer(struct trace_array *tr)
{ {
/* Disable any running events */
__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
mutex_lock(&event_mutex); mutex_lock(&event_mutex);
/* Disable any running events */
__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
down_write(&trace_event_sem); down_write(&trace_event_sem);
__trace_remove_event_dirs(tr); __trace_remove_event_dirs(tr);
debugfs_remove_recursive(tr->event_dir); debugfs_remove_recursive(tr->event_dir);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment