Commit 5be71b61 authored by Frederic Weisbecker, committed by Ingo Molnar

tracing/syscalls: protect thread flag toggling from races

Impact: fix syscall tracer enable/disable race

The current thread flag toggling is racy as shown in the following
scenario:

- task A is the last user of syscall tracing; it clears
  TIF_SYSCALL_FTRACE on each task

- at the same time, task B starts syscall tracing: refcount == 0, so
  it sets TIF_SYSCALL_FTRACE on each task.

The effect of the mixup is unpredictable.
This fix adds a mutex serializing {start,stop}_ftrace_syscalls().
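
For reference, a condensed sketch of the serialized pattern the patch
introduces (the per-task flag walk under tasklist_lock is elided for
brevity):

	static int refcount;
	static DEFINE_MUTEX(syscall_trace_lock);

	void start_ftrace_syscalls(void)
	{
		mutex_lock(&syscall_trace_lock);
		/* Only the first user sets the flag on every task */
		if (++refcount != 1)
			goto unlock;
		/* ... set TIF_SYSCALL_FTRACE on each task ... */
	unlock:
		mutex_unlock(&syscall_trace_lock);
	}

	void stop_ftrace_syscalls(void)
	{
		mutex_lock(&syscall_trace_lock);
		/* Only the last user clears the flag on every task */
		if (--refcount)
			goto unlock;
		/* ... clear TIF_SYSCALL_FTRACE on each task ... */
	unlock:
		mutex_unlock(&syscall_trace_lock);
	}

Note that the refcount is also demoted from atomic_t to a plain int:
once the mutex serializes both paths, the atomic operations are
redundant.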
Reported-by: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <1237151439-6755-3-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 64044345
@@ -5,7 +5,11 @@
 #include "trace_output.h"
 #include "trace.h"
 
-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;
+
+/* Prevent from races on thread flags toggling */
+static DEFINE_MUTEX(syscall_trace_lock);
 
 /* Option to display the parameters types */
 enum {
@@ -96,9 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* Don't enable the flag on the tasks twice */
-	if (atomic_inc_return(&refcount) != 1)
-		return;
+	if (++refcount != 1)
+		goto unlock;
 
 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -108,6 +114,9 @@ void start_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void stop_ftrace_syscalls(void)
@@ -115,9 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* There are perhaps still some users */
-	if (atomic_dec_return(&refcount))
-		return;
+	if (--refcount)
+		goto unlock;
 
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -126,6 +137,9 @@ void stop_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)