Commit e248491a authored by Jiri Olsa, committed by Steven Rostedt

ftrace: Add enable/disable ftrace_ops control interface

Adding a way to temporarily enable/disable ftrace_ops. The change
follows the same approach used for the 'global' ftrace_ops.

Introducing 2 global objects - control_ops and ftrace_control_list -
which take over all ftrace_ops registered with the FTRACE_OPS_FL_CONTROL
flag. In addition, a new per-cpu field called 'disabled' is added to
ftrace_ops to provide the control information for each cpu.

When an ftrace_ops with FTRACE_OPS_FL_CONTROL is registered, it is
set as disabled for all cpus.

The ftrace_control_list contains all the registered 'control' ftrace_ops.
The control_ops provides a function which iterates ftrace_control_list
and checks the 'disabled' flag on the current cpu.

Adding 3 inline functions (a usage sketch follows below):
  ftrace_function_local_disable/ftrace_function_local_enable
  - disable/enable the ftrace_ops on the current cpu
  ftrace_function_local_disabled
  - get the ftrace_ops::disabled value for the current cpu
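
For illustration, a minimal usage sketch; it is not part of this patch
and the names (my_func, my_ops, my_init, my_trace_this_cpu) are
hypothetical. Only register_ftrace_function(), FTRACE_OPS_FL_CONTROL and
the ftrace_function_local_* calls come from this interface:

  #include <linux/ftrace.h>
  #include <linux/init.h>
  #include <linux/preempt.h>
  #include <linux/types.h>

  /* callback runs only on cpus where my_ops is currently enabled */
  static void my_func(unsigned long ip, unsigned long parent_ip)
  {
  }

  static struct ftrace_ops my_ops = {
  	.func  = my_func,
  	.flags = FTRACE_OPS_FL_CONTROL,
  };

  static int __init my_init(void)
  {
  	/* the ops starts out disabled on every cpu */
  	return register_ftrace_function(&my_ops);
  }

  /* per cpu toggle; the local_* calls require preemption disabled */
  static void my_trace_this_cpu(bool start)
  {
  	preempt_disable();
  	if (start)
  		ftrace_function_local_enable(&my_ops);
  	else
  		ftrace_function_local_disable(&my_ops);
  	preempt_enable();
  }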

Link: http://lkml.kernel.org/r/1329317514-8131-2-git-send-email-jolsa@redhat.com
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 5b349261
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
+ *           is part of the global tracers sharing the same filter
+ *           via set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ *           allocated ftrace_ops which need special care
+ * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
+ *           could be controlled by following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
enum {
	FTRACE_OPS_FL_ENABLED = 1 << 0,
	FTRACE_OPS_FL_GLOBAL = 1 << 1,
	FTRACE_OPS_FL_DYNAMIC = 1 << 2,
+	FTRACE_OPS_FL_CONTROL = 1 << 3,
};

struct ftrace_ops {
	ftrace_func_t func;
	struct ftrace_ops *next;
	unsigned long flags;
+	int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash *notrace_hash;
	struct ftrace_hash *filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ * on current cpu
+ *
+ * This function returns value of ftrace_ops::disabled on current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	return *this_cpu_ptr(ops->disabled);
+}
+
extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
@@ -62,6 +62,8 @@
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
}
#endif

+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+	int __percpu *disabled;
+
+	disabled = alloc_percpu(int);
+	if (!disabled)
+		return -ENOMEM;
+
+	ops->disabled = disabled;
+	control_ops_disable_all(ops);
+	return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
static void update_global_ops(void)
{
	ftrace_func_t func;
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
	return 0;
}

+static void add_ftrace_list_ops(struct ftrace_ops **list,
+				struct ftrace_ops *main_ops,
+				struct ftrace_ops *ops)
+{
+	int first = *list == &ftrace_list_end;
+	add_ftrace_ops(list, ops);
+	if (first)
+		add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+				  struct ftrace_ops *main_ops,
+				  struct ftrace_ops *ops)
+{
+	int ret = remove_ftrace_ops(list, ops);
+	if (!ret && *list == &ftrace_list_end)
+		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+	return ret;
+}
+
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

+	/* We don't support both control and global flags set. */
+	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+		return -EINVAL;
+
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		int first = ftrace_global_list == &ftrace_list_end;
-		add_ftrace_ops(&ftrace_global_list, ops);
+		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
-		if (first)
-			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		if (control_ops_alloc(ops))
+			return -ENOMEM;
+		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_ops(&ftrace_global_list, ops);
-		if (!ret && ftrace_global_list == &ftrace_list_end)
-			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		ret = remove_ftrace_list_ops(&ftrace_global_list,
+					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		ret = remove_ftrace_list_ops(&ftrace_control_list,
+					     &control_ops, ops);
+		if (!ret) {
+			/*
+			 * The ftrace_ops is now removed from the list,
+			 * so there'll be no new users. We must ensure
+			 * all current users are done before we free
+			 * the control data.
+			 */
+			synchronize_sched();
+			control_ops_free(ops);
+		}
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
@@ -3873,6 +3940,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
#endif /* CONFIG_DYNAMIC_FTRACE */

+static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+		return;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	trace_recursion_set(TRACE_CONTROL_BIT);
+	op = rcu_dereference_raw(ftrace_control_list);
+	while (op != &ftrace_list_end) {
+		if (!ftrace_function_local_disabled(op) &&
+		    ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+
+		op = rcu_dereference_raw(op->next);
+	}
+	trace_recursion_clear(TRACE_CONTROL_BIT);
+	preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+	.func = ftrace_ops_control_func,
+};
+
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
@@ -288,6 +288,8 @@ struct tracer {

/* for function tracing recursion */
#define TRACE_INTERNAL_BIT (1<<11)
#define TRACE_GLOBAL_BIT (1<<12)
+#define TRACE_CONTROL_BIT (1<<13)
+
/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function