Commit aaecaa0b authored by Joel Fernandes, committed by Steven Rostedt (VMware)

tracing: Prepare to add preempt and irq trace events

In preparation for adding irqsoff and preemptoff enable and disable trace
events, move the required functions and code so that the events are easier to
add in a later patch. This patch is code movement only; there is no functional
change.
Link: http://lkml.kernel.org/r/20171006005432.14244-2-joelaf@google.com

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 6171a031
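The movement below follows a single pattern: each globally visible trace_* entry point becomes a thin wrapper around a new static inline tracer_* helper, and an empty stub is supplied whenever the corresponding tracer is configured out. Here is a minimal userspace sketch of that pattern, purely illustrative (the CONFIG_IRQSOFF_TRACER define is a plain compiler switch here, not the kernel's config system):

/*
 * Build with:  cc -DCONFIG_IRQSOFF_TRACER -o demo demo.c   (tracer on)
 * or:          cc -o demo demo.c                           (stubbed out)
 */
#include <stdio.h>

#ifdef CONFIG_IRQSOFF_TRACER
/* Helper with the real logic, analogous to tracer_hardirqs_on(). */
static inline void tracer_hardirqs_on(void)
{
	printf("irqsoff tracer: stop critical timing\n");
}
#else
/* Empty stub keeps the wrapper compiling when the tracer is off. */
static inline void tracer_hardirqs_on(void) { }
#endif

/* Stable public entry point; later patches can hook events here. */
void trace_hardirqs_on(void)
{
	tracer_hardirqs_on();
}

int main(void)
{
	trace_hardirqs_on();
	return 0;
}

Note that the wrappers keep the exported symbol surface unchanged, so existing callers of trace_hardirqs_on() and friends are unaffected by the reshuffle.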
kernel/trace/trace_irqsoff.c

@@ -16,6 +16,7 @@
 
 #include "trace.h"
 
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array *irqsoff_trace __read_mostly;
 static int tracer_enabled __read_mostly;
 
@@ -462,64 +463,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 
 #else /* !CONFIG_PROVE_LOCKING */
 
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
 /*
  * We are only interested in hardirq on/off events:
  */
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_on);
 
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_off);
 
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
 #endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
 	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
 	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
@@ -781,3 +762,70 @@ __init static int init_irqsoff_tracer(void)
 	return 0;
 }
 core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+void trace_hardirqs_on(void)
+{
+	tracer_hardirqs_on();
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+	tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	tracer_hardirqs_on_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+	tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+	tracer_preempt_off(a0, a1);
+}
+#endif
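With the indirection above in place, a follow-up patch only needs to touch the wrappers to emit enable/disable events; the latency-tracer call path stays untouched. A hypothetical userspace sketch of that next step follows; emit_irq_enable_event() is a made-up stand-in, not a name from the actual follow-up patch:

#include <stdio.h>

/* The helper moved into place by this patch (modelled in userspace). */
static inline void tracer_hardirqs_on(void)
{
	printf("latency tracer: critical section ends\n");
}

/* Hypothetical stand-in for an irq enable/disable trace event. */
static void emit_irq_enable_event(void)
{
	printf("irq-enable event fired\n");
}

void trace_hardirqs_on(void)
{
	emit_irq_enable_event();	/* the one new line a later patch adds */
	tracer_hardirqs_on();		/* existing latency-tracer path */
}

int main(void)
{
	trace_hardirqs_on();
	return 0;
}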