Commit aec0be2d authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

ftrace/x86/extable: Add is_ftrace_trampoline() function

Stack traces taken from function tracing check whether each address
on the stack is a __kernel_text_address(); that is, whether the
address is kernel code. This calls core_kernel_text(), which returns
true if the address is part of the built-in kernel code, and
is_module_text_address(), which returns true if the address belongs
to module code.
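
For reference, before this change the check in kernel/extable.c amounts to
roughly the following (a condensed sketch of the pre-patch logic, not the
verbatim source):

/* Condensed sketch: pre-patch address check used by stack traces */
int kernel_text_address(unsigned long addr)
{
	if (core_kernel_text(addr))		/* built-in kernel text */
		return 1;
	return is_module_text_address(addr);	/* module text */
}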

What is missing are ftrace's dynamically allocated trampolines.
These trampolines are allocated for individual ftrace_ops so that
their callback functions can be called directly. But if a stack trace
is taken from one of these callbacks, the code checking the stack
won't detect the trampoline addresses, as they are neither core
kernel code nor in the module address space.

By adding another field to ftrace_ops that stores the size of the
trampoline assigned to it, we can create a new function,
is_ftrace_trampoline(), that returns true if an address falls within
a dynamically allocated ftrace trampoline. Note that it ignores
trampolines that are not dynamically allocated, as those already
return true from core_kernel_text().
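
The implementation added below (in kernel/trace/ftrace.c) reduces to a range
check over every registered ftrace_ops. A condensed sketch, with the
preempt_disable_notrace() protection of the list walk omitted for brevity:

/* Condensed sketch: does addr fall inside any dynamically allocated trampoline? */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* only ops with a dynamically allocated trampoline have a size set */
		if (op->trampoline && op->trampoline_size &&
		    addr >= op->trampoline &&
		    addr < op->trampoline + op->trampoline_size)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}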

Link: http://lkml.kernel.org/r/20141119034829.497125839@goodmis.org

Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 9960efeb
arch/x86/kernel/ftrace.c
@@ -712,7 +712,8 @@ union ftrace_op_code_union {
 	} __attribute__((packed));
 };
 
-static unsigned long create_trampoline(struct ftrace_ops *ops)
+static unsigned long
+create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 {
 	unsigned const char *jmp;
 	unsigned long start_offset;
@@ -749,6 +750,8 @@ static unsigned long create_trampoline(struct ftrace_ops *ops)
 	if (!trampoline)
 		return 0;
 
+	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+
 	/* Copy ftrace_caller onto the trampoline memory */
 	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
 	if (WARN_ON(ret < 0)) {
@@ -819,6 +822,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 	unsigned char *new;
 	unsigned long offset;
 	unsigned long ip;
+	unsigned int size;
 	int ret;
 
 	if (ops->trampoline) {
@@ -829,9 +833,10 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
 			return;
 	} else {
-		ops->trampoline = create_trampoline(ops);
+		ops->trampoline = create_trampoline(ops, &size);
 		if (!ops->trampoline)
 			return;
+		ops->trampoline_size = size;
 	}
 
 	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
include/linux/ftrace.h
@@ -150,6 +150,7 @@ struct ftrace_ops {
 	struct ftrace_ops_hash		*func_hash;
 	struct ftrace_ops_hash		old_hash;
 	unsigned long			trampoline;
+	unsigned long			trampoline_size;
 #endif
 };
 
@@ -297,6 +298,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
 
+bool is_ftrace_trampoline(unsigned long addr);
+
 /*
  * The dyn_ftrace record's flags field is split into two parts.
  * the first part which is '0-FTRACE_REF_MAX' is a counter of
@@ -596,6 +599,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
 			     size_t cnt, loff_t *ppos) { return -ENODEV; }
 static inline int
 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
+
+static inline bool is_ftrace_trampoline(unsigned long addr)
+{
+	return false;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
kernel/extable.c
@@ -18,6 +18,7 @@
 #include <linux/ftrace.h>
 #include <linux/memory.h>
 #include <linux/module.h>
+#include <linux/ftrace.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
 
@@ -102,6 +103,8 @@ int __kernel_text_address(unsigned long addr)
 		return 1;
 	if (is_module_text_address(addr))
 		return 1;
+	if (is_ftrace_trampoline(addr))
+		return 1;
 	/*
 	 * There might be init symbols in saved stacktraces.
 	 * Give those symbols a chance to be printed in
@@ -119,7 +122,9 @@ int kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
 		return 1;
-	return is_module_text_address(addr);
+	if (is_module_text_address(addr))
+		return 1;
+	return is_ftrace_trampoline(addr);
 }
 
 /*
kernel/trace/ftrace.c
@@ -1117,6 +1117,43 @@ static struct ftrace_ops global_ops = {
 				  FTRACE_OPS_FL_INITIALIZED,
 };
 
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+	struct ftrace_ops *op;
+	bool ret = false;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they are freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/*
+		 * This is to check for dynamically allocated trampolines.
+		 * Trampolines that are in kernel text will have
+		 * core_kernel_text() return true.
+		 */
+		if (op->trampoline && op->trampoline_size)
+			if (addr >= op->trampoline &&
+			    addr < op->trampoline + op->trampoline_size) {
+				ret = true;
+				goto out;
+			}
+	} while_for_each_ftrace_op(op);
+
+ out:
+	preempt_enable_notrace();
+
+	return ret;
+}
+
 struct ftrace_page {
 	struct ftrace_page	*next;
 	struct dyn_ftrace	*records;
@@ -5373,6 +5410,7 @@ static struct ftrace_ops graph_ops = {
 				   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
+	/* trampoline_size is only needed for dynamically allocated tramps */
 #endif
 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };