Commit dbc15d24 authored by Linus Torvalds

Merge tag 'trace-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Initialize tracing-graph-pause at task creation, not at the start of
   function tracing, to avoid corrupting the pause counter.

 - Set "pause-on-trace" for latency tracers as that option breaks their
   output (regression).

 - Fix the wrong error return when setting kretprobes on future modules
   (i.e., modules that are not loaded yet).

 - Fix re-registering the same kretprobe.

 - Add the missing NULL check on the reloaded RCU-protected tracepoint
   funcs pointer.

* tag 'trace-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracepoint: Fix race between tracing and removing tracepoint
  kretprobe: Avoid re-registration of the same kretprobe earlier
  tracing/kprobe: Fix to support kretprobe events on unloaded modules
  tracing: Use pause-on-trace with the latency tracers
  fgraph: Initialize tracing_graph_pause at task creation
parents 54fe3ffe c8b186a8
@@ -266,7 +266,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
 extern int arch_populate_kprobe_blacklist(void);
 extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
 extern bool within_kprobe_blacklist(unsigned long addr);
 extern int kprobe_add_ksym_blacklist(unsigned long entry);
@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 								\
 		it_func_ptr =					\
 			rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
-		do {						\
-			it_func = (it_func_ptr)->func;		\
-			__data = (it_func_ptr)->data;		\
-			((void(*)(void *, proto))(it_func))(__data, args); \
-		} while ((++it_func_ptr)->func);		\
+		if (it_func_ptr) {				\
+			do {					\
+				it_func = (it_func_ptr)->func;	\
+				__data = (it_func_ptr)->data;	\
+				((void(*)(void *, proto))(it_func))(__data, args); \
+			} while ((++it_func_ptr)->func);	\
+		}						\
 		return 0;					\
 	}							\
 	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
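The hunk above is the "missing NULL check" fix from the pull summary: the funcs pointer loaded via rcu_dereference_raw() may legitimately be NULL when the last probe is being removed concurrently, so the iteration must be guarded. Below is a minimal user-space sketch of that load-then-check pattern, using C11 atomics in place of RCU; every name in it is illustrative, not from the kernel tree.

```c
#include <stdatomic.h>
#include <stdio.h>

struct probe {
	void (*func)(void *data);
	void *data;
};

/* A writer publishes or retracts the array; NULL means "no probes". */
static _Atomic(struct probe *) probes;

static void call_probes(void)
{
	/* One load, like rcu_dereference_raw(): may observe NULL if the
	 * last probe was just removed. */
	struct probe *it = atomic_load_explicit(&probes, memory_order_acquire);

	if (it) {				/* the check the fix adds */
		do {
			it->func(it->data);
		} while ((++it)->func);		/* NULL-func terminated array */
	}
}

static void hello(void *data) { printf("probe: %s\n", (const char *)data); }

int main(void)
{
	static struct probe arr[] = { { hello, "hi" }, { NULL, NULL } };

	atomic_store_explicit(&probes, arr, memory_order_release);
	call_probes();
	atomic_store_explicit(&probes, NULL, memory_order_release);
	call_probes();		/* safely does nothing now */
	return 0;
}
```
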
@@ -198,7 +198,8 @@ struct task_struct init_task
 	.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.ret_stack	= NULL,
+	.ret_stack		= NULL,
+	.tracing_graph_pause	= ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
 	.trace_recursion = 0,
@@ -1954,28 +1954,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
 	return !offset;
 }
 
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym: Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * And also it returns -ENOENT if it fails the symbol or address lookup.
+ * Caller must pass @addr or @sym (either one must be NULL), or this
+ * returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
 {
 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
 
 	if (IS_ERR(kp_addr))
-		return false;
+		return PTR_ERR(kp_addr);
+
+	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+		return -ENOENT;
 
-	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
-						!arch_kprobe_on_func_entry(offset))
-		return false;
+	if (!arch_kprobe_on_func_entry(offset))
+		return -EINVAL;
 
-	return true;
+	return 0;
 }
 
 int register_kretprobe(struct kretprobe *rp)
 {
-	int ret = 0;
+	int ret;
 	struct kretprobe_instance *inst;
 	int i;
 	void *addr;
 
-	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+	if (ret)
+		return ret;
+
+	/* If only rp->kp.addr is specified, check reregistering kprobes */
+	if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
 		return -EINVAL;
 
 	if (kretprobe_blacklist_size) {
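With kprobe_on_func_entry() now returning an errno instead of a bool, callers can tell "not a function entry" (-EINVAL) apart from "symbol not resolvable yet" (-ENOENT, e.g. a module that has not been loaded). The sketch below mimics that caller-side logic in plain user-space C; kprobe_on_func_entry_stub(), setup_retprobe(), and the symbol strings are made up for illustration.

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel helper; the real one consults kallsyms. */
static int kprobe_on_func_entry_stub(const char *sym, unsigned long offset)
{
	if (strcmp(sym, "not_yet_loaded_mod:func") == 0)
		return -ENOENT;		/* lookup failed: module not loaded */
	return offset == 0 ? 0 : -EINVAL;
}

static int setup_retprobe(const char *sym, unsigned long offset)
{
	int ret = kprobe_on_func_entry_stub(sym, offset);

	if (ret == 0)
		printf("%s+%lu: function entry, retprobe OK\n", sym, offset);
	else if (ret == -ENOENT)
		printf("%s: unresolved, defer to registration time\n", sym);
	else if (ret == -EINVAL)
		fprintf(stderr, "%s+%lu: bad retprobe target\n", sym, offset);

	/* Only -EINVAL is a hard error for a return probe. */
	return ret == -ENOENT ? 0 : ret;
}

int main(void)
{
	setup_retprobe("vfs_read", 0);
	setup_retprobe("vfs_read", 16);
	setup_retprobe("not_yet_loaded_mod:func", 0);
	return 0;
}
```
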
@@ -394,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 	}
 
 	if (t->ret_stack == NULL) {
-		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->curr_ret_stack = -1;
 		t->curr_ret_depth = -1;
@@ -489,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-	atomic_set(&t->tracing_graph_pause, 0);
 	atomic_set(&t->trace_overrun, 0);
 	t->ftrace_timestamp = 0;
 	/* make curr_ret_stack visible before we add the ret_stack */
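These two deletions pair with the init_task hunk above: tracing_graph_pause is now initialized exactly once, at task creation, because re-zeroing it when graph tracing starts can wipe out an in-flight pause. Here is a small stand-alone C illustration of the unbalanced-counter problem, modeling pause_graph_tracing()/unpause_graph_tracing() as plain atomic inc/dec; this is not kernel code.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tracing_graph_pause;	/* per-task in the kernel */

static void pause_tracing(void)   { atomic_fetch_add(&tracing_graph_pause, 1); }
static void unpause_tracing(void) { atomic_fetch_sub(&tracing_graph_pause, 1); }

int main(void)
{
	pause_tracing();			/* task pauses tracing */

	/* Old behavior: graph tracing starts and resets the counter
	 * underneath the paused task ... */
	atomic_store(&tracing_graph_pause, 0);

	unpause_tracing();			/* ... matched decrement */
	printf("counter = %d (corrupt: should be 0)\n",
	       atomic_load(&tracing_graph_pause));
	return 0;
}
```
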
@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	/* without pause, we will produce garbage if another latency occurs */
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
 
 	tr->max_latency = 0;
 	irqsoff_trace = tr;
@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
 
 	stop_irqsoff_tracer(tr, is_graph(tr));
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
 
 	ftrace_reset_array_ops(tr);
 	irqsoff_busy = false;
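The irqsoff tracer now force-enables "pause-on-trace" at init and restores the user's saved setting on reset. The same option is also reachable from user space through tracefs; a minimal sketch follows, assuming the common (but not universal) /sys/kernel/tracing mount point and root privileges.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *opt = "/sys/kernel/tracing/options/pause-on-trace";
	int fd = open(opt, O_WRONLY);

	if (fd < 0) {
		perror(opt);	/* needs root and a mounted tracefs */
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* "0" would clear it again */
		perror("write");
	close(fd);
	return 0;
}
```
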
@@ -221,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-	return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
-			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
 }
 
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
@@ -828,9 +828,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
 	}
 	if (is_return)
 		flags |= TPARG_FL_RETURN;
-	if (kprobe_on_func_entry(NULL, symbol, offset))
+	ret = kprobe_on_func_entry(NULL, symbol, offset);
+	if (ret == 0)
 		flags |= TPARG_FL_FENTRY;
-	if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+	/* Defer the ENOENT case until register kprobe */
+	if (ret == -EINVAL && is_return) {
 		trace_probe_log_err(0, BAD_RETPROBE);
 		goto parse_error;
 	}
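End to end, the trace_kprobe change means a kretprobe event whose symbol cannot be resolved yet (-ENOENT) is no longer rejected at parse time; only -EINVAL, a non-entry offset on a return probe, still fails immediately. A user-space sketch of defining such an event is below; the event name and "mymod:myfunc" are invented, and the tracefs path is an assumption as before.

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/kprobe_events", "a");

	if (!f) {
		perror("kprobe_events");	/* needs root + tracefs */
		return 1;
	}
	/* 'r' = kretprobe; with this fix the definition succeeds even
	 * though mymod is not loaded yet, and arms on module load. */
	if (fprintf(f, "r:myretprobe mymod:myfunc\n") < 0)
		perror("define event");
	fclose(f);
	return 0;
}
```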