Commit 068da098 authored by Steven Rostedt (VMware), committed by Steven Rostedt (Google)

function_graph: Move graph depth stored data to shadow stack global var

The use of task->trace_recursion for the function graph depth logic was
a bit of an abuse of that variable. Now that there exist per-stack global
variables for registered graph tracers, use those instead.

Link: https://lore.kernel.org/linux-trace-kernel/171509106728.162236.2398372644430125344.stgit@devnote2
Link: https://lore.kernel.org/linux-trace-kernel/20240603190823.634870264@goodmis.org

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 12117f33
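
For context, the depth value is packed into two bits of the task variable, so it can only hold 0-3 (enough for normal context -> softirq -> irq -> NMI all hitting the start of graph tracing). The following is a minimal, stand-alone user-space sketch, not the kernel code itself; only the bit layout and the helper behavior are taken from the diff below, everything else is illustrative:

#include <stdio.h>

/* Values mirror the enum added to trace.h in this patch. */
#define TRACE_GRAPH_FL			1UL	/* bit 0: graph tracing started */
#define TRACE_GRAPH_DEPTH_START_BIT	2	/* depth occupies bits 2-3 */

static unsigned long graph_depth(unsigned long task_var)
{
	/* Two bits of depth, so mask with 3 (values 0-3). */
	return (task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

static void graph_set_depth(unsigned long *task_var, int depth)
{
	/* Clear the two depth bits, then store the new depth. */
	*task_var &= ~(3UL << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= ((unsigned long)(depth & 3)) << TRACE_GRAPH_DEPTH_START_BIT;
}

int main(void)
{
	unsigned long task_var = 0;

	task_var |= TRACE_GRAPH_FL;	/* graph tracing has started */
	graph_set_depth(&task_var, 2);	/* e.g. irq preempting a softirq start */
	printf("depth = %lu\n", graph_depth(task_var));	/* prints: depth = 2 */
	return 0;
}

With this patch, that encoding lives in the per-graph-ops task variable returned by fgraph_get_task_var() rather than in task->trace_recursion.
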
@@ -44,25 +44,6 @@ enum {
*/
TRACE_IRQ_BIT,
/*
* In the very unlikely case that an interrupt came in
* at a start of graph tracing, and we want to trace
* the function in that interrupt, the depth can be greater
* than zero, because of the preempted start of a previous
* trace. In an even more unlikely case, depth could be 2
* if a softirq interrupted the start of graph tracing,
* followed by an interrupt preempting a start of graph
* tracing in the softirq, and depth can even be 3
* if an NMI came in at the start of an interrupt function
* that preempted a softirq start of a function that
* preempted normal context!!!! Luckily, it can't be
* greater than 3, so the next two bits are a mask
* of what the depth is when we set TRACE_GRAPH_FL
*/
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,
/*
* To implement set_graph_notrace, if this bit is set, we ignore
* function graph tracing of called functions, until the return
@@ -78,16 +59,6 @@ enum {
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
#define trace_recursion_depth() \
(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
do { \
current->trace_recursion &= \
~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
current->trace_recursion |= \
((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
} while (0)
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -900,8 +900,38 @@ extern void free_fgraph_ops(struct trace_array *tr);
enum {
TRACE_GRAPH_FL = 1,
/*
* In the very unlikely case that an interrupt came in
* at a start of graph tracing, and we want to trace
* the function in that interrupt, the depth can be greater
* than zero, because of the preempted start of a previous
* trace. In an even more unlikely case, depth could be 2
* if a softirq interrupted the start of graph tracing,
* followed by an interrupt preempting a start of graph
* tracing in the softirq, and depth can even be 3
* if an NMI came in at the start of an interrupt function
* that preempted a softirq start of a function that
* preempted normal context!!!! Luckily, it can't be
* greater than 3, so the next two bits are a mask
* of what the depth is when we set TRACE_GRAPH_FL
*/
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,
};
static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
{
return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}
static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
{
*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
@@ -934,7 +964,7 @@ ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
* when the depth is zero.
*/
*task_var |= TRACE_GRAPH_FL;
trace_recursion_set_depth(trace->depth);
ftrace_graph_set_depth(task_var, trace->depth);
/*
* If no irqs are to be traced, but a set_graph_function
@@ -959,7 +989,7 @@ ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace
unsigned long *task_var = fgraph_get_task_var(gops);
if ((*task_var & TRACE_GRAPH_FL) &&
trace->depth == trace_recursion_depth())
trace->depth == ftrace_graph_depth(task_var))
*task_var &= ~TRACE_GRAPH_FL;
}