Commit ae3b5093 authored by Steven Rostedt's avatar Steven Rostedt Committed by Steven Rostedt

tracing: Use RING_BUFFER_ALL_CPUS for TRACE_PIPE_ALL_CPU

Both RING_BUFFER_ALL_CPUS and TRACE_PIPE_ALL_CPU are defined as
-1 and used to say that all the ring buffers are to be modified
or read (instead of just a single cpu, which would be >= 0).

There's no reason to keep TRACE_PIPE_ALL_CPU, as it has also started
to be used for more than what it was created for, and now that
the ring buffer code added a generic RING_BUFFER_ALL_CPUS define,
we can clean up the trace code to use that instead and remove
the TRACE_PIPE_ALL_CPU macro.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent ae63b31e
...@@ -287,13 +287,13 @@ static DEFINE_PER_CPU(struct mutex, cpu_access_lock); ...@@ -287,13 +287,13 @@ static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu) static inline void trace_access_lock(int cpu)
{ {
if (cpu == TRACE_PIPE_ALL_CPU) { if (cpu == RING_BUFFER_ALL_CPUS) {
/* gain it for accessing the whole ring buffer. */ /* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock); down_write(&all_cpu_access_lock);
} else { } else {
/* gain it for accessing a cpu ring buffer. */ /* gain it for accessing a cpu ring buffer. */
/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
down_read(&all_cpu_access_lock); down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */ /* Secondly block other access to this @cpu ring buffer. */
...@@ -303,7 +303,7 @@ static inline void trace_access_lock(int cpu) ...@@ -303,7 +303,7 @@ static inline void trace_access_lock(int cpu)
static inline void trace_access_unlock(int cpu) static inline void trace_access_unlock(int cpu)
{ {
if (cpu == TRACE_PIPE_ALL_CPU) { if (cpu == RING_BUFFER_ALL_CPUS) {
up_write(&all_cpu_access_lock); up_write(&all_cpu_access_lock);
} else { } else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu)); mutex_unlock(&per_cpu(cpu_access_lock, cpu));
...@@ -1823,7 +1823,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, ...@@ -1823,7 +1823,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
* If we are in a per_cpu trace file, don't bother by iterating over * If we are in a per_cpu trace file, don't bother by iterating over
* all cpu and peek directly. * all cpu and peek directly.
*/ */
if (cpu_file > TRACE_PIPE_ALL_CPU) { if (cpu_file > RING_BUFFER_ALL_CPUS) {
if (ring_buffer_empty_cpu(buffer, cpu_file)) if (ring_buffer_empty_cpu(buffer, cpu_file))
return NULL; return NULL;
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
...@@ -1983,7 +1983,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) ...@@ -1983,7 +1983,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
iter->cpu = 0; iter->cpu = 0;
iter->idx = -1; iter->idx = -1;
if (cpu_file == TRACE_PIPE_ALL_CPU) { if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) for_each_tracing_cpu(cpu)
tracing_iter_reset(iter, cpu); tracing_iter_reset(iter, cpu);
} else } else
...@@ -2291,7 +2291,7 @@ int trace_empty(struct trace_iterator *iter) ...@@ -2291,7 +2291,7 @@ int trace_empty(struct trace_iterator *iter)
int cpu; int cpu;
/* If we are looking at one CPU buffer, only check that one */ /* If we are looking at one CPU buffer, only check that one */
if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
cpu = iter->cpu_file; cpu = iter->cpu_file;
buf_iter = trace_buffer_iter(iter, cpu); buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter) { if (buf_iter) {
...@@ -2533,7 +2533,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) ...@@ -2533,7 +2533,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (!iter->snapshot) if (!iter->snapshot)
tracing_stop(); tracing_stop();
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] = iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->tr->buffer, cpu); ring_buffer_read_prepare(iter->tr->buffer, cpu);
...@@ -2617,7 +2617,7 @@ static int tracing_open(struct inode *inode, struct file *file) ...@@ -2617,7 +2617,7 @@ static int tracing_open(struct inode *inode, struct file *file)
(file->f_flags & O_TRUNC)) { (file->f_flags & O_TRUNC)) {
long cpu = (long) inode->i_private; long cpu = (long) inode->i_private;
if (cpu == TRACE_PIPE_ALL_CPU) if (cpu == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&global_trace); tracing_reset_online_cpus(&global_trace);
else else
tracing_reset(&global_trace, cpu); tracing_reset(&global_trace, cpu);
...@@ -5035,7 +5035,7 @@ static __init int tracer_init_debugfs(void) ...@@ -5035,7 +5035,7 @@ static __init int tracer_init_debugfs(void)
NULL, &tracing_cpumask_fops); NULL, &tracing_cpumask_fops);
trace_create_file("trace", 0644, d_tracer, trace_create_file("trace", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_fops); (void *) RING_BUFFER_ALL_CPUS, &tracing_fops);
trace_create_file("available_tracers", 0444, d_tracer, trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops); &global_trace, &show_traces_fops);
...@@ -5055,7 +5055,7 @@ static __init int tracer_init_debugfs(void) ...@@ -5055,7 +5055,7 @@ static __init int tracer_init_debugfs(void)
NULL, &tracing_readme_fops); NULL, &tracing_readme_fops);
trace_create_file("trace_pipe", 0444, d_tracer, trace_create_file("trace_pipe", 0444, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); (void *) RING_BUFFER_ALL_CPUS, &tracing_pipe_fops);
trace_create_file("buffer_size_kb", 0644, d_tracer, trace_create_file("buffer_size_kb", 0644, d_tracer,
(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
...@@ -5085,7 +5085,7 @@ static __init int tracer_init_debugfs(void) ...@@ -5085,7 +5085,7 @@ static __init int tracer_init_debugfs(void)
#ifdef CONFIG_TRACER_SNAPSHOT #ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer, trace_create_file("snapshot", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); (void *) RING_BUFFER_ALL_CPUS, &snapshot_fops);
#endif #endif
create_trace_options_dir(); create_trace_options_dir();
...@@ -5162,7 +5162,7 @@ void trace_init_global_iter(struct trace_iterator *iter) ...@@ -5162,7 +5162,7 @@ void trace_init_global_iter(struct trace_iterator *iter)
{ {
iter->tr = &global_trace; iter->tr = &global_trace;
iter->trace = current_trace; iter->trace = current_trace;
iter->cpu_file = TRACE_PIPE_ALL_CPU; iter->cpu_file = RING_BUFFER_ALL_CPUS;
} }
static void static void
...@@ -5210,7 +5210,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) ...@@ -5210,7 +5210,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
switch (oops_dump_mode) { switch (oops_dump_mode) {
case DUMP_ALL: case DUMP_ALL:
iter.cpu_file = TRACE_PIPE_ALL_CPU; iter.cpu_file = RING_BUFFER_ALL_CPUS;
break; break;
case DUMP_ORIG: case DUMP_ORIG:
iter.cpu_file = raw_smp_processor_id(); iter.cpu_file = raw_smp_processor_id();
...@@ -5219,7 +5219,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) ...@@ -5219,7 +5219,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
goto out_enable; goto out_enable;
default: default:
printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
iter.cpu_file = TRACE_PIPE_ALL_CPU; iter.cpu_file = RING_BUFFER_ALL_CPUS;
} }
printk(KERN_TRACE "Dumping ftrace buffer:\n"); printk(KERN_TRACE "Dumping ftrace buffer:\n");
......
...@@ -453,8 +453,6 @@ static __always_inline void trace_clear_recursion(int bit) ...@@ -453,8 +453,6 @@ static __always_inline void trace_clear_recursion(int bit)
current->trace_recursion = val; current->trace_recursion = val;
} }
#define TRACE_PIPE_ALL_CPU -1
static inline struct ring_buffer_iter * static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu) trace_buffer_iter(struct trace_iterator *iter, int cpu)
{ {
......
...@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) ...@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter.pos = -1; iter.pos = -1;
if (cpu_file == TRACE_PIPE_ALL_CPU) { if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] = iter.buffer_iter[cpu] =
ring_buffer_read_prepare(iter.tr->buffer, cpu); ring_buffer_read_prepare(iter.tr->buffer, cpu);
...@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv) ...@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv)
!cpu_online(cpu_file)) !cpu_online(cpu_file))
return KDB_BADINT; return KDB_BADINT;
} else { } else {
cpu_file = TRACE_PIPE_ALL_CPU; cpu_file = RING_BUFFER_ALL_CPUS;
} }
kdb_trap_printk++; kdb_trap_printk++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment