tracing: Rename trace_buffer to array_buffer

As we are working to remove the generic "ring_buffer" name that is used by
both tracing and perf, the ring_buffer name for tracing will be renamed to
trace_buffer, and perf's ring buffer will be renamed to perf_buffer.

As there already exists a trace_buffer that is used by the trace_arrays, it
first needs to be renamed to array_buffer.
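
To make the scope of the rename concrete, here is a minimal user-space sketch
(the structures are simplified stand-ins, not the kernel definitions; only the
type and member names mirror the actual change):

  #include <stdio.h>

  struct ring_buffer { int dummy; };          /* stand-in for the real ring buffer */

  struct array_buffer {                       /* was: struct trace_buffer */
          struct ring_buffer *buffer;
  };

  struct trace_array {
          struct array_buffer array_buffer;   /* was: struct trace_buffer trace_buffer */
  };

  int main(void)
  {
          static struct ring_buffer rb;
          struct trace_array tr = { .array_buffer = { .buffer = &rb } };

          /* call sites that read tr.trace_buffer.buffer now read: */
          struct ring_buffer *buffer = tr.array_buffer.buffer;

          printf("buffer at %p\n", (void *)buffer);
          return 0;
  }

In the kernel itself the member stays embedded in struct trace_array, so every
tr->trace_buffer and iter->trace_buffer access in the diff below simply becomes
tr->array_buffer and iter->array_buffer.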

Link: https://lore.kernel.org/r/20191213153553.GE20583@krava
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
@@ -11,7 +11,7 @@
#include <linux/tracepoint.h>
struct trace_array;
-struct trace_buffer;
+struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
@@ -79,7 +79,7 @@ struct trace_entry {
struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
-struct trace_buffer *trace_buffer;
+struct array_buffer *array_buffer;
void *private;
int cpu_file;
struct mutex mutex;
......
@@ -75,7 +75,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
-buffer = blk_tr->trace_buffer.buffer;
+buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
-buffer = blk_tr->trace_buffer.buffer;
+buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,
......
@@ -146,7 +146,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
{
struct trace_array *tr = op->private;
-if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
+if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
return;
op->saved_func(ip, parent_ip, op, regs);
@@ -6922,7 +6922,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->function_pids);
-this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -6976,7 +6976,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
for_each_possible_cpu(cpu)
-per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
rcu_assign_pointer(tr->function_pids, NULL);
@@ -7100,7 +7100,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->function_pids,
mutex_is_locked(&ftrace_lock));
-this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, current));
}
......
@@ -176,7 +176,7 @@ struct trace_array_cpu {
struct tracer;
struct trace_option_dentry;
-struct trace_buffer {
+struct array_buffer {
struct trace_array *tr;
struct ring_buffer *buffer;
struct trace_array_cpu __percpu *data;
@@ -249,7 +249,7 @@ struct cond_snapshot {
struct trace_array {
struct list_head list;
char *name;
-struct trace_buffer trace_buffer;
+struct array_buffer array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
* The max_buffer is used to snapshot the trace when a maximum
@@ -257,12 +257,12 @@ struct trace_array {
* Some tracers will use this to store a maximum trace while
* it continues examining live traces.
*
-* The buffers for the max_buffer are set up the same as the trace_buffer
+* The buffers for the max_buffer are set up the same as the array_buffer
* When a snapshot is taken, the buffer of the max_buffer is swapped
-* with the buffer of the trace_buffer and the buffers are reset for
-* the trace_buffer so the tracing can continue.
+* with the buffer of the array_buffer and the buffers are reset for
+* the array_buffer so the tracing can continue.
*/
-struct trace_buffer max_buffer;
+struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -685,7 +685,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -1057,7 +1057,7 @@ struct ftrace_func_command {
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
-return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
......
@@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
raw_local_irq_save(flags);
current->trace_recursion |= TRACE_BRANCH_BIT;
-data = this_cpu_ptr(tr->trace_buffer.data);
+data = this_cpu_ptr(tr->array_buffer.data);
if (atomic_read(&data->disabled))
goto out;
pc = preempt_count();
-buffer = tr->trace_buffer.buffer;
+buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)
......
@@ -237,7 +237,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
if (!pid_list)
return false;
-data = this_cpu_ptr(tr->trace_buffer.data);
+data = this_cpu_ptr(tr->array_buffer.data);
return data->ignore_pid;
}
@@ -546,7 +546,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
-this_cpu_write(tr->trace_buffer.data->ignore_pid,
+this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, prev) &&
trace_ignore_this_task(pid_list, next));
}
@@ -560,7 +560,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
-this_cpu_write(tr->trace_buffer.data->ignore_pid,
+this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -571,12 +571,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are already tracing */
-if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
-this_cpu_write(tr->trace_buffer.data->ignore_pid,
+this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, task));
}
@@ -587,13 +587,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are not tracing */
-if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+if (this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
/* Set tracing if current is enabled */
-this_cpu_write(tr->trace_buffer.data->ignore_pid,
+this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
@@ -625,7 +625,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
}
for_each_possible_cpu(cpu)
-per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
rcu_assign_pointer(tr->filtered_pids, NULL);
@@ -1594,7 +1594,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->filtered_pids,
mutex_is_locked(&event_mutex));
-this_cpu_write(tr->trace_buffer.data->ignore_pid,
+this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
......
@@ -895,7 +895,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
* Avoid ring buffer recursion detection, as this event
* is being performed within another event.
*/
-buffer = trace_file->tr->trace_buffer.buffer;
+buffer = trace_file->tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
entry = trace_event_buffer_reserve(&fbuffer, trace_file,
......
@@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr)
ftrace_init_array_ops(tr, func);
-tr->trace_buffer.cpu = get_cpu();
+tr->array_buffer.cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
@@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
-tracing_reset_online_cpus(&tr->trace_buffer);
+tracing_reset_online_cpus(&tr->array_buffer);
}
static void
@@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
goto out;
cpu = smp_processor_id();
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (!atomic_read(&data->disabled)) {
local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
*/
local_irq_save(flags);
cpu = raw_smp_processor_id();
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
......
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
-ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
-event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
}
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
unsigned long long usecs;
-usecs = iter->ts - iter->trace_buffer->time_start;
+usecs = iter->ts - iter->array_buffer->time_start;
do_div(usecs, NSEC_PER_USEC);
trace_seq_printf(s, "%9llu us | ", usecs);
......
@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
struct trace_event_call *call = &event_hwlat;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
unsigned long flags;
......
@@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
-*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+*data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
@@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
per_cpu(tracing_cpu, cpu) = 0;
tr->max_latency = 0;
-tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
return start_irqsoff_tracer(irqsoff_trace, set);
}
@@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (per_cpu(tracing_cpu, cpu))
return;
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled))
return;
@@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (!tracer_enabled || !tracing_is_enabled())
return;
-data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled))
......
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
-ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
@@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
-ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu_file, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
@@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv)
iter.buffer_iter = buffer_iter;
for_each_tracing_cpu(cpu) {
-atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
/* A negative skip_entries means skip all but the last entries */
@@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv)
ftrace_dump_buf(skip_entries, cpu_file);
for_each_tracing_cpu(cpu) {
-atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
kdb_trap_printk--;
......
@@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr)
overrun_detected = false;
prev_overruns = 0;
-tracing_reset_online_cpus(&tr->trace_buffer);
+tracing_reset_online_cpus(&tr->array_buffer);
}
static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
unsigned long cnt = atomic_xchg(&dropped_count, 0);
-unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
if (over > prev_overruns)
cnt += over - prev_overruns;
@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct mmiotrace_rw *rw)
{
struct trace_event_call *call = &event_mmiotrace_rw;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
@@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct trace_array *tr = mmio_trace_array;
-struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_rw(tr, data, rw);
}
@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct mmiotrace_map *map)
{
struct trace_event_call *call = &event_mmiotrace_map;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
@@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
struct trace_array_cpu *data;
preempt_disable();
-data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_map(tr, data, map);
preempt_enable();
}
......
@@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
struct trace_array *tr = iter->tr;
unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
-unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
+unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq;
......
@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (cpu != wakeup_current_cpu)
goto out_enable;
-*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+*data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
unsigned long flags, int pc)
{
struct trace_event_call *call = &event_context_switch;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
-struct ring_buffer *buffer = tr->trace_buffer.buffer;
+struct ring_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc);
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
-disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out_unlock;
/* The task we are waiting for is waking up */
-data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -494,7 +494,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
-atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
-tracing_reset_online_cpus(&tr->trace_buffer);
+tracing_reset_online_cpus(&tr->array_buffer);
local_irq_save(flags);
arch_spin_lock(&wakeup_lock);
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
return;
pc = preempt_count();
-disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
local_save_flags(flags);
-data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
__trace_stack(wakeup_trace, flags, 0, pc);
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
-atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)
......
@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
return 0;
}
-static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
+static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
* Test the trace buffer to see if all the elements
* are still sane.
*/
-static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
unsigned long flags, cnt = 0;
int cpu, ret = 0;
@@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
msleep(100);
/* we should have nothing in the buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
+ret = trace_test_buffer(&tr->array_buffer, &count);
if (ret)
goto out;
@@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_enabled = 0;
/* check the trace buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
+ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
tracing_start();
@@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 0;
/* check the trace buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
+ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
trace->reset(tr);
@@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* Simulate the init() callback but we attach a watchdog callback
* to detect and recover from possible hangs
*/
-tracing_reset_online_cpus(&tr->trace_buffer);
+tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
ret = register_ftrace_graph(&fgraph_ops);
if (ret) {
@@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
tracing_stop();
/* check the trace buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
+ret = trace_test_buffer(&tr->array_buffer, &count);
/* Need to also simulate the tr->reset to remove this fgraph_ops */
tracing_stop_cmdline_record();
@@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
-ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
-ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
-ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
-ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
-ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
@@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
-ret = trace_test_buffer(&tr->trace_buffer, &count);
+ret = trace_test_buffer(&tr->array_buffer, &count);
trace->reset(tr);
tracing_start();
......
@@ -345,7 +345,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
local_save_flags(irq_flags);
pc = preempt_count();
-buffer = tr->trace_buffer.buffer;
+buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->enter_event->event.type, size, irq_flags, pc);
if (!event)
@@ -391,7 +391,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
local_save_flags(irq_flags);
pc = preempt_count();
-buffer = tr->trace_buffer.buffer;
+buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry),
irq_flags, pc);
......