Commit a7603ff4 authored by Steven Rostedt, committed by Steven Rostedt

tracing: Replace the static global per_cpu arrays with allocated per_cpu

The global trace and the max-tr currently use static per_cpu arrays for the CPU data
descriptors. But in order to support newly allocated trace_arrays, the descriptors
also need to be allocated per_cpu data. Instead of using the static arrays, switch
the global trace and the max-tr to use allocated data.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent ccb469a1
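For readers less familiar with the two kernel per-cpu idioms this commit swaps, here is a minimal sketch of the difference; the names used (demo_cpu_data, demo_init, and so on) are hypothetical stand-ins, not the real trace_array_cpu code. The old scheme declares the storage statically with DEFINE_PER_CPU() and reaches it with per_cpu(); the new scheme allocates it at runtime with alloc_percpu() and dereferences it with per_cpu_ptr(), which is what lets each newly created trace_array carry its own per-CPU data.

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for struct trace_array_cpu; not the real thing. */
struct demo_cpu_data {
	unsigned long	entries;
	atomic_t	disabled;
};

/* Old pattern: storage fixed at build time, exactly one set per CPU. */
static DEFINE_PER_CPU(struct demo_cpu_data, demo_static_data);

/* New pattern: storage allocated at runtime, so each owner (here, each
 * trace_array-like object) can hold an independent per-CPU copy. */
static struct demo_cpu_data __percpu *demo_data;

static int __init demo_init(void)
{
	int cpu;

	demo_data = alloc_percpu(struct demo_cpu_data);
	if (!demo_data)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* Old access form: per_cpu() on the static variable. */
		per_cpu(demo_static_data, cpu).entries = 0;
		/* New access form: per_cpu_ptr() on the allocated base. */
		per_cpu_ptr(demo_data, cpu)->entries = 0;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	free_percpu(demo_data);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The diff below is essentially this substitution applied throughout the tracers: every tr->data[cpu] lookup becomes per_cpu_ptr(tr->data, cpu), and tracer_alloc_buffers() gains the alloc_percpu() calls plus matching free_percpu() calls on the error path.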
@@ -191,8 +191,6 @@ static struct trace_array global_trace;
 LIST_HEAD(ftrace_trace_arrays);
-static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 int filter_current_check_discard(struct ring_buffer *buffer,
 				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
@@ -227,8 +225,6 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
-static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 int tracing_is_enabled(void)
 {
 	return tracing_is_on();
@@ -666,13 +662,13 @@ unsigned long __read_mostly tracing_max_latency;
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
 	struct trace_array_cpu *max_data;
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
-	max_data = max_tr.data[cpu];
+	max_data = per_cpu_ptr(max_tr.data, cpu);
 	max_data->saved_latency = tracing_max_latency;
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
@@ -1984,7 +1980,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	unsigned long entries = 0;
 	u64 ts;
-	tr->data[cpu]->skipped_entries = 0;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = 0;
 	buf_iter = trace_buffer_iter(iter, cpu);
 	if (!buf_iter)
@@ -2004,7 +2000,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 		ring_buffer_read(buf_iter, NULL);
 	}
-	tr->data[cpu]->skipped_entries = entries;
+	per_cpu_ptr(tr->data, cpu)->skipped_entries = entries;
 }
 /*
@@ -2099,8 +2095,8 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
 		 * entries for the trace and we need to ignore the
 		 * ones before the time stamp.
 		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
+		if (per_cpu_ptr(tr->data, cpu)->skipped_entries) {
+			count -= per_cpu_ptr(tr->data, cpu)->skipped_entries;
 			/* total is the same as the entries */
 			*total += count;
 		} else
@@ -2157,7 +2153,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct trace_array *tr = iter->tr;
-	struct trace_array_cpu *data = tr->data[tr->cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, tr->cpu);
 	struct tracer *type = iter->trace;
 	unsigned long entries;
 	unsigned long total;
@@ -2227,7 +2223,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
-	if (iter->tr->data[iter->cpu]->skipped_entries)
+	if (per_cpu_ptr(iter->tr->data, iter->cpu)->skipped_entries)
 		return;
 	cpumask_set_cpu(iter->cpu, iter->started);
@@ -2858,12 +2854,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 */
 		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&tr->data[cpu]->disabled);
+			atomic_inc(&per_cpu_ptr(tr->data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&tr->data[cpu]->disabled);
+			atomic_dec(&per_cpu_ptr(tr->data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->buffer, cpu);
 		}
 	}
@@ -3177,7 +3173,7 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 {
 	int cpu;
 	for_each_tracing_cpu(cpu)
-		tr->data[cpu]->entries = val;
+		per_cpu_ptr(tr->data, cpu)->entries = val;
 }
 /* resize @tr's buffer to the size of @size_tr's entries */
@@ -3189,17 +3185,18 @@ static int resize_buffer_duplicate_size(struct trace_array *tr,
 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			ret = ring_buffer_resize(tr->buffer,
-					size_tr->data[cpu]->entries, cpu);
+					per_cpu_ptr(size_tr->data, cpu)->entries, cpu);
 			if (ret < 0)
 				break;
-			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+			per_cpu_ptr(tr->data, cpu)->entries =
+				per_cpu_ptr(size_tr->data, cpu)->entries;
 		}
 	} else {
 		ret = ring_buffer_resize(tr->buffer,
-					size_tr->data[cpu_id]->entries, cpu_id);
+				per_cpu_ptr(size_tr->data, cpu_id)->entries, cpu_id);
 		if (ret == 0)
-			tr->data[cpu_id]->entries =
-				size_tr->data[cpu_id]->entries;
+			per_cpu_ptr(tr->data, cpu_id)->entries =
+				per_cpu_ptr(size_tr->data, cpu_id)->entries;
 	}
 	return ret;
@@ -3256,13 +3253,13 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		set_buffer_entries(&max_tr, size);
 	else
-		max_tr.data[cpu]->entries = size;
+		per_cpu_ptr(max_tr.data, cpu)->entries = size;
 out:
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		set_buffer_entries(tr, size);
 	else
-		tr->data[cpu]->entries = size;
+		per_cpu_ptr(tr->data, cpu)->entries = size;
 	return ret;
 }
@@ -3905,8 +3902,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		for_each_tracing_cpu(cpu) {
 			/* fill in the size from first enabled cpu */
 			if (size == 0)
-				size = tr->data[cpu]->entries;
-			if (size != tr->data[cpu]->entries) {
+				size = per_cpu_ptr(tr->data, cpu)->entries;
+			if (size != per_cpu_ptr(tr->data, cpu)->entries) {
 				buf_size_same = 0;
 				break;
 			}
@@ -3922,7 +3919,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->data, tc->cpu)->entries >> 10);
 	mutex_unlock(&trace_types_lock);
@@ -3969,7 +3966,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->data[cpu]->entries >> 10;
+		size += per_cpu_ptr(tr->data, cpu)->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -4773,7 +4770,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5298,7 +5295,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_init_global_iter(&iter);
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -5366,7 +5363,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_flags |= old_userobj;
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 	tracing_on();
 }
@@ -5422,11 +5419,31 @@ __init static int tracer_alloc_buffers(void)
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
+	global_trace.data = alloc_percpu(struct trace_array_cpu);
+	if (!global_trace.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
+	for_each_tracing_cpu(i) {
+		memset(per_cpu_ptr(global_trace.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(global_trace.data, i)->trace_cpu.tr = &global_trace;
+	}
 	if (global_trace.buffer_disabled)
 		tracing_off();
 #ifdef CONFIG_TRACER_MAX_TRACE
+	max_tr.data = alloc_percpu(struct trace_array_cpu);
+	if (!max_tr.data) {
+		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+		WARN_ON(1);
+		goto out_free_cpumask;
+	}
 	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
 	raw_spin_lock_init(&max_tr.start_lock);
 	if (!max_tr.buffer) {
@@ -5435,18 +5452,15 @@ __init static int tracer_alloc_buffers(void)
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-#endif
-	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
-		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		global_trace.data[i]->trace_cpu.cpu = i;
-		global_trace.data[i]->trace_cpu.tr = &global_trace;
-		max_tr.data[i] = &per_cpu(max_tr_data, i);
-		max_tr.data[i]->trace_cpu.cpu = i;
-		max_tr.data[i]->trace_cpu.tr = &max_tr;
+		memset(per_cpu_ptr(max_tr.data, i), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.cpu = i;
+		per_cpu_ptr(max_tr.data, i)->trace_cpu.tr = &max_tr;
 	}
+#endif
+	/* Allocate the first page for all buffers */
 	set_buffer_entries(&global_trace,
 			   ring_buffer_size(global_trace.buffer, 0));
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5488,6 +5502,8 @@ __init static int tracer_alloc_buffers(void)
 	return 0;
 out_free_cpumask:
+	free_percpu(global_trace.data);
+	free_percpu(max_tr.data);
 	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
......
@@ -186,7 +186,7 @@ struct trace_array {
 	struct list_head	systems;
 	struct list_head	events;
 	struct task_struct	*waiter;
-	struct trace_array_cpu	*data[NR_CPUS];
+	struct trace_array_cpu	*data;
 };
 enum {
......
@@ -32,6 +32,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
 	struct ftrace_event_call *call = &event_branch;
 	struct trace_array *tr = branch_tracer;
+	struct trace_array_cpu *data;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
 	struct ring_buffer *buffer;
@@ -51,7 +52,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+	data = per_cpu_ptr(tr->data, cpu);
+	if (atomic_inc_return(&data->disabled) != 1)
 		goto out;
 	pc = preempt_count();
@@ -80,7 +82,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	__buffer_unlock_commit(buffer, event);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
+	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
......
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 		goto out;
 	cpu = smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	if (!atomic_read(&data->disabled)) {
 		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	 */
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
......
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
......
@@ -121,7 +121,7 @@ static int func_prolog_dec(struct trace_array *tr,
 	if (!irqs_disabled_flags(*flags))
 		return 0;
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (likely(disabled == 1))
@@ -380,7 +380,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	if (per_cpu(tracing_cpu, cpu))
 		return;
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	if (unlikely(!data) || atomic_read(&data->disabled))
 		return;
@@ -418,7 +418,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	if (!tracer_enabled)
 		return;
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->data, cpu);
 	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
......
@@ -26,7 +26,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	trace_init_global_iter(&iter);
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 	old_userobj = trace_flags;
@@ -83,7 +83,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	trace_flags = old_userobj;
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
 	}
 	for_each_tracing_cpu(cpu)
......
@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = tr->data[smp_processor_id()];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 	struct trace_array_cpu *data;
 	preempt_disable();
-	data = tr->data[smp_processor_id()];
+	data = per_cpu_ptr(tr->data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
......
@@ -69,7 +69,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
 	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->data, cpu);
 	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
@@ -123,7 +123,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
 	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->data, cpu);
 	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
......
@@ -89,7 +89,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -353,7 +353,7 @@ probe_wakeup_sched_switch(void *ignore,
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
@@ -365,7 +365,7 @@ probe_wakeup_sched_switch(void *ignore,
 		goto out_unlock;
 	/* The task we are waiting for is waking up */
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +387,7 @@ probe_wakeup_sched_switch(void *ignore,
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 static void __wakeup_reset(struct trace_array *tr)
@@ -435,7 +435,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 		return;
 	pc = preempt_count();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -458,7 +458,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	local_save_flags(flags);
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
@@ -472,7 +472,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 static void start_wakeup_tracer(struct trace_array *tr)
......