Commit 84e53ff7 authored by Ingo Molnar


Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steven Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents ccf59d8d 7bcfaf54
......@@ -2859,6 +2859,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
to facilitate early boot debugging.
See also Documentation/trace/events.txt
trace_options=[option-list]
[FTRACE] Enable or disable tracer options at boot.
The option-list is a comma delimited list of options
that can be enabled or disabled just as if you were
to echo the option name into
/sys/kernel/debug/tracing/trace_options
For example, to enable the stacktrace option (to dump the
stack trace of each event), add to the command line:
trace_options=stacktrace
See also Documentation/trace/ftrace.txt "trace options"
section.
transparent_hugepage=
[KNL]
Format: [always|madvise|never]
......
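For reference, the boot parameter mirrors the runtime interface exactly, in the style of the documentation above (the "no" prefix for disabling an option is the standard trace_options convention; paths assume debugfs is mounted at /sys/kernel/debug):

    # On the kernel command line:
    trace_options=stacktrace

    # Equivalent at runtime:
    echo stacktrace > /sys/kernel/debug/tracing/trace_options
    # Disable it again by prefixing the option with "no":
    echo nostacktrace > /sys/kernel/debug/tracing/trace_options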
......@@ -127,13 +127,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);
......
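The hunk above folds the trace_nowake_* variants into plain trace_buffer_unlock_commit() and trace_buffer_unlock_commit_regs(). A minimal sketch of the resulting call pattern (illustrative only: event_type and entry_size are placeholder names, and the reserve/fill steps are per-event):

    struct ring_buffer *buffer;
    struct ring_buffer_event *event;
    unsigned long irq_flags;
    int pc;

    local_save_flags(irq_flags);
    pc = preempt_count();
    event = trace_current_buffer_lock_reserve(&buffer, event_type,
                                              entry_size, irq_flags, pc);
    if (event) {
            /* fill in ring_buffer_event_data(event) here */
            trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
    }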
......@@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...);
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
......@@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline int
trace_printk(const char *fmt, ...)
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
return 0;
}
......
......@@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
......
......@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto) \
{ assign; } \
\
if (!filter_current_check_discard(buffer, event_call, entry, event)) \
trace_nowake_buffer_unlock_commit(buffer, \
event, irq_flags, pc); \
trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
......
......@@ -31,27 +31,4 @@ struct syscall_metadata {
struct ftrace_event_call *exit_event;
};
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
extern int reg_event_syscall_exit(struct ftrace_event_call *call);
extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
extern int
ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
struct trace_event *event);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
struct trace_event *event);
#endif
#ifdef CONFIG_PERF_EVENTS
int perf_sysenter_enable(struct ftrace_event_call *call);
void perf_sysenter_disable(struct ftrace_event_call *call);
int perf_sysexit_enable(struct ftrace_event_call *call);
void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
#endif /* _TRACE_SYSCALL_H */
......@@ -119,6 +119,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
select IRQ_WORK
config GENERIC_TRACER
bool
......
......@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
{
return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
......@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
ftrace_enabled = 1;
return 0;
}
device_initcall(ftrace_nodyn_init);
core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
......@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (strlen(tmp) == 0)
return 1;
ret = strict_strtol(tmp, 10, &val);
ret = kstrtol(tmp, 10, &val);
if (ret < 0)
return ret;
......
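The strict_strto*() calls are converted to the kstrto*() helpers throughout this series; both return 0 on success and a negative errno on failure, so the conversion is mechanical. A minimal sketch matching the hunk above:

    long val;
    int ret;

    /* Parse tmp as base-10; on success val holds the result. */
    ret = kstrtol(tmp, 10, &val);
    if (ret < 0)
            return ret;     /* -EINVAL or -ERANGE */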
......@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
local_t commit_overrun;
local_t overrun;
local_t entries;
local_t overrun;
local_t commit_overrun;
local_t dropped_events;
local_t committing;
local_t commits;
unsigned long read;
......@@ -1820,7 +1821,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
}
/**
* ring_buffer_update_event - update event type and data
* rb_update_event - update event type and data
@event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
......@@ -2155,8 +2156,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* If we are not in overwrite mode,
* this is easy, just stop here.
*/
if (!(buffer->flags & RB_FL_OVERWRITE))
if (!(buffer->flags & RB_FL_OVERWRITE)) {
local_inc(&cpu_buffer->dropped_events);
goto out_reset;
}
ret = rb_handle_head_page(cpu_buffer,
tail_page,
......@@ -2720,8 +2723,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length,
void *data)
unsigned long length,
void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
......@@ -2929,12 +2932,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
unsigned long ret;
u64 ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
......@@ -2995,7 +2998,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
* ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
* ring_buffer_overrun_cpu - get the number of overruns caused by the ring
* buffer wrapping around (only if RB_FL_OVERWRITE is on).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
......@@ -3015,7 +3019,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by
* commits failing due to the buffer wrapping around while there are uncommitted
* events, such as during an interrupt storm.
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
......@@ -3035,6 +3041,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
/**
* ring_buffer_dropped_events_cpu - get the number of dropped events caused by
* the ring buffer filling up (only if RB_FL_OVERWRITE is off).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of dropped events from
*/
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->dropped_events);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
......@@ -3864,9 +3892,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
......
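The new dropped_events counter complements the existing statistics: overrun counts entries overwritten when RB_FL_OVERWRITE is set, commit_overrun counts losses from nested writes wrapping the buffer, and dropped_events counts writes rejected outright when overwrite is off. An illustrative consumer (the iteration below is an assumption, not code from this commit):

    unsigned long dropped = 0, overruns = 0;
    int cpu;

    /* Sum the per-cpu loss statistics exported above. */
    for_each_online_cpu(cpu) {
            dropped  += ring_buffer_dropped_events_cpu(buffer, cpu);
            overruns += ring_buffer_overrun_cpu(buffer, cpu);
    }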
......@@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
int print_max;
int use_max_tr;
bool print_max;
bool use_max_tr;
};
......@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
......@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long len,
unsigned long flags,
int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
......@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
void __buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
......@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
......@@ -407,7 +405,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
......@@ -841,6 +838,7 @@ extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
......
......@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->correct = val == expect;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
......@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
}
return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);
core_initcall(init_branch_tracer);
#else
static inline
......
......@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events();
seq_ops = inode->i_private;
return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
......@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return r;
}
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
.next = t_next,
......@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
};
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_event_seq_open,
.open = ftrace_event_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations ftrace_set_event_fops = {
.open = ftrace_event_seq_open,
.open = ftrace_event_set_open,
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
......@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_event_seq_ops;
return seq_open(file, seq_ops);
}
static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_set_event_seq_ops;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events();
return seq_open(file, seq_ops);
}
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
......@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
if (ret)
pr_warn("Failed to enable trace event: %s\n", token);
}
trace_printk_start_comm();
return 0;
}
......@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
(void *)&show_event_seq_ops,
&ftrace_avail_fops);
NULL, &ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");
entry = debugfs_create_file("set_event", 0644, d_tracer,
(void *)&show_set_event_seq_ops,
&ftrace_set_event_fops);
NULL, &ftrace_set_event_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_event' entry\n");
......@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
trace_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
......
......@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
}
} else {
if (field->is_signed)
ret = strict_strtoll(pred->regex.pattern, 0, &val);
ret = kstrtoll(pred->regex.pattern, 0, &val);
else
ret = strict_strtoull(pred->regex.pattern, 0, &val);
ret = kstrtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
......
......@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
* We use the callback data field (which is a pointer)
* as our counter.
*/
ret = strict_strtoul(number, 0, (unsigned long *)&count);
ret = kstrtoul(number, 0, (unsigned long *)&count);
if (ret)
return ret;
......@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
init_func_cmd_traceon();
return register_tracer(&function_trace);
}
device_initcall(init_function_trace);
core_initcall(init_function_trace);
......@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
return 1;
}
......@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
......@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
return register_tracer(&graph_trace);
}
device_initcall(init_graph_trace);
core_initcall(init_graph_trace);
......@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
......@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
......@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
......@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
......@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
......@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
......@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
device_initcall(init_irqsoff_tracer);
core_initcall(init_irqsoff_tracer);
......@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
/* an address specified */
ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
if (ret) {
pr_info("Failed to parse address.\n");
return ret;
......@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
trace_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
}
/* Kretprobe handler */
......@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
trace_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
}
/* Event entry printers */
......
......@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
goto fail;
type++;
if (strict_strtoul(type, 0, &bs))
if (kstrtoul(type, 0, &bs))
goto fail;
switch (bs) {
......@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
if (tmp) {
/* skip sign because strict_strtol doesn't accept '+' */
ret = strict_strtoul(tmp + 1, 0, offset);
/* skip sign because kstrtoul doesn't accept '+' */
ret = kstrtoul(tmp + 1, 0, offset);
if (ret)
return ret;
......@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
else
ret = -EINVAL;
} else if (isdigit(arg[5])) {
ret = strict_strtoul(arg + 5, 10, &param);
ret = kstrtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
......@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
ret = strict_strtoul(arg + 1, 0, &param);
ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
......@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
break;
case '+': /* deref memory */
arg++; /* Skip '+', because strict_strtol() rejects it. */
arg++; /* Skip '+', because kstrtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
ret = strict_strtol(arg, 0, &offset);
ret = kstrtol(arg, 0, &offset);
if (ret)
break;
......
......@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr->buffer, flags, 6, pc);
ftrace_trace_userstack(tr->buffer, flags, pc);
trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
......
......@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
......@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
......@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
.print_max = 1,
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
......@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
......@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
return 0;
}
device_initcall(init_wakeup_tracer);
core_initcall(init_wakeup_tracer);
......@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
int (*func)(void))
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
unsigned long count;
char *func_name;
int ret;
......@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* passed in by parameter to keep gcc from optimizing */
func();
......@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
......@@ -452,7 +449,6 @@ static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
......@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
......@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
......@@ -569,7 +563,6 @@ static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
......@@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
......@@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
......@@ -662,7 +653,6 @@ int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
unsigned long count;
int ret;
......@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* start the tracing */
ftrace_enabled = 1;
tracer_enabled = 1;
ret = tracer_init(trace, tr);
if (ret) {
......@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
if (ret)
......@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
......
......@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
......@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
return &entry->enter_fields;
}
struct trace_event_functions enter_syscall_print_funcs = {
.trace = print_syscall_enter,
};
struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
struct ftrace_event_class event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
.get_fields = syscall_get_enter_fields,
.raw_init = init_syscall_trace,
};
struct ftrace_event_class event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
.fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
.raw_init = init_syscall_trace,
};
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
......@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
int init_syscall_trace(struct ftrace_event_call *call)
static int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
......@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
struct trace_event_functions enter_syscall_print_funcs = {
.trace = print_syscall_enter,
};
struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
struct ftrace_event_class event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
.get_fields = syscall_get_enter_fields,
.raw_init = init_syscall_trace,
};
struct ftrace_event_class event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
.fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
.raw_init = init_syscall_trace,
};
unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
......@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
......@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
void perf_sysenter_disable(struct ftrace_event_call *call)
static void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
......@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
static int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
......@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
void perf_sysexit_disable(struct ftrace_event_call *call)
static void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;
......
......@@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
ret = strict_strtoul(arg, 0, &offset);
ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;
......