Commit f17f36bb authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of...

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: use local buffer variable for trace branch tracer
  tracing: fix warning on kernel/trace/trace_branch.c and trace_hw_branches.c
  ftrace: check for failure for all conversions
  tracing: correct module boundaries for ftrace_release
  tracing: fix transposed numbers of lock_depth and preempt_count
  trace: Fix missing assignment in trace_ctxwake_*
  tracing: Use free_percpu instead of kfree
  tracing: Check total refcount before releasing bufs in profile_enable failure
parents b924f959 8f6e8a31
...@@ -241,7 +241,7 @@ extern void ftrace_enable_daemon(void); ...@@ -241,7 +241,7 @@ extern void ftrace_enable_daemon(void);
# define ftrace_set_filter(buf, len, reset) do { } while (0) # define ftrace_set_filter(buf, len, reset) do { } while (0)
# define ftrace_disable_daemon() do { } while (0) # define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { } static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd) static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{ {
return -EINVAL; return -EINVAL;
......
...@@ -1078,14 +1078,9 @@ static void ftrace_replace_code(int enable) ...@@ -1078,14 +1078,9 @@ static void ftrace_replace_code(int enable)
failed = __ftrace_replace_code(rec, enable); failed = __ftrace_replace_code(rec, enable);
if (failed) { if (failed) {
rec->flags |= FTRACE_FL_FAILED; rec->flags |= FTRACE_FL_FAILED;
if ((system_state == SYSTEM_BOOTING) || ftrace_bug(failed, rec->ip);
!core_kernel_text(rec->ip)) { /* Stop processing */
ftrace_free_rec(rec); return;
} else {
ftrace_bug(failed, rec->ip);
/* Stop processing */
return;
}
} }
} while_for_each_ftrace_rec(); } while_for_each_ftrace_rec();
} }
...@@ -2662,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod, ...@@ -2662,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
} }
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
void ftrace_release(void *start, void *end) void ftrace_release_mod(struct module *mod)
{ {
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
struct ftrace_page *pg; struct ftrace_page *pg;
unsigned long s = (unsigned long)start;
unsigned long e = (unsigned long)end;
if (ftrace_disabled || !start || start == end) if (ftrace_disabled)
return; return;
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
do_for_each_ftrace_rec(pg, rec) { do_for_each_ftrace_rec(pg, rec) {
if ((rec->ip >= s) && (rec->ip < e)) { if (within_module_core(rec->ip, mod)) {
/* /*
* rec->ip is changed in ftrace_free_rec() * rec->ip is changed in ftrace_free_rec()
* It should not between s and e if record was freed. * It should not between s and e if record was freed.
...@@ -2706,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self, ...@@ -2706,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
mod->num_ftrace_callsites); mod->num_ftrace_callsites);
break; break;
case MODULE_STATE_GOING: case MODULE_STATE_GOING:
ftrace_release(mod->ftrace_callsites, ftrace_release_mod(mod);
mod->ftrace_callsites +
mod->num_ftrace_callsites);
break; break;
} }
......
...@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_array *tr = branch_tracer; struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_branch *entry; struct trace_branch *entry;
struct ring_buffer *buffer;
unsigned long flags; unsigned long flags;
int cpu, pc; int cpu, pc;
const char *p; const char *p;
...@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
goto out; goto out;
pc = preempt_count(); pc = preempt_count();
event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc); sizeof(*entry), flags, pc);
if (!event) if (!event)
goto out; goto out;
...@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->line = f->line; entry->line = f->line;
entry->correct = val == expect; entry->correct = val == expect;
if (!filter_check_discard(call, entry, tr->buffer, event)) if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(tr->buffer, event); ring_buffer_unlock_commit(buffer, event);
out: out:
atomic_dec(&tr->data[cpu]->disabled); atomic_dec(&tr->data[cpu]->disabled);
......
...@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) ...@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
if (atomic_inc_return(&event->profile_count)) if (atomic_inc_return(&event->profile_count))
return 0; return 0;
if (!total_profile_count++) { if (!total_profile_count) {
buf = (char *)alloc_percpu(profile_buf_t); buf = (char *)alloc_percpu(profile_buf_t);
if (!buf) if (!buf)
goto fail_buf; goto fail_buf;
...@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) ...@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
} }
ret = event->profile_enable(); ret = event->profile_enable();
if (!ret) if (!ret) {
total_profile_count++;
return 0; return 0;
}
kfree(trace_profile_buf_nmi);
fail_buf_nmi: fail_buf_nmi:
kfree(trace_profile_buf); if (!total_profile_count) {
free_percpu(trace_profile_buf_nmi);
free_percpu(trace_profile_buf);
trace_profile_buf_nmi = NULL;
trace_profile_buf = NULL;
}
fail_buf: fail_buf:
total_profile_count--;
atomic_dec(&event->profile_count); atomic_dec(&event->profile_count);
return ret; return ret;
......
...@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to) ...@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
struct ftrace_event_call *call = &event_hw_branch; struct ftrace_event_call *call = &event_hw_branch;
struct trace_array *tr = hw_branch_trace; struct trace_array *tr = hw_branch_trace;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buf;
struct hw_branch_entry *entry; struct hw_branch_entry *entry;
unsigned long irq1; unsigned long irq1;
int cpu; int cpu;
...@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to) ...@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out; goto out;
event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, buf = tr->buffer;
event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
sizeof(*entry), 0, 0); sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
...@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to) ...@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
entry->ent.type = TRACE_HW_BRANCHES; entry->ent.type = TRACE_HW_BRANCHES;
entry->from = from; entry->from = from;
entry->to = to; entry->to = to;
if (!filter_check_discard(call, entry, tr->buffer, event)) if (!filter_check_discard(call, entry, buf, event))
trace_buffer_unlock_commit(tr, event, 0, 0); trace_buffer_unlock_commit(buf, event, 0, 0);
out: out:
atomic_dec(&tr->data[cpu]->disabled); atomic_dec(&tr->data[cpu]->disabled);
......
...@@ -486,16 +486,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) ...@@ -486,16 +486,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
hardirq ? 'h' : softirq ? 's' : '.')) hardirq ? 'h' : softirq ? 's' : '.'))
return 0; return 0;
if (entry->lock_depth < 0) if (entry->preempt_count)
ret = trace_seq_putc(s, '.'); ret = trace_seq_printf(s, "%x", entry->preempt_count);
else else
ret = trace_seq_printf(s, "%d", entry->lock_depth); ret = trace_seq_putc(s, '.');
if (!ret) if (!ret)
return 0; return 0;
if (entry->preempt_count) if (entry->lock_depth < 0)
return trace_seq_printf(s, "%x", entry->preempt_count); return trace_seq_putc(s, '.');
return trace_seq_putc(s, '.');
return trace_seq_printf(s, "%d", entry->lock_depth);
} }
static int static int
...@@ -883,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) ...@@ -883,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent); trace_assign_type(field, iter->ent);
if (!S) if (!S)
task_state_char(field->prev_state); S = task_state_char(field->prev_state);
T = task_state_char(field->next_state); T = task_state_char(field->next_state);
if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
field->prev_pid, field->prev_pid,
...@@ -918,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) ...@@ -918,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent); trace_assign_type(field, iter->ent);
if (!S) if (!S)
task_state_char(field->prev_state); S = task_state_char(field->prev_state);
T = task_state_char(field->next_state); T = task_state_char(field->next_state);
SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment