Commit f150dba6 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'perf-fixes-for-linus-2' of...

Merge branch 'perf-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix crash in swevents
  perf buildid-list: Fix --with-hits event processing
  perf scripts python: Give field dict to unhandled callback
  perf hist: fix objdump output parsing
  perf-record: Check correct pid when forking
  perf: Do the comm inheritance per thread in event__process_task
  perf: Use event__process_task from perf sched
  perf: Process comm events by tid
  blktrace: Fix new kernel-doc warnings
  perf_events: Fix unincremented buffer base on partial copy
  perf_events: Fix event scheduling issues introduced by transactional API
  perf_events, trace: Fix perf_trace_destroy(), mutex went missing
  perf_events, trace: Fix probe unregister race
  perf_events: Fix races in group composition
  perf_events: Fix races and clean up perf_event and perf_mmap_data interaction
parents 636667a5 c6df8d5a
...@@ -106,6 +106,7 @@ struct cpu_hw_events { ...@@ -106,6 +106,7 @@ struct cpu_hw_events {
int n_events; int n_events;
int n_added; int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX]; u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
...@@ -983,6 +984,7 @@ static int x86_pmu_enable(struct perf_event *event) ...@@ -983,6 +984,7 @@ static int x86_pmu_enable(struct perf_event *event)
out: out:
cpuc->n_events = n; cpuc->n_events = n;
cpuc->n_added += n - n0; cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
return 0; return 0;
} }
...@@ -1089,6 +1091,14 @@ static void x86_pmu_disable(struct perf_event *event) ...@@ -1089,6 +1091,14 @@ static void x86_pmu_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i; int i;
/*
* If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
*/
if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
return;
x86_pmu_stop(event); x86_pmu_stop(event);
for (i = 0; i < cpuc->n_events; i++) { for (i = 0; i < cpuc->n_events; i++) {
...@@ -1379,6 +1389,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu) ...@@ -1379,6 +1389,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
cpuc->group_flag |= PERF_EVENT_TXN_STARTED; cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
cpuc->n_txn = 0;
} }
/* /*
...@@ -1391,6 +1402,11 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu) ...@@ -1391,6 +1402,11 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED; cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
/*
* Truncate the collected events.
*/
cpuc->n_added -= cpuc->n_txn;
cpuc->n_events -= cpuc->n_txn;
} }
/* /*
...@@ -1419,6 +1435,12 @@ static int x86_pmu_commit_txn(const struct pmu *pmu) ...@@ -1419,6 +1435,12 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
*/ */
memcpy(cpuc->assign, assign, n*sizeof(int)); memcpy(cpuc->assign, assign, n*sizeof(int));
/*
* Clear out the txn count so that ->cancel_txn() which gets
* run after ->commit_txn() doesn't undo things.
*/
cpuc->n_txn = 0;
return 0; return 0;
} }
......
...@@ -585,6 +585,7 @@ enum perf_event_active_state { ...@@ -585,6 +585,7 @@ enum perf_event_active_state {
struct file; struct file;
struct perf_mmap_data { struct perf_mmap_data {
atomic_t refcount;
struct rcu_head rcu_head; struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC #ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work; struct work_struct work;
...@@ -592,7 +593,6 @@ struct perf_mmap_data { ...@@ -592,7 +593,6 @@ struct perf_mmap_data {
#endif #endif
int nr_pages; /* nr of data pages */ int nr_pages; /* nr of data pages */
int writable; /* are we writable */ int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */ atomic_t poll; /* POLL_ for wakeups */
...@@ -631,6 +631,9 @@ struct swevent_hlist { ...@@ -631,6 +631,9 @@ struct swevent_hlist {
struct rcu_head rcu_head; struct rcu_head rcu_head;
}; };
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
/** /**
* struct perf_event - performance event kernel representation: * struct perf_event - performance event kernel representation:
*/ */
...@@ -643,10 +646,10 @@ struct perf_event { ...@@ -643,10 +646,10 @@ struct perf_event {
int nr_siblings; int nr_siblings;
int group_flags; int group_flags;
struct perf_event *group_leader; struct perf_event *group_leader;
struct perf_event *output;
const struct pmu *pmu; const struct pmu *pmu;
enum perf_event_active_state state; enum perf_event_active_state state;
unsigned int attach_state;
atomic64_t count; atomic64_t count;
/* /*
...@@ -704,6 +707,8 @@ struct perf_event { ...@@ -704,6 +707,8 @@ struct perf_event {
/* mmap bits */ /* mmap bits */
struct mutex mmap_mutex; struct mutex mmap_mutex;
atomic_t mmap_count; atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
struct perf_mmap_data *data; struct perf_mmap_data *data;
/* poll related */ /* poll related */
......
...@@ -725,7 +725,7 @@ perf_trace_##call(void *__data, proto) \ ...@@ -725,7 +725,7 @@ perf_trace_##call(void *__data, proto) \
\ \
{ assign; } \ { assign; } \
\ \
head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\ head = this_cpu_ptr(event_call->perf_events); \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, &__regs, head); \ __count, &__regs, head); \
} }
......
This diff is collapsed.
...@@ -842,6 +842,7 @@ static void blk_add_trace_split(void *ignore, ...@@ -842,6 +842,7 @@ static void blk_add_trace_split(void *ignore,
/** /**
* blk_add_trace_remap - Add a trace for a remap operation * blk_add_trace_remap - Add a trace for a remap operation
* @ignore: trace callback data parameter (not used)
* @q: queue the io is for * @q: queue the io is for
* @bio: the source bio * @bio: the source bio
* @dev: target device * @dev: target device
...@@ -873,6 +874,7 @@ static void blk_add_trace_remap(void *ignore, ...@@ -873,6 +874,7 @@ static void blk_add_trace_remap(void *ignore,
/** /**
* blk_add_trace_rq_remap - Add a trace for a request-remap operation * blk_add_trace_rq_remap - Add a trace for a request-remap operation
* @ignore: trace callback data parameter (not used)
* @q: queue the io is for * @q: queue the io is for
* @rq: the source request * @rq: the source request
* @dev: target device * @dev: target device
......
...@@ -116,7 +116,7 @@ int perf_trace_enable(struct perf_event *p_event) ...@@ -116,7 +116,7 @@ int perf_trace_enable(struct perf_event *p_event)
if (WARN_ON_ONCE(!list)) if (WARN_ON_ONCE(!list))
return -EINVAL; return -EINVAL;
list = per_cpu_ptr(list, smp_processor_id()); list = this_cpu_ptr(list);
hlist_add_head_rcu(&p_event->hlist_entry, list); hlist_add_head_rcu(&p_event->hlist_entry, list);
return 0; return 0;
...@@ -132,8 +132,9 @@ void perf_trace_destroy(struct perf_event *p_event) ...@@ -132,8 +132,9 @@ void perf_trace_destroy(struct perf_event *p_event)
struct ftrace_event_call *tp_event = p_event->tp_event; struct ftrace_event_call *tp_event = p_event->tp_event;
int i; int i;
mutex_lock(&event_mutex);
if (--tp_event->perf_refcount > 0) if (--tp_event->perf_refcount > 0)
return; goto out;
if (tp_event->class->reg) if (tp_event->class->reg)
tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
...@@ -142,6 +143,12 @@ void perf_trace_destroy(struct perf_event *p_event) ...@@ -142,6 +143,12 @@ void perf_trace_destroy(struct perf_event *p_event)
tp_event->class->perf_probe, tp_event->class->perf_probe,
tp_event); tp_event);
/*
* Ensure our callback won't be called anymore. See
* tracepoint_probe_unregister() and __DO_TRACE().
*/
synchronize_sched();
free_percpu(tp_event->perf_events); free_percpu(tp_event->perf_events);
tp_event->perf_events = NULL; tp_event->perf_events = NULL;
...@@ -151,6 +158,8 @@ void perf_trace_destroy(struct perf_event *p_event) ...@@ -151,6 +158,8 @@ void perf_trace_destroy(struct perf_event *p_event)
perf_trace_buf[i] = NULL; perf_trace_buf[i] = NULL;
} }
} }
out:
mutex_unlock(&event_mutex);
} }
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
...@@ -169,7 +178,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, ...@@ -169,7 +178,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
if (*rctxp < 0) if (*rctxp < 0)
return NULL; return NULL;
raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id()); raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
/* zero the dead bytes from align to not leak stack to user */ /* zero the dead bytes from align to not leak stack to user */
memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
......
...@@ -1359,7 +1359,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, ...@@ -1359,7 +1359,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
for (i = 0; i < tp->nr_args; i++) for (i = 0; i < tp->nr_args; i++)
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
head = per_cpu_ptr(call->perf_events, smp_processor_id()); head = this_cpu_ptr(call->perf_events);
perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
} }
...@@ -1392,7 +1392,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, ...@@ -1392,7 +1392,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
for (i = 0; i < tp->nr_args; i++) for (i = 0; i < tp->nr_args; i++)
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
head = per_cpu_ptr(call->perf_events, smp_processor_id()); head = this_cpu_ptr(call->perf_events);
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
} }
......
...@@ -519,7 +519,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) ...@@ -519,7 +519,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
syscall_get_arguments(current, regs, 0, sys_data->nb_args, syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args); (unsigned long *)&rec->args);
head = per_cpu_ptr(sys_data->enter_event->perf_events, smp_processor_id()); head = this_cpu_ptr(sys_data->enter_event->perf_events);
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
} }
...@@ -595,7 +595,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) ...@@ -595,7 +595,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->nr = syscall_nr; rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs); rec->ret = syscall_get_return_value(current, regs);
head = per_cpu_ptr(sys_data->exit_event->perf_events, smp_processor_id()); head = this_cpu_ptr(sys_data->exit_event->perf_events);
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
} }
......
...@@ -43,8 +43,10 @@ static int __cmd_buildid_list(void) ...@@ -43,8 +43,10 @@ static int __cmd_buildid_list(void)
if (session == NULL) if (session == NULL)
return -1; return -1;
if (with_hits) if (with_hits) {
symbol_conf.full_paths = true;
perf_session__process_events(session, &build_id__mark_dso_hit_ops); perf_session__process_events(session, &build_id__mark_dso_hit_ops);
}
perf_session__fprintf_dsos_buildid(session, stdout, with_hits); perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
......
...@@ -503,7 +503,6 @@ static int __cmd_record(int argc, const char **argv) ...@@ -503,7 +503,6 @@ static int __cmd_record(int argc, const char **argv)
{ {
int i, counter; int i, counter;
struct stat st; struct stat st;
pid_t pid = 0;
int flags; int flags;
int err; int err;
unsigned long waking = 0; unsigned long waking = 0;
...@@ -572,7 +571,7 @@ static int __cmd_record(int argc, const char **argv) ...@@ -572,7 +571,7 @@ static int __cmd_record(int argc, const char **argv)
if (forks) { if (forks) {
child_pid = fork(); child_pid = fork();
if (pid < 0) { if (child_pid < 0) {
perror("failed to fork"); perror("failed to fork");
exit(-1); exit(-1);
} }
......
...@@ -1645,6 +1645,7 @@ static struct perf_event_ops event_ops = { ...@@ -1645,6 +1645,7 @@ static struct perf_event_ops event_ops = {
.sample = process_sample_event, .sample = process_sample_event,
.comm = event__process_comm, .comm = event__process_comm,
.lost = event__process_lost, .lost = event__process_lost,
.fork = event__process_task,
.ordered_samples = true, .ordered_samples = true,
}; };
......
...@@ -51,8 +51,7 @@ def kmem__kmalloc(event_name, context, common_cpu, ...@@ -51,8 +51,7 @@ def kmem__kmalloc(event_name, context, common_cpu,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)), flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, def trace_unhandled(event_name, context, event_fields_dict):
common_pid, common_comm):
try: try:
unhandled[event_name] += 1 unhandled[event_name] += 1
except TypeError: except TypeError:
......
...@@ -370,9 +370,9 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) ...@@ -370,9 +370,9 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm)
int event__process_comm(event_t *self, struct perf_session *session) int event__process_comm(event_t *self, struct perf_session *session)
{ {
struct thread *thread = perf_session__findnew(session, self->comm.pid); struct thread *thread = perf_session__findnew(session, self->comm.tid);
dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) { if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
...@@ -532,16 +532,11 @@ int event__process_mmap(event_t *self, struct perf_session *session) ...@@ -532,16 +532,11 @@ int event__process_mmap(event_t *self, struct perf_session *session)
int event__process_task(event_t *self, struct perf_session *session) int event__process_task(event_t *self, struct perf_session *session)
{ {
struct thread *thread = perf_session__findnew(session, self->fork.pid); struct thread *thread = perf_session__findnew(session, self->fork.tid);
struct thread *parent = perf_session__findnew(session, self->fork.ppid); struct thread *parent = perf_session__findnew(session, self->fork.ptid);
dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
self->fork.ppid, self->fork.ptid); self->fork.ppid, self->fork.ptid);
/*
* A thread clone will have the same PID for both parent and child.
*/
if (thread == parent)
return 0;
if (self->header.type == PERF_RECORD_EXIT) if (self->header.type == PERF_RECORD_EXIT)
return 0; return 0;
......
...@@ -965,7 +965,7 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file, ...@@ -965,7 +965,7 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
* Parse hexa addresses followed by ':' * Parse hexa addresses followed by ':'
*/ */
line_ip = strtoull(tmp, &tmp2, 16); line_ip = strtoull(tmp, &tmp2, 16);
if (*tmp2 != ':') if (*tmp2 != ':' || tmp == tmp2)
line_ip = -1; line_ip = -1;
} }
......
...@@ -208,7 +208,7 @@ static void python_process_event(int cpu, void *data, ...@@ -208,7 +208,7 @@ static void python_process_event(int cpu, void *data,
int size __unused, int size __unused,
unsigned long long nsecs, char *comm) unsigned long long nsecs, char *comm)
{ {
PyObject *handler, *retval, *context, *t, *obj; PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256]; static char handler_name[256];
struct format_field *field; struct format_field *field;
unsigned long long val; unsigned long long val;
...@@ -232,6 +232,14 @@ static void python_process_event(int cpu, void *data, ...@@ -232,6 +232,14 @@ static void python_process_event(int cpu, void *data,
sprintf(handler_name, "%s__%s", event->system, event->name); sprintf(handler_name, "%s__%s", event->system, event->name);
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && !PyCallable_Check(handler))
handler = NULL;
if (!handler) {
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dict");
}
s = nsecs / NSECS_PER_SEC; s = nsecs / NSECS_PER_SEC;
ns = nsecs - s * NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC;
...@@ -242,12 +250,20 @@ static void python_process_event(int cpu, void *data, ...@@ -242,12 +250,20 @@ static void python_process_event(int cpu, void *data,
PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
PyTuple_SetItem(t, n++, PyTuple_SetItem(t, n++,
PyCObject_FromVoidPtr(scripting_context, NULL)); PyCObject_FromVoidPtr(scripting_context, NULL));
if (handler) {
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
PyTuple_SetItem(t, n++, PyInt_FromLong(s)); PyTuple_SetItem(t, n++, PyInt_FromLong(s));
PyTuple_SetItem(t, n++, PyInt_FromLong(ns)); PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
PyTuple_SetItem(t, n++, PyString_FromString(comm)); PyTuple_SetItem(t, n++, PyString_FromString(comm));
} else {
PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
}
for (field = event->format.fields; field; field = field->next) { for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) { if (field->flags & FIELD_IS_STRING) {
int offset; int offset;
...@@ -272,27 +288,31 @@ static void python_process_event(int cpu, void *data, ...@@ -272,27 +288,31 @@ static void python_process_event(int cpu, void *data,
obj = PyLong_FromUnsignedLongLong(val); obj = PyLong_FromUnsignedLongLong(val);
} }
} }
if (handler)
PyTuple_SetItem(t, n++, obj); PyTuple_SetItem(t, n++, obj);
else
PyDict_SetItemString(dict, field->name, obj);
} }
if (!handler)
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1) if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple"); Py_FatalError("error resizing Python tuple");
handler = PyDict_GetItemString(main_dict, handler_name); if (handler) {
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t); retval = PyObject_CallObject(handler, t);
if (retval == NULL) if (retval == NULL)
handler_call_die(handler_name); handler_call_die(handler_name);
} else { } else {
handler = PyDict_GetItemString(main_dict, "trace_unhandled"); handler = PyDict_GetItemString(main_dict, "trace_unhandled");
if (handler && PyCallable_Check(handler)) { if (handler && PyCallable_Check(handler)) {
if (_PyTuple_Resize(&t, N_COMMON_FIELDS) == -1)
Py_FatalError("error resizing Python tuple");
retval = PyObject_CallObject(handler, t); retval = PyObject_CallObject(handler, t);
if (retval == NULL) if (retval == NULL)
handler_call_die("trace_unhandled"); handler_call_die("trace_unhandled");
} }
Py_DECREF(dict);
} }
Py_DECREF(t); Py_DECREF(t);
...@@ -548,12 +568,10 @@ static int python_generate_script(const char *outfile) ...@@ -548,12 +568,10 @@ static int python_generate_script(const char *outfile)
} }
fprintf(ofp, "def trace_unhandled(event_name, context, " fprintf(ofp, "def trace_unhandled(event_name, context, "
"common_cpu, common_secs, common_nsecs,\n\t\t" "event_fields_dict):\n");
"common_pid, common_comm):\n");
fprintf(ofp, "\t\tprint_header(event_name, common_cpu, " fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
"common_secs, common_nsecs,\n\t\tcommon_pid, " "for k,v in sorted(event_fields_dict.items())])\n\n");
"common_comm)\n\n");
fprintf(ofp, "def print_header(" fprintf(ofp, "def print_header("
"event_name, cpu, secs, nsecs, pid, comm):\n" "event_name, cpu, secs, nsecs, pid, comm):\n"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment