Commit 90983b16 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Sanitize get_callchain_buffer()

In case of allocation failure, get_callchain_buffers() keeps the
refcount incremented for the current event.

As a result, when get_callchain_buffers() returns an error,
we must cleanup what it did by cancelling its last refcount
with a call to put_callchain_buffers().

This is a hack in order to be able to call free_event()
after that failure.

The original purpose of that was to simplify the failure
path. But this error handling is actually counter-intuitive,
ugly and not very easy to follow, because one expects
the resources used to perform a service to be cleaned
up by the callee in case of failure, not by the caller.

So let's clean this up by cancelling the refcount from
get_callchain_buffers() in case of failure, and free the
event accordingly in perf_event_alloc().
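
For illustration, this is roughly the shape get_callchain_buffers()
ends up with (a simplified sketch of the callee-cleans-up pattern,
not the exact kernel code: the count > 1 and WARN_ON paths are
omitted):

	int get_callchain_buffers(void)
	{
		int err = 0;

		mutex_lock(&callchain_mutex);

		/* take our reference on the callchain buffers */
		if (atomic_inc_return(&nr_callchain_events) == 1)
			err = alloc_callchain_buffers();

		mutex_unlock(&callchain_mutex);

		/*
		 * On failure, drop the reference we took above ourselves,
		 * so the caller never has to call put_callchain_buffers()
		 * to undo a failed get.
		 */
		if (err)
			atomic_dec(&nr_callchain_events);

		return err;
	}

With that, perf_event_alloc() can simply branch to a common error
label and free the half-built event, without special-casing the
callchain refcount.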
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6050cb0b
@@ -117,6 +117,8 @@ int get_callchain_buffers(void)
 	err = alloc_callchain_buffers();
 exit:
 	mutex_unlock(&callchain_mutex);
+	if (err)
+		atomic_dec(&nr_callchain_events);
 
 	return err;
 }
...
@@ -6457,7 +6457,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	struct pmu *pmu;
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
-	long err;
+	long err = -EINVAL;
 
 	if ((unsigned)cpu >= nr_cpu_ids) {
 		if (!task || cpu != -1)
@@ -6540,25 +6540,23 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
 	 */
 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
-		goto done;
+		goto err_ns;
 
 	pmu = perf_init_event(event);
-
-done:
-	err = 0;
 	if (!pmu)
-		err = -EINVAL;
-	else if (IS_ERR(pmu))
+		goto err_ns;
+	else if (IS_ERR(pmu)) {
 		err = PTR_ERR(pmu);
-
-	if (err) {
-		if (event->ns)
-			put_pid_ns(event->ns);
-		kfree(event);
-		return ERR_PTR(err);
+		goto err_ns;
 	}
 
 	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+			err = get_callchain_buffers();
+			if (err)
+				goto err_pmu;
+		}
+
 		if (event->attach_state & PERF_ATTACH_TASK)
 			static_key_slow_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
@@ -6573,16 +6571,19 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 				atomic_inc(&per_cpu(perf_branch_stack_events,
 						    event->cpu));
 		}
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
-			err = get_callchain_buffers();
-			if (err) {
-				free_event(event);
-				return ERR_PTR(err);
-			}
-		}
 	}
 
 	return event;
+
+err_pmu:
+	if (event->destroy)
+		event->destroy(event);
+err_ns:
+	if (event->ns)
+		put_pid_ns(event->ns);
+	kfree(event);
+
+	return ERR_PTR(err);
 }
 
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
...