Commit 6b099d9b authored by Alexander Shishkin, committed by Ingo Molnar

perf/x86/intel/bts: Fix DS area sharing with x86_pmu events

Currently, the intel_bts driver relies on the DS area allocated by the x86_pmu
code in its event_init() path, which is a bug: creating a BTS event while
no x86_pmu events are present results in a NULL pointer dereference.

The same DS area is also used by PEBS sampling, which makes it quite a bit
trickier to have a separate one for intel_bts' purposes.

This patch makes the intel_bts driver use the same DS allocation and
reference-counting code as x86_pmu, so that the DS area is always present
when either intel_bts or x86_pmu needs it.
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Link: http://lkml.kernel.org/r/1434024837-9916-2-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4b36f1a4
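The new x86_reserve_hardware()/x86_release_hardware() helpers in the diff below follow the usual refcount-with-mutex pattern: a lock-free fast path when the hardware is already reserved, and a mutex-protected slow path for the first reservation and the last release. The following is a minimal userspace sketch of that idiom only; the names (active_users, reserve_hardware(), hw_reserved) and the stubbed-out reservation step are illustrative assumptions, not kernel code and not part of the patch.

/* Userspace sketch of the refcount-with-mutex idiom (illustrative only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_users;                         /* counts current reservations */
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool hw_reserved;                                /* stands in for PMCs + DS buffers */

/* Emulates the kernel's atomic_inc_not_zero(): take a reference only if one already exists. */
static bool inc_not_zero(atomic_int *v)
{
        int cur = atomic_load(v);

        while (cur != 0)
                if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                        return true;
        return false;
}

static int reserve_hardware(void)
{
        int err = 0;

        if (!inc_not_zero(&active_users)) {
                /* Slow path: serialize the first reservation against the last release. */
                pthread_mutex_lock(&reserve_mutex);
                if (atomic_load(&active_users) == 0)
                        hw_reserved = true;             /* kernel: reserve PMCs + DS buffers; may set err = -EBUSY */
                if (!err)
                        atomic_fetch_add(&active_users, 1);
                pthread_mutex_unlock(&reserve_mutex);
        }

        return err;
}

static void release_hardware(void)
{
        /* Simplified: the kernel only takes the mutex when the count drops to zero
         * (atomic_dec_and_mutex_lock); here we always take it for brevity. */
        pthread_mutex_lock(&reserve_mutex);
        if (atomic_fetch_sub(&active_users, 1) == 1)
                hw_reserved = false;                    /* last user frees the shared resources */
        pthread_mutex_unlock(&reserve_mutex);
}

int main(void)
{
        reserve_hardware();                             /* first user allocates */
        reserve_hardware();                             /* second user only bumps the count */
        release_hardware();
        release_hardware();                             /* last user frees */
        printf("still reserved after teardown: %d\n", hw_reserved);
        return 0;
}

In the patch itself, bts_event_init() now calls x86_reserve_hardware() and drops the BTS exclusivity again if that call fails, so the DS area is guaranteed to exist before a BTS event can be created.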
@@ -270,11 +270,7 @@ static bool check_hw_exists(void)
 
 static void hw_perf_event_destroy(struct perf_event *event)
 {
-        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
-                release_pmc_hardware();
-                release_ds_buffers();
-                mutex_unlock(&pmc_reserve_mutex);
-        }
+        x86_release_hardware();
 }
 
 void hw_perf_lbr_event_destroy(struct perf_event *event)
@@ -324,6 +320,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
         return x86_pmu_extra_regs(val, event);
 }
 
+int x86_reserve_hardware(void)
+{
+        int err = 0;
+
+        if (!atomic_inc_not_zero(&active_events)) {
+                mutex_lock(&pmc_reserve_mutex);
+                if (atomic_read(&active_events) == 0) {
+                        if (!reserve_pmc_hardware())
+                                err = -EBUSY;
+                        else
+                                reserve_ds_buffers();
+                }
+                if (!err)
+                        atomic_inc(&active_events);
+                mutex_unlock(&pmc_reserve_mutex);
+        }
+
+        return err;
+}
+
+void x86_release_hardware(void)
+{
+        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
+                release_pmc_hardware();
+                release_ds_buffers();
+                mutex_unlock(&pmc_reserve_mutex);
+        }
+}
+
 /*
  * Check if we can create event of a certain type (that no conflicting events
  * are present).
@@ -336,9 +361,10 @@ int x86_add_exclusive(unsigned int what)
                 return 0;
 
         mutex_lock(&pmc_reserve_mutex);
-        for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
+        for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
                 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
                         goto out;
+        }
 
         atomic_inc(&x86_pmu.lbr_exclusive[what]);
         ret = 0;
@@ -527,19 +553,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
         if (!x86_pmu_initialized())
                 return -ENODEV;
 
-        err = 0;
-        if (!atomic_inc_not_zero(&active_events)) {
-                mutex_lock(&pmc_reserve_mutex);
-                if (atomic_read(&active_events) == 0) {
-                        if (!reserve_pmc_hardware())
-                                err = -EBUSY;
-                        else
-                                reserve_ds_buffers();
-                }
-                if (!err)
-                        atomic_inc(&active_events);
-                mutex_unlock(&pmc_reserve_mutex);
-        }
+        err = x86_reserve_hardware();
         if (err)
                 return err;
 
...
@@ -703,6 +703,10 @@ int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
 
+int x86_reserve_hardware(void);
+
+void x86_release_hardware(void);
+
 void hw_perf_lbr_event_destroy(struct perf_event *event);
 
 int x86_setup_perfctr(struct perf_event *event);
...
@@ -483,17 +483,26 @@ static int bts_event_add(struct perf_event *event, int mode)
 
 static void bts_event_destroy(struct perf_event *event)
 {
+        x86_release_hardware();
         x86_del_exclusive(x86_lbr_exclusive_bts);
 }
 
 static int bts_event_init(struct perf_event *event)
 {
+        int ret;
+
         if (event->attr.type != bts_pmu.type)
                 return -ENOENT;
 
         if (x86_add_exclusive(x86_lbr_exclusive_bts))
                 return -EBUSY;
 
+        ret = x86_reserve_hardware();
+        if (ret) {
+                x86_del_exclusive(x86_lbr_exclusive_bts);
+                return ret;
+        }
+
         event->destroy = bts_event_destroy;
 
         return 0;
...