Commit efe951d3 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86: Fix perf,x86,cpuhp deadlock

More lockdep gifts, a 5-way lockup race:

	perf_event_create_kernel_counter()
	  perf_event_alloc()
	    perf_try_init_event()
	      x86_pmu_event_init()
		__x86_pmu_event_init()
		  x86_reserve_hardware()
 #0		    mutex_lock(&pmc_reserve_mutex);
		    reserve_ds_buffer()
 #1		      get_online_cpus()

	perf_event_release_kernel()
	  _free_event()
	    hw_perf_event_destroy()
	      x86_release_hardware()
 #0		mutex_lock(&pmc_reserve_mutex)
		release_ds_buffer()
 #1		  get_online_cpus()

 #1	do_cpu_up()
	  perf_event_init_cpu()
 #2	    mutex_lock(&pmus_lock)
 #3	    mutex_lock(&ctx->mutex)

	sys_perf_event_open()
	  mutex_lock_double()
 #3	    mutex_lock(ctx->mutex)
 #4	    mutex_lock_nested(ctx->mutex, 1);

	perf_try_init_event()
 #4	  mutex_lock_nested(ctx->mutex, 1)
	  x86_pmu_event_init()
	    intel_pmu_hw_config()
	      x86_add_exclusive()
 #0		mutex_lock(&pmc_reserve_mutex)

Fix it by using ordering constructs instead of locking.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0c7296ca
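The trace above boils down to two conflicting lock-acquisition orders. Below is a small userspace reduction of that cycle, with pthread mutexes standing in for the kernel locks; the names and the serial main() are illustrative only (a sketch of the ordering conflict, not the actual call chains):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pmc_reserve = PTHREAD_MUTEX_INITIALIZER;	/* #0 */
static pthread_mutex_t cpu_hotplug = PTHREAD_MUTEX_INITIALIZER;	/* #1 */
static pthread_mutex_t ctx_mutex   = PTHREAD_MUTEX_INITIALIZER;	/* #3/#4 */

/*
 * Order A -- the x86_reserve_hardware()/x86_release_hardware() paths:
 * #0 is held while #1 (get_online_cpus()) is taken.
 */
static void order_a(void)
{
	pthread_mutex_lock(&pmc_reserve);	/* #0 */
	pthread_mutex_lock(&cpu_hotplug);	/* #1 */
	pthread_mutex_unlock(&cpu_hotplug);
	pthread_mutex_unlock(&pmc_reserve);
}

/*
 * Order B -- do_cpu_up() holds #1, perf_event_init_cpu() takes the ctx
 * mutex (#3/#4), and perf_try_init_event() under that mutex ends up in
 * x86_add_exclusive(), which takes #0: the reverse order.
 */
static void order_b(void)
{
	pthread_mutex_lock(&cpu_hotplug);	/* #1 */
	pthread_mutex_lock(&ctx_mutex);		/* #3/#4 */
	pthread_mutex_lock(&pmc_reserve);	/* #0 */
	pthread_mutex_unlock(&pmc_reserve);
	pthread_mutex_unlock(&ctx_mutex);
	pthread_mutex_unlock(&cpu_hotplug);
}

int main(void)
{
	/*
	 * Run back to back nothing happens; run concurrently, a thread in
	 * order_a() holding #0 and a thread in order_b() holding #1 wait
	 * on each other forever -- the cycle lockdep reports above.
	 */
	order_a();
	order_b();
	puts("two acquisition orders demonstrated (serial run)");
	return 0;
}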
arch/x86/events/intel/ds.c

@@ -372,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
 static void release_pebs_buffer(int cpu)
 {
 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-	struct debug_store *ds = hwev->ds;
 	void *cea;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs)
 		return;
 
 	kfree(per_cpu(insn_buffer, cpu));
@@ -384,7 +383,6 @@ static void release_pebs_buffer(int cpu)
 	/* Clear the fixmap */
 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
 	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
-	ds->pebs_buffer_base = 0;
 	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
 	hwev->ds_pebs_vaddr = NULL;
 }
@@ -419,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
 static void release_bts_buffer(int cpu)
 {
 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-	struct debug_store *ds = hwev->ds;
 	void *cea;
 
-	if (!ds || !x86_pmu.bts)
+	if (!x86_pmu.bts)
 		return;
 
 	/* Clear the fixmap */
 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
 	ds_clear_cea(cea, BTS_BUFFER_SIZE);
-	ds->bts_buffer_base = 0;
 	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
 	hwev->ds_bts_vaddr = NULL;
 }
@@ -454,16 +450,22 @@ void release_ds_buffers(void)
 	if (!x86_pmu.bts && !x86_pmu.pebs)
 		return;
 
-	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
+		release_ds_buffer(cpu);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * Again, ignore errors from offline CPUs, they will no longer
+		 * observe cpu_hw_events.ds and not program the DS_AREA when
+		 * they come up.
+		 */
 		fini_debug_store_on_cpu(cpu);
+	}
 
 	for_each_possible_cpu(cpu) {
 		release_pebs_buffer(cpu);
 		release_bts_buffer(cpu);
-		release_ds_buffer(cpu);
 	}
-	put_online_cpus();
 }
 
 void reserve_ds_buffers(void)
@@ -483,8 +485,6 @@ void reserve_ds_buffers(void)
 	if (!x86_pmu.pebs)
 		pebs_err = 1;
 
-	get_online_cpus();
-
 	for_each_possible_cpu(cpu) {
 		if (alloc_ds_buffer(cpu)) {
 			bts_err = 1;
@@ -521,11 +521,14 @@ void reserve_ds_buffers(void)
 	if (x86_pmu.pebs && !pebs_err)
 		x86_pmu.pebs_active = 1;
 
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu) {
+		/*
+		 * Ignores wrmsr_on_cpu() errors for offline CPUs they
+		 * will get this call through intel_pmu_cpu_starting().
+		 */
 		init_debug_store_on_cpu(cpu);
-
-	put_online_cpus();
+	}
 }
 
 /*
...
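The hunks above drop the get_online_cpus()/put_online_cpus() serialization from the DS reserve/release paths, removing the #0 -> #1 edge of the cycle. Instead, the buffers are set up and torn down for every possible CPU, and the in-code comments note that CPUs which are offline at that point simply pick the state up (or skip it) in intel_pmu_cpu_starting() when they come online. A minimal userspace sketch of that publish-then-let-latecomers-observe pattern, using C11 release/acquire atomics as a stand-in for the kernel's ordering; all names here are hypothetical and this is not the kernel implementation:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

struct ds_state {
	int buffer;		/* stand-in for the per-CPU DS buffer */
	_Atomic int ready;	/* published with release semantics */
};

static struct ds_state per_cpu_ds[NR_POSSIBLE_CPUS];

/* "reserve" side: set up state for every possible CPU, then publish it. */
static void reserve_all(void)
{
	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++) {
		per_cpu_ds[cpu].buffer = 42;	/* allocate/initialize */
		/* release: the buffer write is visible before ready == 1 */
		atomic_store_explicit(&per_cpu_ds[cpu].ready, 1,
				      memory_order_release);
	}
}

/*
 * "cpu starting" side: a CPU coming online later looks at its own slot;
 * no lock shared with the reserve path is needed.
 */
static void *cpu_starting(void *arg)
{
	int cpu = (int)(long)arg;

	if (atomic_load_explicit(&per_cpu_ds[cpu].ready,
				 memory_order_acquire))
		printf("cpu %d programs DS buffer %d\n",
		       cpu, per_cpu_ds[cpu].buffer);
	else
		printf("cpu %d: nothing published yet, skip\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	reserve_all();
	/* simulate a CPU that was "offline" during reserve_all() coming up */
	pthread_create(&tid, NULL, cpu_starting, (void *)2L);
	pthread_join(tid, NULL);
	return 0;
}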