Commit ba040653 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86/intel: Simplify put_exclusive_constraints()

Don't bother taking the lock if we're not actually going to do
anything. Also, drop the _irqsave(); this is only ever called from
IRQ-disabled context.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8736e548
@@ -2130,7 +2130,6 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
 	int tid = cpuc->excl_thread_id;
 	struct intel_excl_states *xl;
-	unsigned long flags = 0; /* keep compiler happy */
 
 	/*
 	 * nothing needed if in group validation mode
@@ -2141,7 +2140,6 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	if (WARN_ON_ONCE(!excl_cntrs))
 		return;
 
-	xl = &excl_cntrs->states[tid];
 	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
 		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
 		if (!--cpuc->n_excl)
@@ -2149,22 +2147,25 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	}
 
 	/*
-	 * put_constraint may be called from x86_schedule_events()
-	 * which already has the lock held so here make locking
-	 * conditional
+	 * If event was actually assigned, then mark the counter state as
+	 * unused now.
 	 */
-	if (!xl->sched_started)
-		raw_spin_lock_irqsave(&excl_cntrs->lock, flags);
+	if (hwc->idx >= 0) {
+		xl = &excl_cntrs->states[tid];
+
+		/*
+		 * put_constraint may be called from x86_schedule_events()
+		 * which already has the lock held so here make locking
+		 * conditional.
+		 */
+		if (!xl->sched_started)
+			raw_spin_lock(&excl_cntrs->lock);
 
-	/*
-	 * if event was actually assigned, then mark the
-	 * counter state as unused now
-	 */
-	if (hwc->idx >= 0)
-		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
+		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
 
-	if (!xl->sched_started)
-		raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
+		if (!xl->sched_started)
+			raw_spin_unlock(&excl_cntrs->lock);
+	}
 }
 
 static void
...
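
For readers outside the kernel tree, here is a minimal userspace sketch
of the pattern the patch converges on: return early (taking no lock at
all) when the event was never assigned a counter, and lock
conditionally because the scheduling path already holds the lock. A
pthread mutex stands in for the raw spinlock, and all names here
(put_excl, excl_cntrs, etc.) are hypothetical stand-ins, not the
kernel's actual API; the kernel version additionally relies on IRQs
being disabled by the caller, which is why the _irqsave() variant could
be dropped.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define EXCL_UNUSED 0	/* stand-in for INTEL_EXCL_UNUSED */

/* Hypothetical stand-ins for the kernel's per-core structures. */
struct excl_states {
	bool sched_started;	/* true while the scheduler path holds the lock */
	int  state[8];
};

struct excl_cntrs {
	pthread_mutex_t lock;	/* plays the role of the raw spinlock */
	struct excl_states states[2];
};

/*
 * Mirror of the simplified logic: if the event was never assigned a
 * counter (idx < 0) there is no state to clear, so return without
 * touching the lock. Otherwise take the lock only when not called
 * from the scheduling path, which already holds it.
 */
static void put_excl(struct excl_cntrs *c, int tid, int idx)
{
	struct excl_states *xl;

	if (idx < 0)
		return;		/* nothing assigned, nothing to do */

	xl = &c->states[tid];

	if (!xl->sched_started)
		pthread_mutex_lock(&c->lock);

	xl->state[idx] = EXCL_UNUSED;

	if (!xl->sched_started)
		pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct excl_cntrs c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	put_excl(&c, 0, -1);	/* unassigned event: no lock taken */
	put_excl(&c, 0, 3);	/* assigned event: locked update */
	printf("state[3] = %d\n", c.states[0].state[3]);
	return 0;
}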