Commit 0c41e756 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86/intel: Clean up intel_commit_scheduling() placement

Move the code of intel_commit_scheduling() to its proper place, which is
in between intel_start_scheduling() and intel_stop_scheduling().

No change in functionality.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 17186ccd
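
For context, the three callbacks being reordered form a small transaction around counter assignment in the generic x86 scheduler. The sketch below is only an illustration of that call order, loosely modelled on the x86_schedule_events() flow of this kernel generation; it is not part of this commit, and collect_constraints_and_assign() is a hypothetical placeholder for the constraint-collection and perf_assign_events() step.

/*
 * Illustrative sketch only: start_scheduling() opens the scheduling
 * transaction, commit_scheduling() is invoked once per successfully
 * assigned event, and stop_scheduling() closes the transaction.
 * collect_constraints_and_assign() is a made-up placeholder; error
 * handling and constraint bookkeeping are omitted.
 */
static int sched_events_sketch(struct cpu_hw_events *cpuc, int n, int *assign)
{
	int i, unsched;

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	unsched = collect_constraints_and_assign(cpuc, n, assign);

	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}

Placing intel_commit_scheduling() between intel_start_scheduling() and intel_stop_scheduling() in the source thus mirrors the order in which the callbacks actually run.
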
arch/x86/kernel/cpu/perf_event.h
@@ -527,10 +527,10 @@ struct x86_pmu {
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 
-	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
-
 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
 
+	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
+
 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
 
 	struct event_constraint *event_constraints;
arch/x86/kernel/cpu/perf_event_intel.c
@@ -1934,6 +1934,34 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
 	memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
 }
 
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+	struct event_constraint *c = cpuc->event_constraint[idx];
+	struct intel_excl_states *xl;
+	int tid = cpuc->excl_thread_id;
+
+	if (cpuc->is_fake || !is_ht_workaround_enabled())
+		return;
+
+	if (WARN_ON_ONCE(!excl_cntrs))
+		return;
+
+	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+		return;
+
+	xl = &excl_cntrs->states[tid];
+
+	lockdep_assert_held(&excl_cntrs->lock);
+
+	if (cntr >= 0) {
+		if (c->flags & PERF_X86_EVENT_EXCL)
+			xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+		else
+			xl->init_state[cntr] = INTEL_EXCL_SHARED;
+	}
+}
+
 static void
 intel_stop_scheduling(struct cpu_hw_events *cpuc)
 {
@@ -2184,34 +2212,6 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
 	intel_put_excl_constraints(cpuc, event);
 }
 
-static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
-{
-	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-	struct event_constraint *c = cpuc->event_constraint[idx];
-	struct intel_excl_states *xl;
-	int tid = cpuc->excl_thread_id;
-
-	if (cpuc->is_fake || !is_ht_workaround_enabled())
-		return;
-
-	if (WARN_ON_ONCE(!excl_cntrs))
-		return;
-
-	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
-		return;
-
-	xl = &excl_cntrs->states[tid];
-
-	lockdep_assert_held(&excl_cntrs->lock);
-
-	if (cntr >= 0) {
-		if (c->flags & PERF_X86_EVENT_EXCL)
-			xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
-		else
-			xl->init_state[cntr] = INTEL_EXCL_SHARED;
-	}
-}
-
 static void intel_pebs_aliases_core2(struct perf_event *event)
 {
 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
@@ -2920,8 +2920,8 @@ static __init void intel_ht_bug(void)
 {
 	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
 
-	x86_pmu.commit_scheduling = intel_commit_scheduling;
 	x86_pmu.start_scheduling = intel_start_scheduling;
+	x86_pmu.commit_scheduling = intel_commit_scheduling;
 	x86_pmu.stop_scheduling = intel_stop_scheduling;
 }
@@ -3377,8 +3377,8 @@ static __init int fixup_ht_bug(void)
 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
-	x86_pmu.commit_scheduling = NULL;
 	x86_pmu.start_scheduling = NULL;
+	x86_pmu.commit_scheduling = NULL;
 	x86_pmu.stop_scheduling = NULL;
 
 	watchdog_nmi_enable_all();