Commit acba3c7e authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf, powerpc: Fix up flush_branch_stack() users

The recent LBR rework for x86 left a stray flush_branch_stack() user in
the PowerPC code, fix that up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joel Stanley <joel@jms.id.au>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 384b6055
...@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) ...@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_flush_branch_stack(void) {} static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { } static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */ #endif /* CONFIG_PPC32 */
...@@ -350,6 +350,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event) ...@@ -350,6 +350,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
cpuhw->bhrb_context = event->ctx; cpuhw->bhrb_context = event->ctx;
} }
cpuhw->bhrb_users++; cpuhw->bhrb_users++;
perf_sched_cb_inc(event->ctx->pmu);
} }
static void power_pmu_bhrb_disable(struct perf_event *event) static void power_pmu_bhrb_disable(struct perf_event *event)
...@@ -361,6 +362,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event) ...@@ -361,6 +362,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
cpuhw->bhrb_users--; cpuhw->bhrb_users--;
WARN_ON_ONCE(cpuhw->bhrb_users < 0); WARN_ON_ONCE(cpuhw->bhrb_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) { if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other /* BHRB cannot be turned off when other
...@@ -375,9 +377,12 @@ static void power_pmu_bhrb_disable(struct perf_event *event) ...@@ -375,9 +377,12 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to /* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch. * mingle with the other process's entries during context switch.
*/ */
static void power_pmu_flush_branch_stack(void) static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{ {
if (ppmu->bhrb_nr) if (!ppmu->bhrb_nr)
return;
if (sched_in)
power_pmu_bhrb_reset(); power_pmu_bhrb_reset();
} }
/* Calculate the to address for a branch */ /* Calculate the to address for a branch */
...@@ -1901,7 +1906,7 @@ static struct pmu power_pmu = { ...@@ -1901,7 +1906,7 @@ static struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn, .cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn, .commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx, .event_idx = power_pmu_event_idx,
.flush_branch_stack = power_pmu_flush_branch_stack, .sched_task = power_pmu_sched_task,
}; };
/* /*
......
...@@ -261,11 +261,6 @@ struct pmu { ...@@ -261,11 +261,6 @@ struct pmu {
*/ */
int (*event_idx) (struct perf_event *event); /*optional */ int (*event_idx) (struct perf_event *event); /*optional */
/*
* flush branch stack on context-switches (needed in cpu-wide mode)
*/
void (*flush_branch_stack) (void);
/* /*
* context-switches callback * context-switches callback
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment