Commit ad5133b7 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf: Default PMU ops

Provide default implementations for the pmu txn methods; this
allows us to remove some conditional code.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 33696fc0
...@@ -565,8 +565,8 @@ struct pmu { ...@@ -565,8 +565,8 @@ struct pmu {
int *pmu_disable_count; int *pmu_disable_count;
void (*pmu_enable) (struct pmu *pmu); void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); void (*pmu_disable) (struct pmu *pmu); /* optional */
/* /*
* Should return -ENOENT when the @event doesn't match this PMU. * Should return -ENOENT when the @event doesn't match this PMU.
...@@ -590,19 +590,19 @@ struct pmu { ...@@ -590,19 +590,19 @@ struct pmu {
* Start the transaction, after this ->enable() doesn't need to * Start the transaction, after this ->enable() doesn't need to
* do schedulability tests. * do schedulability tests.
*/ */
void (*start_txn) (struct pmu *pmu); void (*start_txn) (struct pmu *pmu); /* optional */
/* /*
* If ->start_txn() disabled the ->enable() schedulability test * If ->start_txn() disabled the ->enable() schedulability test
* then ->commit_txn() is required to perform one. On success * then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept * the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called. * open until ->cancel_txn() is called.
*/ */
int (*commit_txn) (struct pmu *pmu); int (*commit_txn) (struct pmu *pmu); /* optional */
/* /*
* Will cancel the transaction, assumes ->disable() is called * Will cancel the transaction, assumes ->disable() is called
* for each successful ->enable() during the transaction. * for each successful ->enable() during the transaction.
*/ */
void (*cancel_txn) (struct pmu *pmu); void (*cancel_txn) (struct pmu *pmu); /* optional */
}; };
/** /**
......
...@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event, ...@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event,
{ {
struct perf_event *event, *partial_group = NULL; struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu; struct pmu *pmu = group_event->pmu;
bool txn = false;
if (group_event->state == PERF_EVENT_STATE_OFF) if (group_event->state == PERF_EVENT_STATE_OFF)
return 0; return 0;
/* Check if group transaction availabe */ pmu->start_txn(pmu);
if (pmu->start_txn)
txn = true;
if (txn)
pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx)) { if (event_sched_in(group_event, cpuctx, ctx)) {
if (txn) pmu->cancel_txn(pmu);
pmu->cancel_txn(pmu);
return -EAGAIN; return -EAGAIN;
} }
...@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event, ...@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
} }
} }
if (!txn || !pmu->commit_txn(pmu)) if (!pmu->commit_txn(pmu))
return 0; return 0;
group_error: group_error:
...@@ -717,8 +710,7 @@ group_sched_in(struct perf_event *group_event, ...@@ -717,8 +710,7 @@ group_sched_in(struct perf_event *group_event,
} }
event_sched_out(group_event, cpuctx, ctx); event_sched_out(group_event, cpuctx, ctx);
if (txn) pmu->cancel_txn(pmu);
pmu->cancel_txn(pmu);
return -EAGAIN; return -EAGAIN;
} }
...@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus); ...@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock); static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu; static struct srcu_struct pmus_srcu;
/*
 * Default no-op for the optional void pmu callbacks: installed by
 * perf_pmu_register() as ->pmu_enable()/->pmu_disable() and, when the
 * pmu has no enable/disable pair, as ->start_txn()/->cancel_txn(), so
 * callers may invoke them unconditionally.
 */
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
/*
 * Default no-op for the optional int-returning pmu callbacks: installed
 * by perf_pmu_register() as ->commit_txn() when no transaction support
 * exists.  Always returns 0, i.e. the transaction "commits" successfully.
 */
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
/*
 * Default ->start_txn() stub for pmus that provide pmu_enable/pmu_disable:
 * disable the pmu for the duration of the transaction so the individual
 * ->enable() calls can be batched into fewer hardware accesses.
 * Undone by perf_pmu_commit_txn()/perf_pmu_cancel_txn().
 */
static void perf_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
}
/*
 * Default ->commit_txn() stub: re-enable the pmu (pairing with the
 * disable done in perf_pmu_start_txn()) and report success.  No
 * schedulability test is performed here; per the struct pmu contract
 * that is only required when ->start_txn() disabled the ->enable() test.
 */
static int perf_pmu_commit_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
return 0;
}
/*
 * Default ->cancel_txn() stub: simply re-enable the pmu, undoing the
 * disable from perf_pmu_start_txn().  Nothing else to roll back since
 * the default transaction keeps no state.
 */
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
}
int perf_pmu_register(struct pmu *pmu) int perf_pmu_register(struct pmu *pmu)
{ {
int ret; int ret;
...@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu) ...@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
pmu->pmu_disable_count = alloc_percpu(int); pmu->pmu_disable_count = alloc_percpu(int);
if (!pmu->pmu_disable_count) if (!pmu->pmu_disable_count)
goto unlock; goto unlock;
if (!pmu->start_txn) {
if (pmu->pmu_enable) {
/*
* If we have pmu_enable/pmu_disable calls, install
* transaction stubs that use that to try and batch
* hardware accesses.
*/
pmu->start_txn = perf_pmu_start_txn;
pmu->commit_txn = perf_pmu_commit_txn;
pmu->cancel_txn = perf_pmu_cancel_txn;
} else {
pmu->start_txn = perf_pmu_nop_void;
pmu->commit_txn = perf_pmu_nop_int;
pmu->cancel_txn = perf_pmu_nop_void;
}
}
if (!pmu->pmu_enable) {
pmu->pmu_enable = perf_pmu_nop_void;
pmu->pmu_disable = perf_pmu_nop_void;
}
list_add_rcu(&pmu->entry, &pmus); list_add_rcu(&pmu->entry, &pmus);
ret = 0; ret = 0;
unlock: unlock:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment