Commit b0a873eb authored by Peter Zijlstra, committed by Ingo Molnar

perf: Register PMU implementations

Simple registration interface for struct pmu; this provides the
infrastructure for removing all the weak functions.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 51b0fe39
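
Every architecture below follows the same mechanical conversion: the weak hw_perf_event_init(), which returned a struct pmu * with ERR_PTR()-encoded failures, becomes a per-PMU event_init() callback returning a plain errno; the struct pmu definition moves below that callback and gains an .event_init member; and the architecture's init path calls perf_pmu_register(). Note the error-code shift: an event type a PMU does not recognise now yields -ENOENT ("not my event") rather than -EINVAL, so the core can keep offering the event to other registered PMUs. A minimal sketch of the converted driver shape, with hypothetical my_pmu_* names standing in for the per-arch helpers (assumed to exist; not part of this commit):

/* Sketch only: my_pmu_* identifiers are hypothetical stand-ins. */
static int my_pmu_event_init(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;	/* not our event class; core tries the next pmu */
	}

	return __my_pmu_event_init(event);	/* plain errno, no ERR_PTR() */
}

static struct pmu pmu = {
	.event_init	= my_pmu_event_init,
	.enable		= my_pmu_enable,
	.disable	= my_pmu_disable,
	.read		= my_pmu_read,
};

static int __init my_pmu_driver_init(void)
{
	perf_pmu_register(&pmu);	/* replaces the weak hw_perf_event_init() hook */
	return 0;
}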
@@ -642,34 +642,39 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init	= alpha_pmu_event_init,
+	.enable		= alpha_pmu_enable,
+	.disable	= alpha_pmu_disable,
+	.read		= alpha_pmu_read,
+	.unthrottle	= alpha_pmu_unthrottle,
+};
+
 /*
  * Main entry point - enable HW performance counters.
@@ -838,5 +843,7 @@ void __init init_hw_perf_events(void)
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
 	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }
@@ -306,12 +306,7 @@ armpmu_enable(struct perf_event *event)
 	return err;
 }
 
-static struct pmu pmu = {
-	.enable	    = armpmu_enable,
-	.disable    = armpmu_disable,
-	.unthrottle = armpmu_unthrottle,
-	.read	    = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -491,20 +486,29 @@ __hw_perf_event_init(struct perf_event *event)
 	return err;
 }
 
-struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
 	int err = 0;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!armpmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
 		if (atomic_read(&active_events) > perf_max_events) {
 			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
+			return -ENOSPC;
 		}
 
 		mutex_lock(&pmu_reserve_mutex);
@@ -518,15 +522,23 @@ hw_perf_event_init(struct perf_event *event)
 	}
 
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = __hw_perf_event_init(event);
 	if (err)
 		hw_perf_event_destroy(event);
 
-	return err ? ERR_PTR(err) : &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init = armpmu_event_init,
+	.enable	    = armpmu_enable,
+	.disable    = armpmu_disable,
+	.unthrottle = armpmu_unthrottle,
+	.read	    = armpmu_read,
+};
+
 void
 hw_perf_enable(void)
 {
@@ -2994,6 +3006,8 @@ init_hw_perf_events(void)
 		perf_max_events = -1;
 	}
 
+	perf_pmu_register(&pmu);
+
 	return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -904,16 +904,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-struct pmu power_pmu = {
-	.enable		= power_pmu_enable,
-	.disable	= power_pmu_disable,
-	.read		= power_pmu_read,
-	.unthrottle	= power_pmu_unthrottle,
-	.start_txn	= power_pmu_start_txn,
-	.cancel_txn	= power_pmu_cancel_txn,
-	.commit_txn	= power_pmu_commit_txn,
-};
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1014,7 +1004,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
 {
 	u64 ev;
 	unsigned long flags;
@@ -1026,25 +1016,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 
 	if (!ppmu)
-		return ERR_PTR(-ENXIO);
+		return -ENOENT;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-			return ERR_PTR(-EOPNOTSUPP);
+			return -EOPNOTSUPP;
 		ev = ppmu->generic_events[ev];
 		break;
 	case PERF_TYPE_HW_CACHE:
 		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 		break;
 	case PERF_TYPE_RAW:
 		ev = event->attr.config;
 		break;
 	default:
-		return ERR_PTR(-EINVAL);
+		return -ENOENT;
 	}
+
 	event->hw.config_base = ev;
 	event->hw.idx = 0;
@@ -1081,7 +1073,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 		 */
 		ev = normal_pmc_alternative(ev, flags);
 		if (!ev)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 }
@@ -1095,19 +1087,19 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 		n = collect_events(event->group_leader, ppmu->n_counter - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 	events[n] = ev;
 	ctrs[n] = event;
 	cflags[n] = flags;
 	if (check_excludes(ctrs, cflags, n, 1))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	cpuhw = &get_cpu_var(cpu_hw_events);
 	err = power_check_constraints(cpuhw, events, cflags, n + 1);
 	put_cpu_var(cpu_hw_events);
 	if (err)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	event->hw.config = events[n];
 	event->hw.event_base = cflags[n];
@@ -1132,11 +1124,20 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	}
 	event->destroy = hw_perf_event_destroy;
 
-	if (err)
-		return ERR_PTR(err);
-	return &power_pmu;
+	return err;
 }
 
+struct pmu power_pmu = {
+	.event_init	= power_pmu_event_init,
+	.enable		= power_pmu_enable,
+	.disable	= power_pmu_disable,
+	.read		= power_pmu_read,
+	.unthrottle	= power_pmu_unthrottle,
+	.start_txn	= power_pmu_start_txn,
+	.cancel_txn	= power_pmu_cancel_txn,
+	.commit_txn	= power_pmu_commit_txn,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
@@ -1342,6 +1343,7 @@ int register_power_pmu(struct power_pmu *pmu)
 		freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+	perf_pmu_register(&power_pmu);
 	perf_cpu_notifier(power_pmu_notifier);
 
 	return 0;
@@ -378,13 +378,6 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local_irq_restore(flags);
 }
 
-static struct pmu fsl_emb_pmu = {
-	.enable		= fsl_emb_pmu_enable,
-	.disable	= fsl_emb_pmu_disable,
-	.read		= fsl_emb_pmu_read,
-	.unthrottle	= fsl_emb_pmu_unthrottle,
-};
-
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -428,7 +421,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
 {
 	u64 ev;
 	struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +434,14 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	case PERF_TYPE_HARDWARE:
 		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-			return ERR_PTR(-EOPNOTSUPP);
+			return -EOPNOTSUPP;
 		ev = ppmu->generic_events[ev];
 		break;
 
 	case PERF_TYPE_HW_CACHE:
 		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 		break;
 
 	case PERF_TYPE_RAW:
@@ -456,12 +449,12 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 		break;
 
 	default:
-		return ERR_PTR(-EINVAL);
+		return -ENOENT;
 	}
 
 	event->hw.config = ppmu->xlate_event(ev);
 	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/*
 	 * If this is in a group, check if it can go on with all the
@@ -473,7 +466,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 		n = collect_events(event->group_leader,
 		                   ppmu->n_counter - 1, events);
 		if (n < 0)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +477,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 		}
 
 		if (num_restricted >= ppmu->n_restricted)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 
 	event->hw.idx = -1;
@@ -497,7 +490,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (event->attr.exclude_kernel)
 		event->hw.config_base |= PMLCA_FCS;
 	if (event->attr.exclude_idle)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	event->hw.last_period = event->hw.sample_period;
 	local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +516,17 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	}
 	event->destroy = hw_perf_event_destroy;
 
-	if (err)
-		return ERR_PTR(err);
-	return &fsl_emb_pmu;
+	return err;
 }
 
+static struct pmu fsl_emb_pmu = {
+	.event_init	= fsl_emb_pmu_event_init,
+	.enable		= fsl_emb_pmu_enable,
+	.disable	= fsl_emb_pmu_disable,
+	.read		= fsl_emb_pmu_read,
+	.unthrottle	= fsl_emb_pmu_unthrottle,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
@@ -651,5 +650,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
 	pr_info("%s performance monitor hardware support registered\n",
 		pmu->name);
 
+	perf_pmu_register(&fsl_emb_pmu);
+
 	return 0;
 }
@@ -257,26 +257,38 @@ static void sh_pmu_read(struct perf_event *event)
 	sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static struct pmu pmu = {
-	.enable		= sh_pmu_enable,
-	.disable	= sh_pmu_disable,
-	.read		= sh_pmu_read,
-};
-
-struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_event_init(event);
+	int err;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_HARDWARE:
+		err = __hw_perf_event_init(event);
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (unlikely(err)) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
-	return &pmu;
+
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init	= sh_pmu_event_init,
+	.enable		= sh_pmu_enable,
+	.disable	= sh_pmu_disable,
+	.read		= sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
@@ -325,6 +337,7 @@ int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
 
 	WARN_ON(pmu->num_events > MAX_HWEVENTS);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(sh_pmu_notifier);
 	return 0;
 }
@@ -1025,7 +1025,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,17 +1038,27 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	if (attr->type == PERF_TYPE_HARDWARE) {
+	switch (attr->type) {
+	case PERF_TYPE_HARDWARE:
 		if (attr->config >= sparc_pmu->max_events)
 			return -EINVAL;
 		pmap = sparc_pmu->event_map(attr->config);
-	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		break;
+
+	case PERF_TYPE_HW_CACHE:
 		pmap = sparc_map_cache_event(attr->config);
 		if (IS_ERR(pmap))
 			return PTR_ERR(pmap);
-	} else
+		break;
+
+	case PERF_TYPE_RAW:
 		return -EOPNOTSUPP;
 
+	default:
+		return -ENOENT;
+	}
+
 	/* We save the enable bits in the config_base.  */
 	hwc->config_base = sparc_pmu->irq_bit;
 	if (!attr->exclude_user)
@@ -1143,6 +1153,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
+	.event_init	= sparc_pmu_event_init,
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
 	.read		= sparc_pmu_read,
@@ -1152,15 +1163,6 @@ static struct pmu pmu = {
 	.commit_txn	= sparc_pmu_commit_txn,
 };
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = __hw_perf_event_init(event);
-
-	if (err)
-		return ERR_PTR(err);
-	return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
@@ -1280,6 +1282,7 @@ void __init init_hw_perf_events(void)
 	/* All sparc64 PMUs currently have 2 events.  */
 	perf_max_events = 2;
 
+	perf_pmu_register(&pmu);
 	register_die_notifier(&perf_event_nmi_notifier);
 }
@@ -530,7 +530,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
@@ -1414,6 +1414,7 @@ void __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(x86_pmu_notifier);
 }
 
@@ -1483,18 +1484,6 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-static struct pmu pmu = {
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
-	.start		= x86_pmu_start,
-	.stop		= x86_pmu_stop,
-	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
-	.start_txn	= x86_pmu_start_txn,
-	.cancel_txn	= x86_pmu_cancel_txn,
-	.commit_txn	= x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1569,12 +1558,22 @@ static int validate_group(struct perf_event *event)
 	return ret;
 }
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
 	struct pmu *tmp;
 	int err;
 
-	err = __hw_perf_event_init(event);
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	err = __x86_pmu_event_init(event);
 	if (!err) {
 		/*
 		 * we temporarily connect event to its pmu
@@ -1594,12 +1593,24 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init	= x86_pmu_event_init,
+	.enable		= x86_pmu_enable,
+	.disable	= x86_pmu_disable,
+	.start		= x86_pmu_start,
+	.stop		= x86_pmu_stop,
+	.read		= x86_pmu_read,
+	.unthrottle	= x86_pmu_unthrottle,
+	.start_txn	= x86_pmu_start_txn,
+	.cancel_txn	= x86_pmu_cancel_txn,
+	.commit_txn	= x86_pmu_commit_txn,
+};
+
 /*
  * callchain support
 */
@@ -561,6 +561,13 @@ struct perf_event;
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
+	struct list_head		entry;
+
+	/*
+	 * Should return -ENOENT when the @event doesn't match this pmu
+	 */
+	int (*event_init)		(struct perf_event *event);
+
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
 	int (*start)			(struct perf_event *event);
@@ -849,7 +856,8 @@ struct perf_output_handle {
  */
 extern int perf_max_events;
 
-extern struct pmu *hw_perf_event_init(struct perf_event *event);
+extern int perf_pmu_register(struct pmu *pmu);
+extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
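
The new entry list head and the -ENOENT convention documented above imply a core-side dispatch loop: event creation walks the list of registered PMUs and offers the event to each event_init() in turn, treating -ENOENT as "not mine, try the next one" and any other error as a real failure. The core side of this commit is not shown in this view, so the following is a hedged sketch of that implied dispatch, not the commit's verbatim code; the function and list names are guesses:

/* Hypothetical sketch of the dispatch the -ENOENT contract implies. */
static struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu;
	int ret;

	list_for_each_entry(pmu, &pmus, entry) {	/* pmus: list built by perf_pmu_register() */
		ret = pmu->event_init(event);
		if (!ret)
			return pmu;		/* this PMU claimed the event */
		if (ret != -ENOENT)
			return ERR_PTR(ret);	/* real failure: stop probing */
	}

	return ERR_PTR(-ENOENT);		/* no registered PMU matched */
}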
@@ -565,6 +565,34 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
 	.priority = 0x7fffffff
 };
 
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+	release_bp_slot(event);
+}
+
+static int hw_breakpoint_event_init(struct perf_event *bp)
+{
+	int err;
+
+	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
+		return -ENOENT;
+
+	err = register_perf_hw_breakpoint(bp);
+	if (err)
+		return err;
+
+	bp->destroy = bp_perf_event_destroy;
+
+	return 0;
+}
+
+static struct pmu perf_breakpoint = {
+	.event_init	= hw_breakpoint_event_init,
+	.enable		= arch_install_hw_breakpoint,
+	.disable	= arch_uninstall_hw_breakpoint,
+	.read		= hw_breakpoint_pmu_read,
+};
+
 static int __init init_hw_breakpoint(void)
 {
 	unsigned int **task_bp_pinned;
@@ -586,6 +614,8 @@ static int __init init_hw_breakpoint(void)
 
 	constraints_initialized = 1;
 
+	perf_pmu_register(&perf_breakpoint);
+
 	return register_die_notifier(&hw_breakpoint_exceptions_nb);
 
  err_alloc:
@@ -601,8 +631,3 @@ static int __init init_hw_breakpoint(void)
 
 core_initcall(init_hw_breakpoint);
 
-struct pmu perf_ops_bp = {
-	.enable		= arch_install_hw_breakpoint,
-	.disable	= arch_uninstall_hw_breakpoint,
-	.read		= hw_breakpoint_pmu_read,
-};
[The remaining diff is collapsed in this view; per the header change above it contains the implementation of perf_pmu_register() and perf_pmu_unregister(), presumably in kernel/perf_event.c.]
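
Given the struct list_head entry member added to struct pmu, the collapsed registration functions plausibly reduce to list maintenance under a lock. A hedged sketch, assuming a global list and mutex named pmus and pmus_lock (names not confirmed by this view):

/* Assumed shape of the collapsed core code; identifiers are guesses. */
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);

int perf_pmu_register(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_add_tail(&pmu->entry, &pmus);	/* uses the new struct pmu::entry */
	mutex_unlock(&pmus_lock);

	return 0;
}

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del(&pmu->entry);
	mutex_unlock(&pmus_lock);
}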