Commit 513c99ce authored by Sudeep KarkadaNagesha, committed by Will Deacon

ARM: perf: allocate CPU PMU dynamically at probe time

Supporting multiple, heterogeneous CPU PMUs requires us to allocate the
arm_pmu structures dynamically as the devices are probed.

This patch removes the static structure definitions for each CPU PMU
type and instead passes pointers to the PMU-specific init functions.
Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent e50c5418
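The same refactoring pattern runs through every hunk below: instead of each backend exporting a pointer to its own static struct arm_pmu, the probe path now allocates one structure and hands it to an init function that fills it in and returns 0 or an error code. A minimal, self-contained user-space sketch of that pattern follows; the struct pmu_desc type and the demo_* names are illustrative stand-ins, not the kernel's API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct arm_pmu: a name plus a few callbacks. */
struct pmu_desc {
	const char *name;
	int num_events;
	void (*start)(void);
};

static void demo_start(void)
{
	puts("pmu started");
}

/*
 * New-style init: fill in a caller-allocated descriptor and return an
 * error code, so heterogeneous PMUs can each get their own instance.
 */
static int demo_pmu_init(struct pmu_desc *pmu)
{
	pmu->name = "demo";
	pmu->num_events = 3;
	pmu->start = demo_start;
	return 0;
}

/* Probe path: allocate the descriptor dynamically, then let init fill it. */
static struct pmu_desc *probe_pmu(void)
{
	struct pmu_desc *pmu = calloc(1, sizeof(*pmu));

	if (!pmu)
		return NULL;
	if (demo_pmu_init(pmu)) {	/* non-zero return means "not supported" */
		free(pmu);
		return NULL;
	}
	return pmu;
}

int main(void)
{
	struct pmu_desc *pmu = probe_pmu();

	if (!pmu)
		return ENODEV;
	printf("probed PMU: %s (%d events)\n", pmu->name, pmu->num_events);
	pmu->start();
	free(pmu);
	return 0;
}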
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <asm/cputype.h>
@@ -195,13 +196,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
 
 /*
  * CPU PMU identification and probing.
  */
-static struct arm_pmu *__devinit probe_current_pmu(void)
+static int __devinit probe_current_pmu(struct arm_pmu *pmu)
 {
-	struct arm_pmu *pmu = NULL;
 	int cpu = get_cpu();
 	unsigned long cpuid = read_cpuid_id();
 	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
 	unsigned long part_number = (cpuid & 0xFFF0);
+	int ret = -ENODEV;
 
 	pr_info("probing PMU on CPU %d\n", cpu);
@@ -211,25 +212,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			pmu = armv6pmu_init();
+			ret = armv6pmu_init(pmu);
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			pmu = armv6mpcore_pmu_init();
+			ret = armv6mpcore_pmu_init(pmu);
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			pmu = armv7_a8_pmu_init();
+			ret = armv7_a8_pmu_init(pmu);
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			pmu = armv7_a9_pmu_init();
+			ret = armv7_a9_pmu_init(pmu);
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			pmu = armv7_a5_pmu_init();
+			ret = armv7_a5_pmu_init(pmu);
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			pmu = armv7_a15_pmu_init();
+			ret = armv7_a15_pmu_init(pmu);
 			break;
 		case 0xC070:	/* Cortex-A7 */
-			pmu = armv7_a7_pmu_init();
+			ret = armv7_a7_pmu_init(pmu);
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -237,39 +238,51 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			pmu = xscale1pmu_init();
+			ret = xscale1pmu_init(pmu);
 			break;
 		case 2:
-			pmu = xscale2pmu_init();
+			ret = xscale2pmu_init(pmu);
 			break;
 		}
 	}
 
 	put_cpu();
-	return pmu;
+	return ret;
 }
 
 static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	struct arm_pmu *(*init_fn)(void);
+	int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
 
 	if (cpu_pmu) {
 		pr_info("attempt to register multiple PMU devices!");
 		return -ENOSPC;
 	}
 
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!");
+		return -ENOMEM;
+	}
+
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
-		cpu_pmu = init_fn();
+		ret = init_fn(pmu);
 	} else {
-		cpu_pmu = probe_current_pmu();
+		ret = probe_current_pmu(pmu);
 	}
 
-	if (!cpu_pmu)
-		return -ENODEV;
+	if (ret) {
+		pr_info("failed to register PMU devices!");
+		kfree(pmu);
+		return ret;
+	}
 
+	cpu_pmu = pmu;
 	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
 	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -649,24 +649,22 @@ static int armv6_map_event(struct perf_event *event)
 				&armv6_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6pmu = {
-	.name = "v6",
-	.handle_irq = armv6pmu_handle_irq,
-	.enable = armv6pmu_enable_event,
-	.disable = armv6pmu_disable_event,
-	.read_counter = armv6pmu_read_counter,
-	.write_counter = armv6pmu_write_counter,
-	.get_event_idx = armv6pmu_get_event_idx,
-	.start = armv6pmu_start,
-	.stop = armv6pmu_stop,
-	.map_event = armv6_map_event,
-	.num_events = 3,
-	.max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &armv6pmu;
+	cpu_pmu->name = "v6";
+	cpu_pmu->handle_irq = armv6pmu_handle_irq;
+	cpu_pmu->enable = armv6pmu_enable_event;
+	cpu_pmu->disable = armv6pmu_disable_event;
+	cpu_pmu->read_counter = armv6pmu_read_counter;
+	cpu_pmu->write_counter = armv6pmu_write_counter;
+	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+	cpu_pmu->start = armv6pmu_start;
+	cpu_pmu->stop = armv6pmu_stop;
+	cpu_pmu->map_event = armv6_map_event;
+	cpu_pmu->num_events = 3;
+	cpu_pmu->max_period = (1LLU << 32) - 1;
+	return 0;
 }
 
 /*
@@ -683,33 +681,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
 				&armv6mpcore_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6mpcore_pmu = {
-	.name = "v6mpcore",
-	.handle_irq = armv6pmu_handle_irq,
-	.enable = armv6pmu_enable_event,
-	.disable = armv6mpcore_pmu_disable_event,
-	.read_counter = armv6pmu_read_counter,
-	.write_counter = armv6pmu_write_counter,
-	.get_event_idx = armv6pmu_get_event_idx,
-	.start = armv6pmu_start,
-	.stop = armv6pmu_stop,
-	.map_event = armv6mpcore_map_event,
-	.num_events = 3,
-	.max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &armv6mpcore_pmu;
+	cpu_pmu->name = "v6mpcore";
+	cpu_pmu->handle_irq = armv6pmu_handle_irq;
+	cpu_pmu->enable = armv6pmu_enable_event;
+	cpu_pmu->disable = armv6mpcore_pmu_disable_event;
+	cpu_pmu->read_counter = armv6pmu_read_counter;
+	cpu_pmu->write_counter = armv6pmu_write_counter;
+	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+	cpu_pmu->start = armv6pmu_start;
+	cpu_pmu->stop = armv6pmu_stop;
+	cpu_pmu->map_event = armv6mpcore_map_event;
+	cpu_pmu->num_events = 3;
+	cpu_pmu->max_period = (1LLU << 32) - 1;
+	return 0;
 }
 
 #else
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,8 +18,6 @@
 #ifdef CONFIG_CPU_V7
 
-static struct arm_pmu armv7pmu;
-
 /*
  * Common ARMv7 event types
  *
@@ -1014,7 +1012,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * We only need to set the event for the cycle counter if we
 	 * have the ability to perform event filtering.
 	 */
-	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1232,17 +1230,18 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv7pmu = {
-	.handle_irq = armv7pmu_handle_irq,
-	.enable = armv7pmu_enable_event,
-	.disable = armv7pmu_disable_event,
-	.read_counter = armv7pmu_read_counter,
-	.write_counter = armv7pmu_write_counter,
-	.get_event_idx = armv7pmu_get_event_idx,
-	.start = armv7pmu_start,
-	.stop = armv7pmu_stop,
-	.reset = armv7pmu_reset,
-	.max_period = (1LLU << 32) - 1,
+static void armv7pmu_init(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->handle_irq = armv7pmu_handle_irq;
+	cpu_pmu->enable = armv7pmu_enable_event;
+	cpu_pmu->disable = armv7pmu_disable_event;
+	cpu_pmu->read_counter = armv7pmu_read_counter;
+	cpu_pmu->write_counter = armv7pmu_write_counter;
+	cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+	cpu_pmu->start = armv7pmu_start;
+	cpu_pmu->stop = armv7pmu_stop;
+	cpu_pmu->reset = armv7pmu_reset;
+	cpu_pmu->max_period = (1LLU << 32) - 1;
 };
 
 static u32 __devinit armv7_read_num_pmnc_events(void)
@@ -1256,70 +1255,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static struct arm_pmu *__devinit armv7_a8_pmu_init(void)
+static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name = "ARMv7 Cortex-A8";
-	armv7pmu.map_event = armv7_a8_map_event;
-	armv7pmu.num_events = armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A8";
+	cpu_pmu->map_event = armv7_a8_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a9_pmu_init(void)
+static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name = "ARMv7 Cortex-A9";
-	armv7pmu.map_event = armv7_a9_map_event;
-	armv7pmu.num_events = armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A9";
+	cpu_pmu->map_event = armv7_a9_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a5_pmu_init(void)
+static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name = "ARMv7 Cortex-A5";
-	armv7pmu.map_event = armv7_a5_map_event;
-	armv7pmu.num_events = armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A5";
+	cpu_pmu->map_event = armv7_a5_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a15_pmu_init(void)
+static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name = "ARMv7 Cortex-A15";
-	armv7pmu.map_event = armv7_a15_map_event;
-	armv7pmu.num_events = armv7_read_num_pmnc_events();
-	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A15";
+	cpu_pmu->map_event = armv7_a15_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a7_pmu_init(void)
+static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name = "ARMv7 Cortex-A7";
-	armv7pmu.map_event = armv7_a7_map_event;
-	armv7pmu.num_events = armv7_read_num_pmnc_events();
-	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A7";
+	cpu_pmu->map_event = armv7_a7_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
 }
 #else
-static struct arm_pmu *__devinit armv7_a8_pmu_init(void)
+static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a9_pmu_init(void)
+static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a5_pmu_init(void)
+static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a15_pmu_init(void)
+static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a7_pmu_init(void)
+static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif /* CONFIG_CPU_V7 */
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -434,24 +434,22 @@ static int xscale_map_event(struct perf_event *event)
 				&xscale_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu xscale1pmu = {
-	.name = "xscale1",
-	.handle_irq = xscale1pmu_handle_irq,
-	.enable = xscale1pmu_enable_event,
-	.disable = xscale1pmu_disable_event,
-	.read_counter = xscale1pmu_read_counter,
-	.write_counter = xscale1pmu_write_counter,
-	.get_event_idx = xscale1pmu_get_event_idx,
-	.start = xscale1pmu_start,
-	.stop = xscale1pmu_stop,
-	.map_event = xscale_map_event,
-	.num_events = 3,
-	.max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit xscale1pmu_init(void)
+static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &xscale1pmu;
+	cpu_pmu->name = "xscale1";
+	cpu_pmu->handle_irq = xscale1pmu_handle_irq;
+	cpu_pmu->enable = xscale1pmu_enable_event;
+	cpu_pmu->disable = xscale1pmu_disable_event;
+	cpu_pmu->read_counter = xscale1pmu_read_counter;
+	cpu_pmu->write_counter = xscale1pmu_write_counter;
+	cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+	cpu_pmu->start = xscale1pmu_start;
+	cpu_pmu->stop = xscale1pmu_stop;
+	cpu_pmu->map_event = xscale_map_event;
+	cpu_pmu->num_events = 3;
+	cpu_pmu->max_period = (1LLU << 32) - 1;
+	return 0;
 }
 
 #define XSCALE2_OVERFLOWED_MASK	0x01f
@@ -801,33 +799,31 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
 
-static struct arm_pmu xscale2pmu = {
-	.name = "xscale2",
-	.handle_irq = xscale2pmu_handle_irq,
-	.enable = xscale2pmu_enable_event,
-	.disable = xscale2pmu_disable_event,
-	.read_counter = xscale2pmu_read_counter,
-	.write_counter = xscale2pmu_write_counter,
-	.get_event_idx = xscale2pmu_get_event_idx,
-	.start = xscale2pmu_start,
-	.stop = xscale2pmu_stop,
-	.map_event = xscale_map_event,
-	.num_events = 5,
-	.max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit xscale2pmu_init(void)
+static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &xscale2pmu;
+	cpu_pmu->name = "xscale2";
+	cpu_pmu->handle_irq = xscale2pmu_handle_irq;
+	cpu_pmu->enable = xscale2pmu_enable_event;
+	cpu_pmu->disable = xscale2pmu_disable_event;
+	cpu_pmu->read_counter = xscale2pmu_read_counter;
+	cpu_pmu->write_counter = xscale2pmu_write_counter;
+	cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+	cpu_pmu->start = xscale2pmu_start;
+	cpu_pmu->stop = xscale2pmu_stop;
+	cpu_pmu->map_event = xscale_map_event;
+	cpu_pmu->num_events = 5;
+	cpu_pmu->max_period = (1LLU << 32) - 1;
+	return 0;
 }
 
 #else
-static struct arm_pmu *__devinit xscale1pmu_init(void)
+static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit xscale2pmu_init(void)
+static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif /* CONFIG_CPU_XSCALE */