Commit 1a246b9f authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86/intel/uncore: Convert to hotplug state machine

Convert the notifiers to state machine states and let the core code do the
setup for the already online CPUs.

The notifier had a completely undocumented ordering requirement versus the
perf core, hardcoded in the notifier priority. This ordering is only required
on CPU down, so that the hardware migration happens before the perf core is
notified about the outgoing CPU.
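
For reference, the conversion follows the generic cpuhp pattern sketched below
(a minimal, hypothetical module for illustration; it is not part of this patch
and uses a dynamically allocated state rather than the dedicated uncore
states). A startup/teardown callback pair replaces the notifier switch
statement, cpuhp_setup_state() invokes the startup callback for every already
online CPU, and teardown callbacks run in reverse state order on CPU down.
That reverse ordering, fixed by the position of the state constant in
enum cpuhp_state, is what replaces the old CPU_PRI_PERF + 1 notifier priority.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int example_state;	/* dynamically allocated hotplug state */

/* Startup: called when a CPU reaches the state on bringup, and once
 * for every already online CPU when the state is installed. */
static int example_online(unsigned int cpu)
{
	pr_info("example: cpu %u came online\n", cpu);
	return 0;
}

/* Teardown: called in reverse state order on CPU down. A state placed
 * after CPUHP_AP_PERF_X86_ONLINE is therefore torn down before the
 * perf core sees the outgoing CPU. */
static int example_offline(unsigned int cpu)
{
	pr_info("example: cpu %u going down\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a state slot and returns it. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "EXAMPLE_ONLINE",
				example_online, example_offline);
	if (ret < 0)
		return ret;
	example_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state_nocalls(example_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");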
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153333.752695801@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 95ca792c
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1034,7 +1034,7 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static void uncore_cpu_dying(int cpu)
+static int uncore_cpu_dying(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1051,16 +1051,19 @@ static void uncore_cpu_dying(int cpu)
 				uncore_box_exit(box);
 		}
 	}
+	return 0;
 }
 
-static void uncore_cpu_starting(int cpu, bool init)
+static int first_init;
+
+static int uncore_cpu_starting(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
 	int i, pkg, ncpus = 1;
 
-	if (init) {
+	if (first_init) {
 		/*
 		 * On init we get the number of online cpus in the package
 		 * and set refcount for all of them.
@@ -1081,9 +1084,11 @@ static void uncore_cpu_starting(int cpu, bool init)
 				uncore_box_init(box);
 		}
 	}
+
+	return 0;
 }
 
-static int uncore_cpu_prepare(int cpu)
+static int uncore_cpu_prepare(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1146,13 +1151,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static void uncore_event_exit_cpu(int cpu)
+static int uncore_event_cpu_offline(unsigned int cpu)
 {
 	int target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return;
+		return 0;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1165,9 +1170,10 @@ static void uncore_event_exit_cpu(int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+	return 0;
 }
 
-static void uncore_event_init_cpu(int cpu)
+static int uncore_event_cpu_online(unsigned int cpu)
 {
 	int target;
 
@@ -1177,50 +1183,15 @@ static void uncore_event_init_cpu(int cpu)
 	 */
 	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
 	if (target < nr_cpu_ids)
-		return;
+		return 0;
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
 	uncore_change_context(uncore_msr_uncores, -1, cpu);
 	uncore_change_context(uncore_pci_uncores, -1, cpu);
+	return 0;
 }
 
-static int uncore_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		return notifier_from_errno(uncore_cpu_prepare(cpu));
-
-	case CPU_STARTING:
-		uncore_cpu_starting(cpu, false);
-	case CPU_DOWN_FAILED:
-		uncore_event_init_cpu(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DYING:
-		uncore_cpu_dying(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		uncore_event_exit_cpu(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb = {
-	.notifier_call	= uncore_cpu_notifier,
-	/*
-	 * to migrate uncore events, our notifier should be executed
-	 * before perf core's notifier.
-	 */
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static int __init type_pmu_register(struct intel_uncore_type *type)
 {
 	int i, ret;
@@ -1264,41 +1235,6 @@ static int __init uncore_cpu_init(void)
 	return ret;
 }
 
-static void __init uncore_cpu_setup(void *dummy)
-{
-	uncore_cpu_starting(smp_processor_id(), true);
-}
-
-/* Lazy to avoid allocation of a few bytes for the normal case */
-static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
-
-static int __init uncore_cpumask_init(bool msr)
-{
-	unsigned int cpu;
-
-	for_each_online_cpu(cpu) {
-		unsigned int pkg = topology_logical_package_id(cpu);
-		int ret;
-
-		if (test_and_set_bit(pkg, packages))
-			continue;
-		/*
-		 * The first online cpu of each package allocates and takes
-		 * the refcounts for all other online cpus in that package.
-		 * If msrs are not enabled no allocation is required.
-		 */
-		if (msr) {
-			ret = uncore_cpu_prepare(cpu);
-			if (ret)
-				return ret;
-		}
-		uncore_event_init_cpu(cpu);
-		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
-	}
-	__register_cpu_notifier(&uncore_cpu_nb);
-	return 0;
-}
-
 #define X86_UNCORE_MODEL_MATCH(model, init)	\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
 
@@ -1420,11 +1356,33 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	cpu_notifier_register_begin();
-	ret = uncore_cpumask_init(!cret);
-	if (ret)
-		goto err;
-	cpu_notifier_register_done();
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 *
+	 * The first online cpu of each package allocates and takes
+	 * the refcounts for all other online cpus in that package.
+	 * If msrs are not enabled no allocation is required and
+	 * uncore_cpu_prepare() is not called for each online cpu.
+	 */
+	if (!cret) {
+		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
+					"PERF_X86_UNCORE_PREP",
+					uncore_cpu_prepare, NULL);
+		if (ret)
+			goto err;
+	} else {
+		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
					  "PERF_X86_UNCORE_PREP",
					  uncore_cpu_prepare, NULL);
+	}
+	first_init = 1;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+			  "AP_PERF_X86_UNCORE_STARTING",
+			  uncore_cpu_starting, uncore_cpu_dying);
+	first_init = 0;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+			  "AP_PERF_X86_UNCORE_ONLINE",
+			  uncore_event_cpu_online, uncore_event_cpu_offline);
 	return 0;
 
 err:
@@ -1432,17 +1390,16 @@ static int __init intel_uncore_init(void)
 	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 	return ret;
 }
 module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&uncore_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
+	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 }
 module_exit(intel_uncore_exit);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -6,6 +6,7 @@ enum cpuhp_state {
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_PREPARE,
 	CPUHP_PERF_X86_PREPARE,
+	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
@@ -18,6 +19,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
+	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
@@ -27,6 +29,7 @@ enum cpuhp_state {
 	CPUHP_AP_X86_VDSO_VMA_ONLINE,
 	CPUHP_AP_PERF_ONLINE,
 	CPUHP_AP_PERF_X86_ONLINE,
+	CPUHP_AP_PERF_X86_UNCORE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,