Commit 3423cab3 authored by Catalin Marinas

Merge branch 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux

Support for the Cluster PMU part of the ARM DynamIQ Shared Unit (DSU).

* 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
  perf: ARM DynamIQ Shared Unit PMU support
  dt-bindings: Document devicetree binding for ARM DSU PMU
  arm_pmu: Use of_cpu_node_to_id helper
  arm64: Use of_cpu_node_to_id helper for CPU topology parsing
  irqchip: gic-v3: Use of_cpu_node_to_id helper
  coresight: of: Use of_cpu_node_to_id helper
  of: Add helper for mapping device node to logical CPU number
  perf: Export perf_event_update_userpage
parents 0d907188 7520fa99
* ARM DynamIQ Shared Unit (DSU) Performance Monitor Unit (PMU)
ARM DynamIQ Shared Unit (DSU) integrates one or more CPU cores
with a shared L3 memory system, control logic and external interfaces to
form a multicore cluster. The PMU allows gathering various statistics on
the operations of the DSU. The PMU provides independent 32-bit counters that
can count any of the supported events, along with a 64-bit cycle counter.
The PMU is accessed via CPU system registers and has no MMIO component.
** DSU PMU required properties:
- compatible : should be one of:
	"arm,dsu-pmu"
- interrupts : Exactly 1 SPI must be listed.
- cpus : List of phandles for the CPUs connected to this DSU instance.
** Example:
dsu-pmu-0 {
	compatible = "arm,dsu-pmu";
	interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>;
	cpus = <&cpu_0>, <&cpu_1>;
};
ARM DynamIQ Shared Unit (DSU) PMU
==================================
ARM DynamIQ Shared Unit integrates one or more cores with an L3 memory system,
control logic and external interfaces to form a multicore cluster. The PMU
allows counting the various events related to the L3 cache, Snoop Control Unit
etc., using independent 32-bit counters. It also provides a 64-bit cycle counter.
The PMU can only be accessed via CPU system registers, which are common to the
cores connected to the same DSU. Like most other uncore PMUs, the DSU PMU
doesn't support process-specific events and cannot be used in sampling mode.
The DSU provides a bitmap for a subset of the implemented events via hardware
registers. There is no way for the driver to determine whether the remaining
events are available or not. Hence the driver exposes only those events
advertised by the DSU, in the "events" directory under:

  /sys/bus/event_source/devices/arm_dsu_<N>/

The user should refer to the TRM of the product to find the supported events
and use the raw event code for the unlisted events.
The driver also exposes the CPUs connected to the DSU instance in "associated_cpus".
e.g. usage:

  perf stat -a -e arm_dsu_0/cycles/
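For an event that is not listed under "events", the raw event code from the TRM
can be passed instead; the event number below is purely illustrative and assumes
the driver accepts a raw "event" field:

  perf stat -a -e arm_dsu_0/event=0x2b/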
/*
* ARM DynamIQ Shared Unit (DSU) PMU Low level register access routines.
*
* Copyright (C) ARM Limited, 2017.
*
* Author: Suzuki K Poulose <suzuki.poulose@arm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, as published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>
#define CLUSTERPMCR_EL1 sys_reg(3, 0, 15, 5, 0)
#define CLUSTERPMCNTENSET_EL1 sys_reg(3, 0, 15, 5, 1)
#define CLUSTERPMCNTENCLR_EL1 sys_reg(3, 0, 15, 5, 2)
#define CLUSTERPMOVSSET_EL1 sys_reg(3, 0, 15, 5, 3)
#define CLUSTERPMOVSCLR_EL1 sys_reg(3, 0, 15, 5, 4)
#define CLUSTERPMSELR_EL1 sys_reg(3, 0, 15, 5, 5)
#define CLUSTERPMINTENSET_EL1 sys_reg(3, 0, 15, 5, 6)
#define CLUSTERPMINTENCLR_EL1 sys_reg(3, 0, 15, 5, 7)
#define CLUSTERPMCCNTR_EL1 sys_reg(3, 0, 15, 6, 0)
#define CLUSTERPMXEVTYPER_EL1 sys_reg(3, 0, 15, 6, 1)
#define CLUSTERPMXEVCNTR_EL1 sys_reg(3, 0, 15, 6, 2)
#define CLUSTERPMMDCR_EL1 sys_reg(3, 0, 15, 6, 3)
#define CLUSTERPMCEID0_EL1 sys_reg(3, 0, 15, 6, 4)
#define CLUSTERPMCEID1_EL1 sys_reg(3, 0, 15, 6, 5)
static inline u32 __dsu_pmu_read_pmcr(void)
{
return read_sysreg_s(CLUSTERPMCR_EL1);
}
static inline void __dsu_pmu_write_pmcr(u32 val)
{
write_sysreg_s(val, CLUSTERPMCR_EL1);
isb();
}
static inline u32 __dsu_pmu_get_reset_overflow(void)
{
u32 val = read_sysreg_s(CLUSTERPMOVSCLR_EL1);
/* Clear the bit */
write_sysreg_s(val, CLUSTERPMOVSCLR_EL1);
isb();
return val;
}
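/*
 * The event counters are accessed indirectly: a counter is first selected
 * via CLUSTERPMSELR_EL1, after which CLUSTERPMXEVCNTR_EL1 and
 * CLUSTERPMXEVTYPER_EL1 operate on the selected counter.
 */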
static inline void __dsu_pmu_select_counter(int counter)
{
write_sysreg_s(counter, CLUSTERPMSELR_EL1);
isb();
}
static inline u64 __dsu_pmu_read_counter(int counter)
{
__dsu_pmu_select_counter(counter);
return read_sysreg_s(CLUSTERPMXEVCNTR_EL1);
}
static inline void __dsu_pmu_write_counter(int counter, u64 val)
{
__dsu_pmu_select_counter(counter);
write_sysreg_s(val, CLUSTERPMXEVCNTR_EL1);
isb();
}
static inline void __dsu_pmu_set_event(int counter, u32 event)
{
__dsu_pmu_select_counter(counter);
write_sysreg_s(event, CLUSTERPMXEVTYPER_EL1);
isb();
}
static inline u64 __dsu_pmu_read_pmccntr(void)
{
return read_sysreg_s(CLUSTERPMCCNTR_EL1);
}
static inline void __dsu_pmu_write_pmccntr(u64 val)
{
write_sysreg_s(val, CLUSTERPMCCNTR_EL1);
isb();
}
static inline void __dsu_pmu_disable_counter(int counter)
{
write_sysreg_s(BIT(counter), CLUSTERPMCNTENCLR_EL1);
isb();
}
static inline void __dsu_pmu_enable_counter(int counter)
{
write_sysreg_s(BIT(counter), CLUSTERPMCNTENSET_EL1);
isb();
}
static inline void __dsu_pmu_counter_interrupt_enable(int counter)
{
write_sysreg_s(BIT(counter), CLUSTERPMINTENSET_EL1);
isb();
}
static inline void __dsu_pmu_counter_interrupt_disable(int counter)
{
write_sysreg_s(BIT(counter), CLUSTERPMINTENCLR_EL1);
isb();
}
static inline u32 __dsu_pmu_read_pmceid(int n)
{
switch (n) {
case 0:
return read_sysreg_s(CLUSTERPMCEID0_EL1);
case 1:
return read_sysreg_s(CLUSTERPMCEID1_EL1);
default:
BUILD_BUG();
return 0;
}
}
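
As a rough illustration of how these accessors compose, a driver-level counter
read might look like the sketch below. This is only a sketch, not the actual
driver code; the DSU_PMU_IDX_CYCLE_COUNTER index is an assumption made for the
example.

/* Hypothetical index reserved for the 64-bit cycle counter. */
#define DSU_PMU_IDX_CYCLE_COUNTER	31

/* Sketch: read the current raw value of a DSU PMU counter. */
static u64 dsu_pmu_read_counter_value(int idx)
{
	if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
		return __dsu_pmu_read_pmccntr();	/* 64-bit cycle counter */

	return __dsu_pmu_read_counter(idx);		/* 32-bit event counter */
}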
......@@ -37,18 +37,14 @@ static int __init get_cpu_for_node(struct device_node *node)
 	if (!cpu_node)
 		return -1;
-	for_each_possible_cpu(cpu) {
-		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
-			topology_parse_cpu_capacity(cpu_node, cpu);
-			of_node_put(cpu_node);
-			return cpu;
-		}
-	}
-	pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+	cpu = of_cpu_node_to_id(cpu_node);
+	if (cpu >= 0)
+		topology_parse_cpu_capacity(cpu_node, cpu);
+	else
+		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
 	of_node_put(cpu_node);
-	return -1;
+	return cpu;
 }
 static int __init parse_core(struct device_node *core, int cluster_id,
......
......@@ -104,26 +104,17 @@ static int of_coresight_alloc_memory(struct device *dev,
 int of_coresight_get_cpu(const struct device_node *node)
 {
 	int cpu;
-	bool found;
-	struct device_node *dn, *np;
+	struct device_node *dn;
 	dn = of_parse_phandle(node, "cpu", 0);
 	/* Affinity defaults to CPU0 */
 	if (!dn)
 		return 0;
-	for_each_possible_cpu(cpu) {
-		np = of_cpu_device_node_get(cpu);
-		found = (dn == np);
-		of_node_put(np);
-		if (found)
-			break;
-	}
+	cpu = of_cpu_node_to_id(dn);
 	of_node_put(dn);
 	/* Affinity to CPU0 if no cpu nodes are found */
-	return found ? cpu : 0;
+	return (cpu < 0) ? 0 : cpu;
 }
 EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
......
......@@ -1070,31 +1070,6 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
 	return 0;
 }
-static int get_cpu_number(struct device_node *dn)
-{
-	const __be32 *cell;
-	u64 hwid;
-	int cpu;
-	cell = of_get_property(dn, "reg", NULL);
-	if (!cell)
-		return -1;
-	hwid = of_read_number(cell, of_n_addr_cells(dn));
-	/*
-	 * Non affinity bits must be set to 0 in the DT
-	 */
-	if (hwid & ~MPIDR_HWID_BITMASK)
-		return -1;
-	for_each_possible_cpu(cpu)
-		if (cpu_logical_map(cpu) == hwid)
-			return cpu;
-	return -1;
-}
 /* Create all possible partitions at boot time */
 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 {
......@@ -1145,8 +1120,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 		if (WARN_ON(!cpu_node))
 			continue;
-		cpu = get_cpu_number(cpu_node);
-		if (WARN_ON(cpu == -1))
+		cpu = of_cpu_node_to_id(cpu_node);
+		if (WARN_ON(cpu < 0))
 			continue;
 		pr_cont("%pOF[%d] ", cpu_node, cpu);
......
......@@ -315,6 +315,32 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
EXPORT_SYMBOL(of_get_cpu_node);
/**
* of_cpu_node_to_id: Get the logical CPU number for a given device_node
*
* @cpu_node: Pointer to the device_node for CPU.
*
* Returns the logical CPU number of the given CPU device_node.
* Returns -ENODEV if the CPU is not found.
*/
int of_cpu_node_to_id(struct device_node *cpu_node)
{
int cpu;
bool found = false;
struct device_node *np;
for_each_possible_cpu(cpu) {
np = of_cpu_device_node_get(cpu);
found = (cpu_node == np);
of_node_put(np);
if (found)
return cpu;
}
return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);
/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
......
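As a minimal sketch of how a caller might use the new helper to translate a
"cpus" phandle list (like the one in the DSU PMU binding above) into logical
CPU numbers, consider the following. The name dsu_pmu_parse_cpus() and the
cpumask argument are assumptions made for the example; the actual (collapsed)
arm_dsu_pmu.c may differ.

static int dsu_pmu_parse_cpus(struct device_node *dev, cpumask_t *mask)
{
	int i, n, cpu;
	struct device_node *cpu_node;

	n = of_count_phandle_with_args(dev, "cpus", NULL);
	if (n <= 0)
		return -ENODEV;

	for (i = 0; i < n; i++) {
		cpu_node = of_parse_phandle(dev, "cpus", i);
		if (!cpu_node)
			break;
		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		/* A listed CPU may not be available; skip it. */
		if (cpu < 0)
			continue;
		cpumask_set_cpu(cpu, mask);
	}
	return 0;
}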
......@@ -17,6 +17,15 @@ config ARM_PMU_ACPI
	depends on ARM_PMU && ACPI
	def_bool y

config ARM_DSU_PMU
	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
	depends on ARM64
	help
	  Provides support for the performance monitor unit in the ARM DynamIQ
	  Shared Unit (DSU). The DSU integrates one or more cores with an L3
	  memory system and control logic. The PMU allows counting various
	  events related to the DSU.

config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
......
(The diff for the new drivers/perf/arm_dsu_pmu.c driver is collapsed and not shown here.)
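Since the new driver's diff is not visible here, the following is only a minimal
sketch of the event_init check that uncore PMU drivers typically perform, matching
the documentation's statement that the DSU PMU supports neither sampling nor
process-specific events. The function name and message are illustrative, not code
taken from the collapsed file.

/* Sketch only; assumes <linux/perf_event.h>. */
static int dsu_pmu_event_init_sketch(struct perf_event *event)
{
	/* Uncore counters cannot sample and cannot follow a single task. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) {
		pr_debug("Can't support sampling or per-task events\n");
		return -EOPNOTSUPP;
	}

	return 0;
}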
......@@ -82,19 +82,10 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
 		return -EINVAL;
 	}
 	/* Now look up the logical CPU number */
-	for_each_possible_cpu(cpu) {
-		struct device_node *cpu_dn;
-		cpu_dn = of_cpu_device_node_get(cpu);
-		of_node_put(cpu_dn);
-		if (dn == cpu_dn)
-			break;
-	}
-	if (cpu >= nr_cpu_ids) {
+	cpu = of_cpu_node_to_id(dn);
+	if (cpu < 0) {
 		pr_warn("failed to find logical CPU for %s\n", dn->name);
 		cpu = nr_cpu_ids;
 	}
 	of_node_put(dn);
......
......@@ -544,6 +544,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
#else /* CONFIG_OF */
static inline void of_core_init(void)
......@@ -916,6 +918,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
static inline int of_cpu_node_to_id(struct device_node *np)
{
return -ENODEV;
}
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
......
......@@ -4904,6 +4904,7 @@ void perf_event_update_userpage(struct perf_event *event)
unlock:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static int perf_mmap_fault(struct vm_fault *vmf)
{
......
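The export in the hunk above exists so that PMU drivers built as modules can
refresh the user-mapped perf page after changing an event's state. Below is a
minimal, hypothetical sketch of such a call site in a modular driver's ->add()
callback; the helper names are assumptions, not code from this series.

static int my_uncore_pmu_add(struct perf_event *event, int flags)
{
	int idx = my_uncore_alloc_counter(event);	/* assumed helper */

	if (idx < 0)
		return -EAGAIN;

	event->hw.idx = idx;
	my_uncore_start_counter(event);			/* assumed helper */

	/* Requires the new EXPORT_SYMBOL_GPL when the driver is a module. */
	perf_event_update_userpage(event);
	return 0;
}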