Commit cda94408 authored by Catalin Marinas

Merge branch 'for-next/perf' of...

Merge branch 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into for-next/core

* 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
  arm64: perf: add support for Cortex-A35
  arm64: perf: add support for Cortex-A73
  arm64: perf: Remove redundant entries from CPU-specific event maps
  arm64: perf: Connect additional events to pmu counters
  arm64: perf: Allow standard PMUv3 events to be extended by the CPU type
  perf: xgene: Remove unnecessary managed resources cleanup
  arm64: perf: Allow more than one cycle counter to be used
parents df5b95be e884f80c
@@ -9,9 +9,11 @@ Required properties:
 - compatible : should be one of
 	"apm,potenza-pmu"
 	"arm,armv8-pmuv3"
+	"arm,cortex-a73-pmu"
 	"arm,cortex-a72-pmu"
 	"arm,cortex-a57-pmu"
 	"arm,cortex-a53-pmu"
+	"arm,cortex-a35-pmu"
 	"arm,cortex-a17-pmu"
 	"arm,cortex-a15-pmu"
 	"arm,cortex-a12-pmu"
...
This diff is collapsed.
@@ -47,6 +47,9 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
+	if (!cache_map)
+		return -ENOENT;
+
 	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
@@ -63,6 +66,9 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 	if (config >= PERF_COUNT_HW_MAX)
 		return -EINVAL;
 
+	if (!event_map)
+		return -ENOENT;
+
 	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
...
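The two NULL checks above underpin the "Allow standard PMUv3 events to be extended by the CPU type" change: a CPU-specific backend may now pass a NULL event or cache map and get -ENOENT back instead of a NULL dereference, so its tables only need to describe events beyond the architected set. A rough sketch of how a backend could use this; my_cpu_map_event() and my_cpu_extra_cache_map are made-up names for illustration, not the actual arch/arm64 code:

/*
 * Illustrative sketch only: shows why armpmu_map_hw_event() and
 * armpmu_map_cache_event() now tolerate NULL maps.
 */
static int my_cpu_map_event(struct perf_event *event)
{
	int hw_event_id;

	/* Try the architected PMUv3 mapping shared by all ARMv8 CPUs. */
	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);
	if (hw_event_id >= 0)
		return hw_event_id;

	/*
	 * Fall back to the CPU-specific extension tables.  Passing NULL
	 * for the hw-event map is now safe: the common code returns
	 * -ENOENT instead of dereferencing a missing table.
	 */
	return armpmu_map_event(event, NULL, &my_cpu_extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}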
@@ -1147,7 +1147,6 @@ xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
 {
 	struct device *dev = xgene_pmu->dev;
 	struct xgene_pmu_dev *pmu;
-	int rc;
 
 	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
 	if (!pmu)
@@ -1159,7 +1158,7 @@ xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
 	switch (pmu->inf->type) {
 	case PMU_TYPE_L3C:
 		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
-			goto dev_err;
+			return -ENODEV;
 		if (xgene_pmu->version == PCP_PMU_V3)
 			pmu->attr_groups = l3c_pmu_v3_attr_groups;
 		else
@@ -1177,7 +1176,7 @@ xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
 		break;
 	case PMU_TYPE_MCB:
 		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
-			goto dev_err;
+			return -ENODEV;
 		if (xgene_pmu->version == PCP_PMU_V3)
 			pmu->attr_groups = mcb_pmu_v3_attr_groups;
 		else
@@ -1185,7 +1184,7 @@ xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
 		break;
 	case PMU_TYPE_MC:
 		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
-			goto dev_err;
+			return -ENODEV;
 		if (xgene_pmu->version == PCP_PMU_V3)
 			pmu->attr_groups = mc_pmu_v3_attr_groups;
 		else
@@ -1195,19 +1194,14 @@ xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
 		return -EINVAL;
 	}
 
-	rc = xgene_init_perf(pmu, ctx->name);
-	if (rc) {
+	if (xgene_init_perf(pmu, ctx->name)) {
 		dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
-		goto dev_err;
+		return -ENODEV;
 	}
 
 	dev_info(dev, "%s PMU registered\n", ctx->name);
 
-	return rc;
-
-dev_err:
-	devm_kfree(dev, pmu);
-	return -ENODEV;
+	return 0;
 }
 
 static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
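Dropping the dev_err label and its devm_kfree() follows from the devres model: memory from devm_kzalloc() (and mappings from devm_ioremap_resource()) is released automatically by the driver core when probe fails or the device is unbound, so error paths can simply return an error code. A minimal sketch of that pattern, using made-up example_* names rather than the xgene code:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

struct example_priv {
	void __iomem *base;
	struct pmu pmu;		/* registered later with perf_pmu_register() */
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	/* Freed by the driver core if probe fails or the device is unbound. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);	/* no devm_kfree() needed */

	platform_set_drvdata(pdev, priv);
	return 0;
}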
@@ -1515,13 +1509,13 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	acpi_dev_free_resource_list(&resource_list);
 	if (rc < 0) {
 		dev_err(dev, "PMU type %d: No resource address found\n", type);
-		goto err;
+		return NULL;
 	}
 
 	dev_csr = devm_ioremap_resource(dev, &res);
 	if (IS_ERR(dev_csr)) {
 		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
-		goto err;
+		return NULL;
 	}
 
 	/* A PMU device node without enable-bit-index is always enabled */
@@ -1535,7 +1529,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
 	if (!ctx->name) {
 		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
-		goto err;
+		return NULL;
 	}
 	inf = &ctx->inf;
 	inf->type = type;
@@ -1543,9 +1537,6 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	inf->enable_mask = 1 << enable_bit;
 
 	return ctx;
-err:
-	devm_kfree(dev, ctx);
-	return NULL;
 }
 
 static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
@@ -1663,20 +1654,20 @@ xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	void __iomem *dev_csr;
 	struct resource res;
 	int enable_bit;
-	int rc;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return NULL;
-	rc = of_address_to_resource(np, 0, &res);
-	if (rc < 0) {
+
+	if (of_address_to_resource(np, 0, &res) < 0) {
 		dev_err(dev, "PMU type %d: No resource address found\n", type);
-		goto err;
+		return NULL;
 	}
+
 	dev_csr = devm_ioremap_resource(dev, &res);
 	if (IS_ERR(dev_csr)) {
 		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
-		goto err;
+		return NULL;
 	}
 
 	/* A PMU device node without enable-bit-index is always enabled */
@@ -1686,17 +1677,15 @@ xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
 	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
 	if (!ctx->name) {
 		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
-		goto err;
+		return NULL;
 	}
+
 	inf = &ctx->inf;
 	inf->type = type;
 	inf->csr = dev_csr;
 	inf->enable_mask = 1 << enable_bit;
 
 	return ctx;
-err:
-	devm_kfree(dev, ctx);
-	return NULL;
 }
 
 static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
@@ -1868,22 +1857,20 @@ static int xgene_pmu_probe(struct platform_device *pdev)
 	xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(xgene_pmu->pcppmu_csr)) {
 		dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
-		rc = PTR_ERR(xgene_pmu->pcppmu_csr);
-		goto err;
+		return PTR_ERR(xgene_pmu->pcppmu_csr);
 	}
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
-		rc = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}
 
 	rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
 			      IRQF_NOBALANCING | IRQF_NO_THREAD,
 			      dev_name(&pdev->dev), xgene_pmu);
 	if (rc) {
 		dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
-		goto err;
+		return rc;
 	}
 
 	raw_spin_lock_init(&xgene_pmu->lock);
@@ -1903,42 +1890,29 @@ static int xgene_pmu_probe(struct platform_device *pdev)
 	rc = irq_set_affinity(irq, &xgene_pmu->cpu);
 	if (rc) {
 		dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
-		goto err;
+		return rc;
 	}
 
 	/* Walk through the tree for all PMU perf devices */
 	rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
 	if (rc) {
 		dev_err(&pdev->dev, "No PMU perf devices found!\n");
-		goto err;
+		return rc;
 	}
 
 	/* Enable interrupt */
 	xgene_pmu->ops->unmask_int(xgene_pmu);
 
 	return 0;
-
-err:
-	if (xgene_pmu->pcppmu_csr)
-		devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
-	devm_kfree(&pdev->dev, xgene_pmu);
-	return rc;
 }
 
 static void
 xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
 {
 	struct xgene_pmu_dev_ctx *ctx;
-	struct device *dev = xgene_pmu->dev;
-	struct xgene_pmu_dev *pmu_dev;
 
 	list_for_each_entry(ctx, pmus, next) {
-		pmu_dev = ctx->pmu_dev;
-		if (pmu_dev->inf->csr)
-			devm_iounmap(dev, pmu_dev->inf->csr);
-		devm_kfree(dev, ctx);
-		devm_kfree(dev, pmu_dev);
+		perf_pmu_unregister(&ctx->pmu_dev->pmu);
 	}
 }
@@ -1951,10 +1925,6 @@ static int xgene_pmu_remove(struct platform_device *pdev)
 	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
 	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
 
-	if (xgene_pmu->pcppmu_csr)
-		devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
-	devm_kfree(&pdev->dev, xgene_pmu);
-
 	return 0;
 }
...
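After these cleanups, the only teardown the remove path still performs by hand is unregistering each PMU; the CSR mappings and context allocations are released by devres when the platform device is unbound. Roughly, continuing the made-up example_* sketch above (not the xgene structures):

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	/* perf_pmu_unregister() is not devres-managed, so call it explicitly. */
	perf_pmu_unregister(&priv->pmu);

	/*
	 * No devm_iounmap()/devm_kfree() here: devres releases everything
	 * registered against &pdev->dev once remove() returns.
	 */
	return 0;
}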