Commit a5c8a019 authored by Thomas Gleixner


Merge tag 'irqchip-for-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

First drop of irqchip updates for 4.8 from Marc Zyngier:

 - Fix a few bugs in configuring the default trigger from the irqdomain layer
 - Make the genirq layer PM aware
 - Add PM capability to the ARM GIC driver
 - Add support for 2-level translation tables to the GICv3 ITS driver
parents ff5b706f 3faf24ea
@@ -21,6 +21,7 @@ Main node required properties:
 		"arm,pl390"
 		"arm,tc11mp-gic"
 		"brcm,brahma-b15-gic"
+		"nvidia,tegra210-agic"
 		"qcom,msm-8660-qgic"
 		"qcom,msm-qgic2"
 - interrupt-controller : Identifies the node as an interrupt controller
@@ -68,7 +69,7 @@ Optional
 	"ic_clk" (for "arm,arm11mp-gic")
 	"PERIPHCLKEN" (for "arm,cortex-a15-gic")
 	"PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
-	"clk" (for "arm,gic-400")
+	"clk" (for "arm,gic-400" and "nvidia,tegra210")
 - power-domains : A phandle and PM domain specifier as defined by bindings of
......
@@ -8,6 +8,12 @@ config ARM_GIC
 	select IRQ_DOMAIN_HIERARCHY
 	select MULTI_IRQ_HANDLER
 
+config ARM_GIC_PM
+	bool
+	depends on PM
+	select ARM_GIC
+	select PM_CLK
+
 config ARM_GIC_MAX_NR
 	int
 	default 2 if ARCH_REALVIEW
......
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI)		+= irq-sun4i.o
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
 obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM)		+= irq-gic-pm.o
 obj-$(CONFIG_REALVIEW_DT)		+= irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
......
@@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
 	return ret;
 }
 
-void __init gic_dist_config(void __iomem *base, int gic_irqs,
-			    void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs,
+		     void (*sync_access)(void))
 {
 	unsigned int i;
......
/*
* Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
struct gic_clk_data {
unsigned int num_clocks;
const char *const *clocks;
};
static int gic_runtime_resume(struct device *dev)
{
struct gic_chip_data *gic = dev_get_drvdata(dev);
int ret;
ret = pm_clk_resume(dev);
if (ret)
return ret;
/*
* On the very first resume, the pointer to the driver data
* will be NULL and this is intentional, because we do not
* want to restore the GIC on the very first resume. So if
* the pointer is not valid just return.
*/
if (!gic)
return 0;
gic_dist_restore(gic);
gic_cpu_restore(gic);
return 0;
}
static int gic_runtime_suspend(struct device *dev)
{
struct gic_chip_data *gic = dev_get_drvdata(dev);
gic_dist_save(gic);
gic_cpu_save(gic);
return pm_clk_suspend(dev);
}
static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
{
struct clk *clk;
unsigned int i;
int ret;
if (!dev || !data)
return -EINVAL;
ret = pm_clk_create(dev);
if (ret)
return ret;
for (i = 0; i < data->num_clocks; i++) {
clk = of_clk_get_by_name(dev->of_node, data->clocks[i]);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock %s\n",
data->clocks[i]);
ret = PTR_ERR(clk);
goto error;
}
ret = pm_clk_add_clk(dev, clk);
if (ret) {
dev_err(dev, "failed to add clock at index %d\n", i);
clk_put(clk);
goto error;
}
}
return 0;
error:
pm_clk_destroy(dev);
return ret;
}
static int gic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gic_clk_data *data;
struct gic_chip_data *gic;
int ret, irq;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no device match found\n");
return -ENODEV;
}
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "no parent interrupt found!\n");
return -EINVAL;
}
ret = gic_get_clocks(dev, data);
if (ret)
goto irq_dispose;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto rpm_disable;
ret = gic_of_init_child(dev, &gic, irq);
if (ret)
goto rpm_put;
platform_set_drvdata(pdev, gic);
pm_runtime_put(dev);
dev_info(dev, "GIC IRQ controller registered\n");
return 0;
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
pm_runtime_disable(dev);
pm_clk_destroy(dev);
irq_dispose:
irq_dispose_mapping(irq);
return ret;
}
static const struct dev_pm_ops gic_pm_ops = {
SET_RUNTIME_PM_OPS(gic_runtime_suspend,
gic_runtime_resume, NULL)
};
static const char * const gic400_clocks[] = {
"clk",
};
static const struct gic_clk_data gic400_data = {
.num_clocks = ARRAY_SIZE(gic400_clocks),
.clocks = gic400_clocks,
};
static const struct of_device_id gic_match[] = {
{ .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
{},
};
MODULE_DEVICE_TABLE(of, gic_match);
static struct platform_driver gic_driver = {
.probe = gic_probe,
.driver = {
.name = "gic",
.of_match_table = gic_match,
.pm = &gic_pm_ops,
}
};
builtin_platform_driver(gic_driver);
@@ -56,13 +56,14 @@ struct its_collection {
 };
 
 /*
- * The ITS_BASER structure - contains memory information and cached
- * value of BASER register configuration.
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
  */
 struct its_baser {
 	void		*base;
 	u64		val;
 	u32		order;
+	u32		psz;
 };
 
 /*
@@ -824,180 +825,241 @@ static const char *its_base_type_string[] = {
 	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
 };
 
-static void its_free_tables(struct its_node *its)
-{
-	int i;
-
-	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-		if (its->tables[i].base) {
-			free_pages((unsigned long)its->tables[i].base,
-				   its->tables[i].order);
-			its->tables[i].base = NULL;
-		}
-	}
-}
-
-static int its_alloc_tables(const char *node_name, struct its_node *its)
-{
-	int err;
-	int i;
-	int psz = SZ_64K;
-	u64 shr = GITS_BASER_InnerShareable;
-	u64 cache;
-	u64 typer;
-	u32 ids;
-
-	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
-		/*
-		 * erratum 22375: only alloc 8MB table size
-		 * erratum 24313: ignore memory access type
-		 */
-		cache	= 0;
-		ids	= 0x14;			/* 20 bits, 8MB */
-	} else {
-		cache	= GITS_BASER_WaWb;
-		typer	= readq_relaxed(its->base + GITS_TYPER);
-		ids	= GITS_TYPER_DEVBITS(typer);
-	}
-
-	its->device_ids = ids;
-
-	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
-		u64 type = GITS_BASER_TYPE(val);
-		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
-		int order = get_order(psz);
-		int alloc_pages;
-		u64 tmp;
-		void *base;
-
-		if (type == GITS_BASER_TYPE_NONE)
-			continue;
-
-		/*
-		 * Allocate as many entries as required to fit the
-		 * range of device IDs that the ITS can grok... The ID
-		 * space being incredibly sparse, this results in a
-		 * massive waste of memory.
-		 *
-		 * For other tables, only allocate a single page.
-		 */
-		if (type == GITS_BASER_TYPE_DEVICE) {
-			/*
-			 * 'order' was initialized earlier to the default page
-			 * granule of the the ITS. We can't have an allocation
-			 * smaller than that. If the requested allocation
-			 * is smaller, round up to the default page granule.
-			 */
-			order = max(get_order((1UL << ids) * entry_size),
-				    order);
-			if (order >= MAX_ORDER) {
-				order = MAX_ORDER - 1;
-				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
-					node_name, order);
-			}
-		}
-
-retry_alloc_baser:
-		alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
-		if (alloc_pages > GITS_BASER_PAGES_MAX) {
-			alloc_pages = GITS_BASER_PAGES_MAX;
-			order = get_order(GITS_BASER_PAGES_MAX * psz);
-			pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
-				node_name, order, alloc_pages);
-		}
-
-		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-		if (!base) {
-			err = -ENOMEM;
-			goto out_free;
-		}
-
-		its->tables[i].base = base;
-		its->tables[i].order = order;
-
-retry_baser:
-		val = (virt_to_phys(base)				 |
-		       (type << GITS_BASER_TYPE_SHIFT)			 |
-		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
-		       cache						 |
-		       shr						 |
-		       GITS_BASER_VALID);
-
-		switch (psz) {
-		case SZ_4K:
-			val |= GITS_BASER_PAGE_SIZE_4K;
-			break;
-		case SZ_16K:
-			val |= GITS_BASER_PAGE_SIZE_16K;
-			break;
-		case SZ_64K:
-			val |= GITS_BASER_PAGE_SIZE_64K;
-			break;
-		}
-
-		val |= alloc_pages - 1;
-		its->tables[i].val = val;
-
-		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
-		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
-
-		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
-			/*
-			 * Shareability didn't stick. Just use
-			 * whatever the read reported, which is likely
-			 * to be the only thing this redistributor
-			 * supports. If that's zero, make it
-			 * non-cacheable as well.
-			 */
-			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
-			if (!shr) {
-				cache = GITS_BASER_nC;
-				__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
-			}
-			goto retry_baser;
-		}
-
-		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
-			/*
-			 * Page size didn't stick. Let's try a smaller
-			 * size and retry. If we reach 4K, then
-			 * something is horribly wrong...
-			 */
-			free_pages((unsigned long)base, order);
-			its->tables[i].base = NULL;
-
-			switch (psz) {
-			case SZ_16K:
-				psz = SZ_4K;
-				goto retry_alloc_baser;
-			case SZ_64K:
-				psz = SZ_16K;
-				goto retry_alloc_baser;
-			}
-		}
-
-		if (val != tmp) {
-			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
-			       node_name, i,
-			       (unsigned long) val, (unsigned long) tmp);
-			err = -ENXIO;
-			goto out_free;
-		}
-
-		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
-			(int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
-			its_base_type_string[type],
-			(unsigned long)virt_to_phys(base),
-			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
-	}
-
-	return 0;
-
-out_free:
-	its_free_tables(its);
-
-	return err;
-}
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
+{
+	u32 idx = baser - its->tables;
+
+	return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+}
+
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+			    u64 val)
+{
+	u32 idx = baser - its->tables;
+
+	writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+	baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+			   u64 cache, u64 shr, u32 psz, u32 order,
+			   bool indirect)
+{
+	u64 val = its_read_baser(its, baser);
+	u64 esz = GITS_BASER_ENTRY_SIZE(val);
+	u64 type = GITS_BASER_TYPE(val);
+	u32 alloc_pages;
+	void *base;
+	u64 tmp;
+
+retry_alloc_baser:
+	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+	if (alloc_pages > GITS_BASER_PAGES_MAX) {
+		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+			&its->phys_base, its_base_type_string[type],
+			alloc_pages, GITS_BASER_PAGES_MAX);
+		alloc_pages = GITS_BASER_PAGES_MAX;
+		order = get_order(GITS_BASER_PAGES_MAX * psz);
+	}
+
+	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!base)
+		return -ENOMEM;
+
+retry_baser:
+	val = (virt_to_phys(base)				 |
+		(type << GITS_BASER_TYPE_SHIFT)			 |
+		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
+		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
+		cache						 |
+		shr						 |
+		GITS_BASER_VALID);
+
+	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;
+
+	switch (psz) {
+	case SZ_4K:
+		val |= GITS_BASER_PAGE_SIZE_4K;
+		break;
+	case SZ_16K:
+		val |= GITS_BASER_PAGE_SIZE_16K;
+		break;
+	case SZ_64K:
+		val |= GITS_BASER_PAGE_SIZE_64K;
+		break;
+	}
+
+	its_write_baser(its, baser, val);
+	tmp = baser->val;
+
+	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+		/*
+		 * Shareability didn't stick. Just use
+		 * whatever the read reported, which is likely
+		 * to be the only thing this redistributor
+		 * supports. If that's zero, make it
+		 * non-cacheable as well.
+		 */
+		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+		if (!shr) {
+			cache = GITS_BASER_nC;
+			__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+		}
+		goto retry_baser;
+	}
+
+	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+		/*
+		 * Page size didn't stick. Let's try a smaller
+		 * size and retry. If we reach 4K, then
+		 * something is horribly wrong...
+		 */
+		free_pages((unsigned long)base, order);
+		baser->base = NULL;
+
+		switch (psz) {
+		case SZ_16K:
+			psz = SZ_4K;
+			goto retry_alloc_baser;
+		case SZ_64K:
+			psz = SZ_16K;
+			goto retry_alloc_baser;
+		}
+	}
+
+	if (val != tmp) {
+		pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+		       &its->phys_base, its_base_type_string[type],
+		       (unsigned long) val, (unsigned long) tmp);
+		free_pages((unsigned long)base, order);
+		return -ENXIO;
+	}
+
+	baser->order = order;
+	baser->base = base;
+	baser->psz = psz;
+	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+		its_base_type_string[type],
+		(unsigned long)virt_to_phys(base),
+		indirect ? "indirect" : "flat", (int)esz,
+		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+	return 0;
+}
+
+static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
+				   u32 psz, u32 *order)
+{
+	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+	u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+	u32 ids = its->device_ids;
+	u32 new_order = *order;
+	bool indirect = false;
+
+	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
+	if ((esz << ids) > (psz * 2)) {
+		/*
+		 * Find out whether hw supports a single or two-level table by
+		 * reading bit at offset '62' after writing '1' to it.
+		 */
+		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+		indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+		if (indirect) {
+			/*
+			 * The size of the lvl2 table is equal to ITS page size
+			 * which is 'psz'. For computing lvl1 table size,
+			 * subtract ID bits that sparse lvl2 table from 'ids'
+			 * which is reported by ITS hardware times lvl1 table
+			 * entry size.
+			 */
+			ids -= ilog2(psz / esz);
+			esz = GITS_LVL1_ENTRY_SIZE;
+		}
+	}
+
+	/*
+	 * Allocate as many entries as required to fit the
+	 * range of device IDs that the ITS can grok... The ID
+	 * space being incredibly sparse, this results in a
+	 * massive waste of memory if two-level device table
+	 * feature is not supported by hardware.
+	 */
+	new_order = max_t(u32, get_order(esz << ids), new_order);
+	if (new_order >= MAX_ORDER) {
+		new_order = MAX_ORDER - 1;
+		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
+			&its->phys_base, its->device_ids, ids);
+	}
+
+	*order = new_order;
+
+	return indirect;
+}
+
+static void its_free_tables(struct its_node *its)
+{
+	int i;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		if (its->tables[i].base) {
+			free_pages((unsigned long)its->tables[i].base,
+				   its->tables[i].order);
+			its->tables[i].base = NULL;
+		}
+	}
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+	u64 typer = readq_relaxed(its->base + GITS_TYPER);
+	u32 ids = GITS_TYPER_DEVBITS(typer);
+	u64 shr = GITS_BASER_InnerShareable;
+	u64 cache = GITS_BASER_WaWb;
+	u32 psz = SZ_64K;
+	int err, i;
+
+	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
+		/*
+		 * erratum 22375: only alloc 8MB table size
+		 * erratum 24313: ignore memory access type
+		 */
+		cache	= GITS_BASER_nCnB;
+		ids	= 0x14;			/* 20 bits, 8MB */
+	}
+
+	its->device_ids = ids;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		struct its_baser *baser = its->tables + i;
+		u64 val = its_read_baser(its, baser);
+		u64 type = GITS_BASER_TYPE(val);
+		u32 order = get_order(psz);
+		bool indirect = false;
+
+		if (type == GITS_BASER_TYPE_NONE)
+			continue;
+
+		if (type == GITS_BASER_TYPE_DEVICE)
+			indirect = its_parse_baser_device(its, baser, psz, &order);
+
+		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+		if (err < 0) {
+			its_free_tables(its);
+			return err;
+		}
+
+		/* Update settings which will be used for next BASERn */
+		psz = baser->psz;
+		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+	}
+
+	return 0;
+}
 
 static int its_alloc_collections(struct its_node *its)
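To make the sizing described in the comments above concrete (the numbers are illustrative, not taken from the patch): with 8-byte Device Table entries (esz = 8), a 20-bit DeviceID space (ids = 20) and 64 KB ITS pages, a flat table needs 2^20 * 8 bytes = 8 MB allocated up front, whereas the indirect layout allocates one 64 KB level-2 page per 8192 DeviceIDs on demand and only a 2^(20 - 13) * 8 bytes = 1 KB level-1 table up front.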
@@ -1185,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
 	return NULL;
 }
 
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+	struct its_baser *baser;
+	struct page *page;
+	u32 esz, idx;
+	__le64 *table;
+
+	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+	/* Don't allow device id that exceeds ITS hardware limit */
+	if (!baser)
+		return (ilog2(dev_id) < its->device_ids);
+
+	/* Don't allow device id that exceeds single, flat table limit */
+	esz = GITS_BASER_ENTRY_SIZE(baser->val);
+	if (!(baser->val & GITS_BASER_INDIRECT))
+		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+	/* Compute 1st level table index & check if that exceeds table limit */
+	idx = dev_id >> ilog2(baser->psz / esz);
+	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+		return false;
+
+	table = baser->base;
+
+	/* Allocate memory for 2nd level table */
+	if (!table[idx]) {
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+		if (!page)
+			return false;
+
+		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
+		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+			__flush_dcache_area(page_address(page), baser->psz);
+
+		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+			__flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+		/* Ensure updated table contents are visible to ITS hardware */
+		dsb(sy);
+	}
+
+	return true;
+}
+
 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 					    int nvecs)
 {
-	struct its_baser *baser;
 	struct its_device *dev;
 	unsigned long *lpi_map;
 	unsigned long flags;
@@ -1199,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	int nr_ites;
 	int sz;
 
-	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
-	/* Don't allow 'dev_id' that exceeds single, flat table limit */
-	if (baser) {
-		if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
-			       GITS_BASER_ENTRY_SIZE(baser->val)))
-			return NULL;
-	} else if (ilog2(dev_id) >= its->device_ids)
+	if (!its_alloc_device_table(its, dev_id))
 		return NULL;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1569,7 +1671,7 @@ static int __init its_probe(struct device_node *node,
 
 	its_enable_quirks(its);
 
-	err = its_alloc_tables(node->full_name, its);
+	err = its_alloc_tables(its);
 	if (err)
 		goto out_free_cmd;
......
@@ -75,7 +75,7 @@ struct gic_chip_data {
 	void __iomem *raw_dist_base;
 	void __iomem *raw_cpu_base;
 	u32 percpu_offset;
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
 }
 
-static void __init gic_dist_init(struct gic_chip_data *gic)
+static void gic_dist_init(struct gic_chip_data *gic)
 {
 	unsigned int i;
 	u32 cpumask;
@@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr)
 	return 0;
 }
 
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
 /*
  * Saves the GIC distributor registers during suspend or idle. Must be called
  * with interrupts disabled but before powering down the GIC. After calling
  * this function, no interrupts will be delivered by the GIC, and another
  * platform-specific wakeup source must be enabled.
  */
-static void gic_dist_save(struct gic_chip_data *gic)
+void gic_dist_save(struct gic_chip_data *gic)
 {
 	unsigned int gic_irqs;
 	void __iomem *dist_base;
@@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic)
  * handled normally, but any edge interrupts that occured will not be seen by
  * the GIC and need to be handled by the platform-specific wakeup source.
  */
-static void gic_dist_restore(struct gic_chip_data *gic)
+void gic_dist_restore(struct gic_chip_data *gic)
 {
 	unsigned int gic_irqs;
 	unsigned int i;
@@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic)
 	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
 
-static void gic_cpu_save(struct gic_chip_data *gic)
+void gic_cpu_save(struct gic_chip_data *gic)
 {
 	int i;
 	u32 *ptr;
@@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic)
 }
 
-static void gic_cpu_restore(struct gic_chip_data *gic)
+void gic_cpu_restore(struct gic_chip_data *gic)
 {
 	int i;
 	u32 *ptr;
@@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = {
 	.notifier_call = gic_notifier,
 };
 
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
 {
 	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
 					       sizeof(u32));
@@ -757,7 +757,7 @@ static int __init gic_pm_init(struct gic_chip_data *gic)
 	return -ENOMEM;
 }
 #else
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
 {
 	return 0;
 }
@@ -1032,32 +1032,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
 	.unmap = gic_irq_domain_unmap,
 };
 
-static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
-				   struct fwnode_handle *handle)
+static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
+			  const char *name, bool use_eoimode1)
 {
-	irq_hw_number_t hwirq_base;
-	int gic_irqs, irq_base, i, ret;
-
-	if (WARN_ON(!gic || gic->domain))
-		return -EINVAL;
-
 	/* Initialize irq_chip */
 	gic->chip = gic_chip;
+	gic->chip.name = name;
+	gic->chip.parent_device = dev;
 
-	if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+	if (use_eoimode1) {
 		gic->chip.irq_mask = gic_eoimode1_mask_irq;
 		gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
 		gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
-		gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
-	} else {
-		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
-					   (int)(gic - &gic_data[0]));
 	}
 
 #ifdef CONFIG_SMP
 	if (gic == &gic_data[0])
 		gic->chip.irq_set_affinity = gic_set_affinity;
 #endif
+}
+
+static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+			  struct fwnode_handle *handle)
+{
+	irq_hw_number_t hwirq_base;
+	int gic_irqs, irq_base, ret;
 
 	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
 		/* Frankein-GIC without banked registers... */
@@ -1138,6 +1137,36 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
 		goto error;
 	}
 
+	gic_dist_init(gic);
+	ret = gic_cpu_init(gic);
+	if (ret)
+		goto error;
+
+	ret = gic_pm_init(gic);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+		free_percpu(gic->dist_base.percpu_base);
+		free_percpu(gic->cpu_base.percpu_base);
+	}
+
+	return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+				   int irq_start,
+				   struct fwnode_handle *handle)
+{
+	char *name;
+	int i, ret;
+
+	if (WARN_ON(!gic || gic->domain))
+		return -EINVAL;
+
 	if (gic == &gic_data[0]) {
 		/*
 		 * Initialize the CPU interface map to all CPUs.
@@ -1155,24 +1184,17 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
 			pr_info("GIC: Using split EOI/Deactivate mode\n");
 	}
 
-	gic_dist_init(gic);
-	ret = gic_cpu_init(gic);
-	if (ret)
-		goto error;
-
-	ret = gic_pm_init(gic);
-	if (ret)
-		goto error;
-
-	return 0;
-
-error:
-	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
-		free_percpu(gic->dist_base.percpu_base);
-		free_percpu(gic->cpu_base.percpu_base);
-	}
-
-	kfree(gic->chip.name);
+	if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+		name = kasprintf(GFP_KERNEL, "GICv2");
+		gic_init_chip(gic, NULL, name, true);
+	} else {
+		name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
+		gic_init_chip(gic, NULL, name, false);
+	}
+
+	ret = gic_init_bases(gic, irq_start, handle);
+	if (ret)
+		kfree(name);
 
 	return ret;
 }
@@ -1250,7 +1272,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
 	return true;
 }
 
-static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
 {
 	if (!gic || !node)
 		return -EINVAL;
@@ -1274,6 +1296,34 @@ static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
 	return -ENOMEM;
 }
 
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+	int ret;
+
+	if (!dev || !dev->of_node || !gic || !irq)
+		return -EINVAL;
+
+	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+	if (!*gic)
+		return -ENOMEM;
+
+	gic_init_chip(*gic, dev, dev->of_node->name, false);
+
+	ret = gic_of_setup(*gic, dev->of_node);
+	if (ret)
+		return ret;
+
+	ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+	if (ret) {
+		gic_teardown(*gic);
+		return ret;
+	}
+
+	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+	return 0;
+}
+
 static void __init gic_of_setup_kvm_info(struct device_node *node)
 {
 	int ret;
@@ -1353,7 +1403,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
 IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+	return -ENOTSUPP;
+}
 #endif
 
 #ifdef CONFIG_ACPI
......
@@ -315,6 +315,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
+ * @parent_device:	pointer to parent device for irqchip
  * @name:		name for /proc/interrupts
  * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
  * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
@@ -354,6 +355,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @flags:		chip specific flags
  */
 struct irq_chip {
+	struct device	*parent_device;
 	const char	*name;
 	unsigned int	(*irq_startup)(struct irq_data *data);
 	void		(*irq_shutdown)(struct irq_data *data);
@@ -488,6 +490,8 @@ extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+extern int irq_chip_pm_get(struct irq_data *data);
+extern int irq_chip_pm_put(struct irq_data *data);
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 extern void irq_chip_enable_parent(struct irq_data *data);
 extern void irq_chip_disable_parent(struct irq_data *data);
......
@@ -204,6 +204,7 @@
 #define GITS_BASER_NR_REGS		8
 
 #define GITS_BASER_VALID		(1UL << 63)
+#define GITS_BASER_INDIRECT		(1UL << 62)
 #define GITS_BASER_nCnB			(0UL << 59)
 #define GITS_BASER_nC			(1UL << 59)
 #define GITS_BASER_RaWt			(2UL << 59)
@@ -228,6 +229,7 @@
 #define GITS_BASER_PAGE_SIZE_64K	(2UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGE_SIZE_MASK	(3UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGES_MAX		256
+#define GITS_BASER_PAGES_SHIFT		(0)
 
 #define GITS_BASER_TYPE_NONE		0
 #define GITS_BASER_TYPE_DEVICE		1
@@ -238,6 +240,8 @@
 #define GITS_BASER_TYPE_RESERVED6	6
 #define GITS_BASER_TYPE_RESERVED7	7
 
+#define GITS_LVL1_ENTRY_SIZE		(8UL)
+
 /*
  * ITS commands
  */
......
@@ -101,9 +101,14 @@
 #include <linux/irqdomain.h>
 
 struct device_node;
+struct gic_chip_data;
 
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 int gic_cpu_if_down(unsigned int gic_nr);
+void gic_cpu_save(struct gic_chip_data *gic);
+void gic_cpu_restore(struct gic_chip_data *gic);
+void gic_dist_save(struct gic_chip_data *gic);
+void gic_dist_restore(struct gic_chip_data *gic);
 
 /*
  * Subdrivers that need some preparatory work can initialize their
@@ -111,6 +116,12 @@ int gic_cpu_if_down(unsigned int gic_nr);
  */
 int gic_of_init(struct device_node *node, struct device_node *parent);
 
+/*
+ * Initialises and registers a non-root or child GIC chip. Memory for
+ * the gic_chip_data structure is dynamically allocated.
+ */
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
+
 /*
  * Legacy platforms not converted to DT yet must use this to init
  * their GIC
......
@@ -452,6 +452,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
 	return -1;
 }
 
+static inline void irq_domain_free_irqs(unsigned int virq,
+					unsigned int nr_irqs) { }
+
 static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
 {
 	return false;
......
@@ -1093,3 +1093,43 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 
 	return 0;
 }
+
+/**
+ * irq_chip_pm_get - Enable power for an IRQ chip
+ * @data:	Pointer to interrupt specific data
+ *
+ * Enable the power to the IRQ chip referenced by the interrupt data
+ * structure.
+ */
+int irq_chip_pm_get(struct irq_data *data)
+{
+	int retval;
+
+	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
+		retval = pm_runtime_get_sync(data->chip->parent_device);
+		if (retval < 0) {
+			pm_runtime_put_noidle(data->chip->parent_device);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * irq_chip_pm_put - Disable power for an IRQ chip
+ * @data:	Pointer to interrupt specific data
+ *
+ * Disable the power to the IRQ chip referenced by the interrupt data
+ * structure. Note that power will only be disabled once this function
+ * has been called for all IRQs that previously called irq_chip_pm_get().
+ */
+int irq_chip_pm_put(struct irq_data *data)
+{
+	int retval = 0;
+
+	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
+		retval = pm_runtime_put(data->chip->parent_device);
+
+	return (retval < 0) ? retval : 0;
+}
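A minimal sketch, not part of this series, of how another irqchip driver could opt into these hooks: the only visible contract is that chip.parent_device points at a runtime-PM-enabled struct device, after which request_irq()/free_irq() (and the setup/percpu variants patched below) bracket the action with irq_chip_pm_get()/irq_chip_pm_put(). All foo_* names here are hypothetical.

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical chip callbacks; a real driver would poke its own registers. */
static void foo_mask_irq(struct irq_data *d)   { /* mask d->hwirq in hardware */ }
static void foo_unmask_irq(struct irq_data *d) { /* unmask d->hwirq in hardware */ }

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_mask_irq,
	.irq_unmask	= foo_unmask_irq,
};

static int foo_irqchip_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/*
	 * With parent_device set, any request_irq()/free_irq() on an
	 * interrupt owned by this chip is now wrapped in
	 * irq_chip_pm_get()/irq_chip_pm_put(), i.e. a runtime PM
	 * get/put on this device.
	 */
	foo_irq_chip.parent_device = &pdev->dev;

	/* ... register an irq_domain that uses &foo_irq_chip ... */
	return 0;
}

This is exactly the pattern gic_init_chip() uses above when the GIC is probed through the new irq-gic-pm.c platform driver.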
@@ -7,6 +7,7 @@
  */
 #include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
+#include <linux/pm_runtime.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
......
@@ -567,6 +567,7 @@ static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 {
 	struct irq_domain *domain;
+	struct irq_data *irq_data;
 	irq_hw_number_t hwirq;
 	unsigned int type = IRQ_TYPE_NONE;
 	int virq;
@@ -588,15 +589,46 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
 		return 0;
 
-	if (irq_domain_is_hierarchy(domain)) {
-		/*
-		 * If we've already configured this interrupt,
-		 * don't do it again, or hell will break loose.
-		 */
-		virq = irq_find_mapping(domain, hwirq);
-		if (virq)
-			return virq;
+	/*
+	 * WARN if the irqchip returns a type with bits
+	 * outside the sense mask set and clear these bits.
+	 */
+	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
+		type &= IRQ_TYPE_SENSE_MASK;
+
+	/*
+	 * If we've already configured this interrupt,
+	 * don't do it again, or hell will break loose.
+	 */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
+		/*
+		 * If the trigger type is not specified or matches the
+		 * current trigger type then we are done so return the
+		 * interrupt number.
+		 */
+		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
+			return virq;
+
+		/*
+		 * If the trigger type has not been set yet, then set
+		 * it now and return the interrupt number.
+		 */
+		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
+			irq_data = irq_get_irq_data(virq);
+			if (!irq_data)
+				return 0;
+
+			irqd_set_trigger_type(irq_data, type);
+			return virq;
+		}
+
+		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
+			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
+		return 0;
+	}
+
+	if (irq_domain_is_hierarchy(domain)) {
 		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 		if (virq <= 0)
 			return 0;
@@ -607,10 +639,18 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 		return virq;
 	}
 
-	/* Set type if specified and different than the current one */
-	if (type != IRQ_TYPE_NONE &&
-	    type != irq_get_trigger_type(virq))
-		irq_set_irq_type(virq, type);
+	irq_data = irq_get_irq_data(virq);
+	if (!irq_data) {
+		if (irq_domain_is_hierarchy(domain))
+			irq_domain_free_irqs(virq, 1);
+		else
+			irq_dispose_mapping(virq);
+		return 0;
+	}
+
+	/* Store trigger type */
+	irqd_set_trigger_type(irq_data, type);
+
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
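The net effect of the hunks above: when irq_create_fwspec_mapping() is called again for an already-mapped hwirq, a matching or unspecified trigger type simply returns the existing Linux IRQ, a previously unset type is filled in from the new specifier, and a genuine conflict now fails with the "type mismatch" warning instead of silently reprogramming the line under the first user. For a fresh mapping, the requested type is only stored on the irq_data here and applied later by __setup_irq() (see the kernel/irq/manage.c hunks below).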
......
@@ -1116,6 +1116,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	new->irq = irq;
 
+	/*
+	 * If the trigger type is not specified by the caller,
+	 * then use the default for this interrupt.
+	 */
+	if (!(new->flags & IRQF_TRIGGER_MASK))
+		new->flags |= irqd_get_trigger_type(&desc->irq_data);
+
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
@@ -1409,10 +1416,18 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
 
+	if (retval)
+		irq_chip_pm_put(&desc->irq_data);
+
 	return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
@@ -1506,6 +1521,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		}
 	}
 
+	irq_chip_pm_put(&desc->irq_data);
 	module_put(desc->owner);
 	kfree(action->secondary);
 	return action;
@@ -1648,11 +1664,16 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->dev_id = dev_id;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(desc);
 
 	if (retval) {
+		irq_chip_pm_put(&desc->irq_data);
 		kfree(action->secondary);
 		kfree(action);
 	}
@@ -1730,7 +1751,14 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 	if (!desc)
 		return;
 
+	/*
+	 * If the trigger type is not specified by the caller, then
+	 * use the default for this interrupt.
+	 */
 	type &= IRQ_TYPE_SENSE_MASK;
+	if (type == IRQ_TYPE_NONE)
+		type = irqd_get_trigger_type(&desc->irq_data);
+
 	if (type != IRQ_TYPE_NONE) {
 		int ret;
@@ -1822,6 +1850,7 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
 
 	unregister_handler_proc(irq, action);
 
+	irq_chip_pm_put(&desc->irq_data);
 	module_put(desc->owner);
 	return action;
@@ -1884,10 +1913,18 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
 		return -EINVAL;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
 
+	if (retval)
+		irq_chip_pm_put(&desc->irq_data);
+
 	return retval;
 }
@@ -1931,12 +1968,18 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->percpu_dev_id = dev_id;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(desc);
 
-	if (retval)
+	if (retval) {
+		irq_chip_pm_put(&desc->irq_data);
 		kfree(action);
+	}
 
 	return retval;
 }
......
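Taken together with the irqdomain change above, a consumer driver can now omit the IRQF_TRIGGER_* flags entirely and inherit the trigger type that firmware (DT/ACPI) supplied when the mapping was created. A hedged sketch of such a consumer; my_dev_isr, my_dev_request and the "my-dev" name are purely illustrative:

#include <linux/interrupt.h>

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	/* acknowledge the device interrupt here */
	return IRQ_HANDLED;
}

static int my_dev_request(struct device *dev, int irq)
{
	/*
	 * No IRQF_TRIGGER_* flags: __setup_irq() now falls back to the
	 * trigger type stored by irq_create_fwspec_mapping() from the
	 * firmware specifier, instead of leaving the line unconfigured.
	 * If the irqchip behind 'irq' set chip->parent_device, this call
	 * also powers the irqchip up via irq_chip_pm_get().
	 */
	return devm_request_irq(dev, irq, my_dev_isr, 0, "my-dev", NULL);
}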