Commit 93cc1228 authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "The interrupt subsystem delivers this time:

   - Refactoring of the GIC-V3 driver to prepare for the GIC-V4 support

   - Initial GIC-V4 support

   - Consolidation of the FSL MSI support

   - Utilize the effective affinity interface in various ARM irqchip
     drivers

   - Yet another interrupt chip driver (UniPhier AIDET)

   - Bulk conversion of the irq chip drivers to use %pOF

   - The usual small fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (77 commits)
  irqchip/ls-scfg-msi: Add MSI affinity support
  irqchip/ls-scfg-msi: Add LS1043a v1.1 MSI support
  irqchip/ls-scfg-msi: Add LS1046a MSI support
  arm64: dts: ls1046a: Add MSI dts node
  arm64: dts: ls1043a: Share all MSIs
  arm: dts: ls1021a: Share all MSIs
  arm64: dts: ls1043a: Fix typo of MSI compatible string
  arm: dts: ls1021a: Fix typo of MSI compatible string
  irqchip/ls-scfg-msi: Fix typo of MSI compatible strings
  irqchip/irq-bcm7120-l2: Use correct I/O accessors for irq_fwd_mask
  irqchip/mmp: Make mmp_intc_conf const
  irqchip/gic: Make irq_chip const
  irqchip/gic-v3: Advertise GICv4 support to KVM
  irqchip/gic-v4: Enable low-level GICv4 operations
  irqchip/gic-v4: Add some basic documentation
  irqchip/gic-v4: Add VLPI configuration interface
  irqchip/gic-v4: Add VPE command interface
  irqchip/gic-v4: Add per-VM VPE domain creation
  irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs
  irqchip/gic-v3-its: Allow doorbell interrupts to be injected/cleared
  ...
parents dd90cccf 9fbd7fd2
@@ -4,8 +4,10 @@ Required properties:
 - compatible: should be "fsl,<soc-name>-msi" to identify
 Layerscape PCIe MSI controller block such as:
-"fsl,1s1021a-msi"
-"fsl,1s1043a-msi"
+"fsl,ls1021a-msi"
+"fsl,ls1043a-msi"
+"fsl,ls1046a-msi"
+"fsl,ls1043a-v1.1-msi"
 - msi-controller: indicates that this is a PCIe MSI controller node
 - reg: physical base address of the controller and length of memory mapped.
 - interrupts: an interrupt to the parent interrupt controller.
@@ -23,7 +25,7 @@ MSI controller node
 Examples:
 msi1: msi-controller@1571000 {
-compatible = "fsl,1s1043a-msi";
+compatible = "fsl,ls1043a-msi";
 reg = <0x0 0x1571000 0x0 0x8>,
 msi-controller;
 interrupts = <0 116 0x4>;
...
UniPhier AIDET
UniPhier AIDET (ARM Interrupt Detector) is an add-on block for the ARM GIC (Generic
Interrupt Controller). The GIC itself can handle only high-level and rising-edge
interrupts. The AIDET provides a logic inverter to support low-level and falling-edge
interrupts.
Required properties:
- compatible: Should be one of the following:
"socionext,uniphier-ld4-aidet" - for LD4 SoC
"socionext,uniphier-pro4-aidet" - for Pro4 SoC
"socionext,uniphier-sld8-aidet" - for sLD8 SoC
"socionext,uniphier-pro5-aidet" - for Pro5 SoC
"socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
"socionext,uniphier-ld11-aidet" - for LD11 SoC
"socionext,uniphier-ld20-aidet" - for LD20 SoC
"socionext,uniphier-pxs3-aidet" - for PXs3 SoC
- reg: Specifies offset and length of the register set for the device.
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
source. The value should be 2. The first cell defines the interrupt number
(corresponds to the SPI interrupt number of GIC). The second cell specifies
the trigger type as defined in interrupts.txt in this directory.
Example:
aidet: aidet@5fc20000 {
compatible = "socionext,uniphier-pro4-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
#interrupt-cells = <2>;
};
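A consumer simply selects the AIDET as its interrupt parent. The node below is purely illustrative (the device, the SPI number 66 and the trigger value 8, i.e. IRQ_TYPE_LEVEL_LOW, are made-up values, not taken from an actual SoC dtsi):
ethernet@65000000 {
...
interrupt-parent = <&aidet>;
interrupts = <66 8>;	/* SPI 66, low level, inverted by the AIDET */
};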
@@ -312,6 +312,7 @@ IRQ
 devm_irq_alloc_descs_from()
 devm_irq_alloc_generic_chip()
 devm_irq_setup_generic_chip()
+devm_irq_sim_init()
 LED
 devm_led_classdev_register()
...
@@ -1993,6 +1993,7 @@ F: arch/arm64/boot/dts/socionext/
 F: drivers/bus/uniphier-system-bus.c
 F: drivers/clk/uniphier/
 F: drivers/i2c/busses/i2c-uniphier*
+F: drivers/irqchip/irq-uniphier-aidet.c
 F: drivers/pinctrl/uniphier/
 F: drivers/reset/reset-uniphier.c
 F: drivers/tty/serial/8250/8250_uniphier.c
...
@@ -129,14 +129,14 @@ gic: interrupt-controller@1400000 {
 };
 msi1: msi-controller@1570e00 {
-compatible = "fsl,1s1021a-msi";
+compatible = "fsl,ls1021a-msi";
 reg = <0x0 0x1570e00 0x0 0x8>;
 msi-controller;
 interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
 };
 msi2: msi-controller@1570e08 {
-compatible = "fsl,1s1021a-msi";
+compatible = "fsl,ls1021a-msi";
 reg = <0x0 0x1570e08 0x0 0x8>;
 msi-controller;
 interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,7 +699,7 @@ pcie@3400000 {
 bus-range = <0x0 0xff>;
 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-msi-parent = <&msi1>;
+msi-parent = <&msi1>, <&msi2>;
 #interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -722,7 +722,7 @@ pcie@3500000 {
 bus-range = <0x0 0xff>;
 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-msi-parent = <&msi2>;
+msi-parent = <&msi1>, <&msi2>;
 #interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
...
@@ -275,6 +275,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 #define gicr_read_pendbaser(c) __gic_readq_nonatomic(c)
 #define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
+/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c) readl_relaxed(c)
+#define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c)
 /*
  * GITS_TYPER is an ID register and doesn't need atomicity.
  */
@@ -291,5 +297,33 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
  */
 #define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+/*
+ * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+/*
+ * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+{
+u32 tmp;
+tmp = readl_relaxed(addr + 4);
+if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+writel_relaxed(tmp, addr + 4);
+}
+/*
+ * Use the fact that __gic_writeq_nonatomic writes the second
+ * half of the 64bit quantity after the first.
+ */
+__gic_writeq_nonatomic(val, addr);
+}
+#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
@@ -39,6 +39,7 @@ config ARCH_HIP04
 select HAVE_ARM_ARCH_TIMER
 select MCPM if SMP
 select MCPM_QUAD_CLUSTER if SMP
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 help
 Support for Hisilicon HiP04 SoC family
...
@@ -653,21 +653,21 @@ sata: sata@3200000 {
 };
 msi1: msi-controller1@1571000 {
-compatible = "fsl,1s1043a-msi";
+compatible = "fsl,ls1043a-msi";
 reg = <0x0 0x1571000 0x0 0x8>;
 msi-controller;
 interrupts = <0 116 0x4>;
 };
 msi2: msi-controller2@1572000 {
-compatible = "fsl,1s1043a-msi";
+compatible = "fsl,ls1043a-msi";
 reg = <0x0 0x1572000 0x0 0x8>;
 msi-controller;
 interrupts = <0 126 0x4>;
 };
 msi3: msi-controller3@1573000 {
-compatible = "fsl,1s1043a-msi";
+compatible = "fsl,ls1043a-msi";
 reg = <0x0 0x1573000 0x0 0x8>;
 msi-controller;
 interrupts = <0 160 0x4>;
@@ -689,7 +689,7 @@ pcie@3400000 {
 bus-range = <0x0 0xff>;
 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-msi-parent = <&msi1>;
+msi-parent = <&msi1>, <&msi2>, <&msi3>;
 #interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
@@ -714,7 +714,7 @@ pcie@3500000 {
 bus-range = <0x0 0xff>;
 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-msi-parent = <&msi2>;
+msi-parent = <&msi1>, <&msi2>, <&msi3>;
 #interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
@@ -739,7 +739,7 @@ pcie@3600000 {
 bus-range = <0x0 0xff>;
 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-msi-parent = <&msi3>;
+msi-parent = <&msi1>, <&msi2>, <&msi3>;
 #interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
...
@@ -630,6 +630,37 @@ sata: sata@3200000 {
 interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&clockgen 4 1>;
 };
+msi1: msi-controller@1580000 {
+compatible = "fsl,ls1046a-msi";
+msi-controller;
+reg = <0x0 0x1580000 0x0 0x10000>;
+interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+};
+msi2: msi-controller@1590000 {
+compatible = "fsl,ls1046a-msi";
+msi-controller;
+reg = <0x0 0x1590000 0x0 0x10000>;
+interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+};
+msi3: msi-controller@15a0000 {
+compatible = "fsl,ls1046a-msi";
+msi-controller;
+reg = <0x0 0x15a0000 0x0 0x10000>;
+interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+<GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+};
 };
 reserved-memory {
...
@@ -116,6 +116,8 @@ static inline void gic_write_bpr1(u32 val)
 #define gic_read_typer(c) readq_relaxed(c)
 #define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_read_lpir(c) readq_relaxed(c)
+#define gic_write_lpir(v, c) writeq_relaxed(v, c)
 #define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
@@ -133,5 +135,10 @@ static inline void gic_write_bpr1(u32 val)
 #define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
 #define gicr_read_pendbaser(c) readq_relaxed(c)
+#define gits_write_vpropbaser(v, c) writeq_relaxed(v, c)
+#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
+#define gits_read_vpendbaser(c) readq_relaxed(c)
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
@@ -26,6 +26,7 @@ config METAG
 select HAVE_SYSCALL_TRACEPOINTS
 select HAVE_UNDERSCORE_SYMBOL_PREFIX
 select IRQ_DOMAIN
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 select MODULES_USE_ELF_RELA
 select OF
 select OF_EARLY_FLATTREE
...
@@ -7,6 +7,7 @@ config ARM_GIC
 select IRQ_DOMAIN
 select IRQ_DOMAIN_HIERARCHY
 select MULTI_IRQ_HANDLER
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config ARM_GIC_PM
 bool
@@ -34,6 +35,7 @@ config ARM_GIC_V3
 select MULTI_IRQ_HANDLER
 select IRQ_DOMAIN_HIERARCHY
 select PARTITION_PERCPU
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config ARM_GIC_V3_ITS
 bool
@@ -64,6 +66,7 @@ config ARMADA_370_XP_IRQ
 bool
 select GENERIC_IRQ_CHIP
 select PCI_MSI if PCI
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config ALPINE_MSI
 bool
@@ -93,11 +96,13 @@ config BCM6345_L1_IRQ
 bool
 select GENERIC_IRQ_CHIP
 select IRQ_DOMAIN
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config BCM7038_L1_IRQ
 bool
 select GENERIC_IRQ_CHIP
 select IRQ_DOMAIN
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config BCM7120_L2_IRQ
 bool
@@ -136,6 +141,7 @@ config IRQ_MIPS_CPU
 select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
 select IRQ_DOMAIN
 select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config CLPS711X_IRQCHIP
 bool
@@ -217,6 +223,7 @@ config VERSATILE_FPGA_IRQ_NR
 config XTENSA_MX
 bool
 select IRQ_DOMAIN
+select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 config XILINX_INTC
 bool
@@ -306,3 +313,11 @@ config QCOM_IRQ_COMBINER
 help
 Say yes here to add support for the IRQ combiner devices embedded
 in Qualcomm Technologies chips.
+config IRQ_UNIPHIER_AIDET
+bool "UniPhier AIDET support" if COMPILE_TEST
+depends on ARCH_UNIPHIER || COMPILE_TEST
+default ARCH_UNIPHIER
+select IRQ_DOMAIN_HIERARCHY
+help
+Support for the UniPhier AIDET (ARM Interrupt Detector).
@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
 obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {
 static struct msi_domain_info armada_370_xp_msi_domain_info = {
 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-MSI_FLAG_MULTI_PCI_MSI),
+MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
 .chip = &armada_370_xp_msi_irq_chip,
 };
@@ -330,6 +330,8 @@ static int armada_xp_set_affinity(struct irq_data *d,
 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
 raw_spin_unlock(&irq_controller_lock);
+irq_data_update_effective_affinity(d, cpumask_of(cpu));
 return IRQ_SET_MASK_OK;
 }
 #endif
@@ -363,6 +365,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 } else {
 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 handle_level_irq);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 }
 irq_set_probe(virq);
...
@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,
 base = of_iomap(node, 0);
 if (!base)
-panic("%s: unable to map IC registers\n",
-node->full_name);
+panic("%pOF: unable to map IC registers\n", node);
 intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
 &armctrl_ops, NULL);
 if (!intc.domain)
-panic("%s: unable to create IRQ domain\n", node->full_name);
+panic("%pOF: unable to create IRQ domain\n", node);
 for (b = 0; b < NR_BANKS; b++) {
 intc.pending[b] = base + reg_pending[b];
@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
 int parent_irq = irq_of_parse_and_map(node, 0);
 if (!parent_irq) {
-panic("%s: unable to get parent interrupt.\n",
-node->full_name);
+panic("%pOF: unable to get parent interrupt.\n",
+node);
 }
 irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
 } else {
...
@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 {
 intc.base = of_iomap(node, 0);
 if (!intc.base) {
-panic("%s: unable to map local interrupt registers\n",
-node->full_name);
+panic("%pOF: unable to map local interrupt registers\n", node);
 }
 bcm2835_init_local_timer_frequency();
@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 &bcm2836_arm_irqchip_intc_ops,
 NULL);
 if (!intc.domain)
-panic("%s: unable to create IRQ domain\n", node->full_name);
+panic("%pOF: unable to create IRQ domain\n", node);
 bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
 &bcm2836_arm_irqchip_timer);
...
@@ -231,6 +231,8 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
 }
 raw_spin_unlock_irqrestore(&intc->lock, flags);
+irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
 return IRQ_SET_MASK_OK_NOCOPY;
 }
@@ -291,6 +293,7 @@ static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
 irq_set_chip_and_handler(virq,
 &bcm6345_l1_irq_chip, handle_percpu_irq);
 irq_set_chip_data(virq, d->host_data);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 return 0;
 }
...
@@ -212,6 +212,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
 __bcm7038_l1_unmask(d, first_cpu);
 raw_spin_unlock_irqrestore(&intc->lock, flags);
+irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
 return 0;
 }
@@ -299,6 +301,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
 {
 irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
 irq_set_chip_data(virq, d->host_data);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 return 0;
 }
...
@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 if (ret < 0)
 goto out_free_l1_data;
-for (idx = 0; idx < data->n_words; idx++) {
-__raw_writel(data->irq_fwd_mask[idx],
-data->pair_base[idx] +
-data->en_offset[idx]);
-}
 for (irq = 0; irq < data->num_parent_irqs; irq++) {
 ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
 if (ret)
@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 gc->reg_base = data->pair_base[idx];
 ct->regs.mask = data->en_offset[idx];
+/* gc->reg_base is defined and so is gc->writel */
+irq_reg_writel(gc, data->irq_fwd_mask[idx],
+data->en_offset[idx]);
 ct->chip.irq_mask = irq_gc_mask_clr_bit;
 ct->chip.irq_unmask = irq_gc_mask_set_bit;
 ct->chip.irq_ack = irq_gc_noop;
...
@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
 int err;
 if (!parent) {
-pr_err("%s: no parent, giving up\n", node->full_name);
+pr_err("%pOF: no parent, giving up\n", node);
 return -ENODEV;
 }
 parent_domain = irq_find_host(parent);
 if (!parent_domain) {
-pr_err("%s: unable to obtain parent domain\n", node->full_name);
+pr_err("%pOF: unable to obtain parent domain\n", node);
 return -ENXIO;
 }
@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
 node, &crossbar_domain_ops,
 NULL);
 if (!domain) {
-pr_err("%s: failed to allocated domain\n", node->full_name);
+pr_err("%pOF: failed to allocated domain\n", node);
 return -ENOMEM;
 }
...
@@ -78,7 +78,7 @@ static int __init digicolor_of_init(struct device_node *node,
 reg_base = of_iomap(node, 0);
 if (!reg_base) {
-pr_err("%s: unable to map IC registers\n", node->full_name);
+pr_err("%pOF: unable to map IC registers\n", node);
 return -ENXIO;
 }
@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,
 ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
 if (IS_ERR(ucregs)) {
-pr_err("%s: unable to map UC registers\n", node->full_name);
+pr_err("%pOF: unable to map UC registers\n", node);
 return PTR_ERR(ucregs);
 }
 /* channel 1, regular IRQs */
@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
 digicolor_irq_domain =
 irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
 if (!digicolor_irq_domain) {
-pr_err("%s: unable to create IRQ domain\n", node->full_name);
+pr_err("%pOF: unable to create IRQ domain\n", node);
 return -ENOMEM;
 }
@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
 "digicolor_irq", handle_level_irq,
 clr, 0, 0);
 if (ret) {
-pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
+pr_err("%pOF: unable to allocate IRQ gc\n", node);
 return ret;
 }
...
@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 /* Map the parent interrupt for the chained handler */
 irq = irq_of_parse_and_map(np, 0);
 if (irq <= 0) {
-pr_err("%s: unable to parse irq\n", np->full_name);
+pr_err("%pOF: unable to parse irq\n", np);
 return -EINVAL;
 }
 ret = of_address_to_resource(np, 0, &r);
 if (ret) {
-pr_err("%s: unable to get resource\n", np->full_name);
+pr_err("%pOF: unable to get resource\n", np);
 return ret;
 }
 if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
-pr_err("%s: unable to request mem region\n", np->full_name);
+pr_err("%pOF: unable to request mem region\n", np);
 return -ENOMEM;
 }
 iobase = ioremap(r.start, resource_size(&r));
 if (!iobase) {
-pr_err("%s: unable to map resource\n", np->full_name);
+pr_err("%pOF: unable to map resource\n", np);
 ret = -ENOMEM;
 goto err_release;
 }
@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 domain = irq_domain_add_linear(np, nrirqs,
 &irq_generic_chip_ops, NULL);
 if (!domain) {
-pr_err("%s: unable to add irq domain\n", np->full_name);
+pr_err("%pOF: unable to add irq domain\n", np);
 ret = -ENOMEM;
 goto err_unmap;
 }
@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 handle_level_irq, clr, 0,
 IRQ_GC_INIT_MASK_CACHE);
 if (ret) {
-pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+pr_err("%pOF: unable to alloc irq domain gc\n", np);
 goto err_unmap;
 }
...
@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
 if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
 continue;
-pr_info("PCI/MSI: %s domain created\n", np->full_name);
+pr_info("PCI/MSI: %pOF domain created\n", np);
 }
 return 0;
...
(collapsed diff not shown)
 /*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -423,24 +423,14 @@ static void __init gic_dist_init(void)
 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 }
-static int gic_populate_rdist(void)
+static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 {
-unsigned long mpidr = cpu_logical_map(smp_processor_id());
-u64 typer;
-u32 aff;
+int ret = -ENODEV;
 int i;
-/*
- * Convert affinity to a 32bit value that can be matched to
- * GICR_TYPER bits [63:32].
- */
-aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
-       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
-       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
-       MPIDR_AFFINITY_LEVEL(mpidr, 0));
 for (i = 0; i < gic_data.nr_redist_regions; i++) {
 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+u64 typer;
 u32 reg;
 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -452,15 +442,9 @@ static int gic_populate_rdist(void)
 do {
 typer = gic_read_typer(ptr + GICR_TYPER);
-if ((typer >> 32) == aff) {
-u64 offset = ptr - gic_data.redist_regions[i].redist_base;
-gic_data_rdist_rd_base() = ptr;
-gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
-pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
-smp_processor_id(), mpidr, i,
-&gic_data_rdist()->phys_base);
+ret = fn(gic_data.redist_regions + i, ptr);
+if (!ret)
 return 0;
-}
 if (gic_data.redist_regions[i].single_redist)
 break;
@@ -475,12 +459,71 @@ static int gic_populate_rdist(void)
 } while (!(typer & GICR_TYPER_LAST));
 }
+return ret ? -ENODEV : 0;
+}
+static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
+{
+unsigned long mpidr = cpu_logical_map(smp_processor_id());
+u64 typer;
+u32 aff;
+/*
+ * Convert affinity to a 32bit value that can be matched to
+ * GICR_TYPER bits [63:32].
+ */
+aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+typer = gic_read_typer(ptr + GICR_TYPER);
+if ((typer >> 32) == aff) {
+u64 offset = ptr - region->redist_base;
+gic_data_rdist_rd_base() = ptr;
+gic_data_rdist()->phys_base = region->phys_base + offset;
+pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+smp_processor_id(), mpidr,
+(int)(region - gic_data.redist_regions),
+&gic_data_rdist()->phys_base);
+return 0;
+}
+/* Try next one */
+return 1;
+}
+static int gic_populate_rdist(void)
+{
+if (gic_iterate_rdists(__gic_populate_rdist) == 0)
+return 0;
 /* We couldn't even deal with ourselves... */
 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
-smp_processor_id(), mpidr);
+smp_processor_id(),
+(unsigned long)cpu_logical_map(smp_processor_id()));
 return -ENODEV;
 }
+static int __gic_update_vlpi_properties(struct redist_region *region,
+void __iomem *ptr)
+{
+u64 typer = gic_read_typer(ptr + GICR_TYPER);
+gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
+gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+return 1;
+}
+static void gic_update_vlpi_properties(void)
+{
+gic_iterate_rdists(__gic_update_vlpi_properties);
+pr_info("%sVLPI support, %sdirect LPI support\n",
+!gic_data.rdists.has_vlpis ? "no " : "",
+!gic_data.rdists.has_direct_lpi ? "no " : "");
+}
 static void gic_cpu_sys_reg_init(void)
 {
 /*
@@ -677,6 +720,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 else
 gic_dist_wait_for_rwp();
+irq_data_update_effective_affinity(d, cpumask_of(cpu));
 return IRQ_SET_MASK_OK_DONE;
 }
 #else
@@ -775,6 +820,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 irq_domain_set_info(d, irq, hw, chip, d->host_data,
 handle_fasteoi_irq, NULL, NULL);
 irq_set_probe(irq);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 }
 /* LPIs */
 if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -953,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
 &gic_data);
 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+gic_data.rdists.has_vlpis = true;
+gic_data.rdists.has_direct_lpi = true;
 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
 err = -ENOMEM;
@@ -961,6 +1009,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
 set_handle_irq(gic_handle_irq);
+gic_update_vlpi_properties();
 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
 its_init(handle, &gic_data.rdists, gic_data.domain);
@@ -1067,7 +1117,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 if (WARN_ON(cpu == -1))
 continue;
-pr_cont("%s[%d] ", cpu_node->full_name, cpu);
+pr_cont("%pOF[%d] ", cpu_node, cpu);
 cpumask_set_cpu(cpu, &part->mask);
 }
@@ -1122,6 +1172,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 if (!ret)
 gic_v3_kvm_info.vcpu = r;
+gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
 gic_set_kvm_info(&gic_v3_kvm_info);
 }
@@ -1135,15 +1186,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
 dist_base = of_iomap(node, 0);
 if (!dist_base) {
-pr_err("%s: unable to map gic dist registers\n",
-node->full_name);
+pr_err("%pOF: unable to map gic dist registers\n", node);
 return -ENXIO;
 }
 err = gic_validate_dist_version(dist_base);
 if (err) {
-pr_err("%s: no distributor detected, giving up\n",
-node->full_name);
+pr_err("%pOF: no distributor detected, giving up\n", node);
 goto out_unmap_dist;
 }
@@ -1163,8 +1212,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
 ret = of_address_to_resource(node, 1 + i, &res);
 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
 if (ret || !rdist_regs[i].redist_base) {
-pr_err("%s: couldn't map region %d\n",
-node->full_name, i);
+pr_err("%pOF: couldn't map region %d\n", node, i);
 err = -ENODEV;
 goto out_unmap_rdist;
 }
@@ -1418,6 +1466,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
 }
+gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
 gic_set_kvm_info(&gic_v3_kvm_info);
 }
...
/*
* Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/sched.h>
#include <linux/irqchip/arm-gic-v4.h>
/*
* WARNING: The blurb below assumes that you understand the
* intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
* translated into GICv4 commands. So it effectively targets at most
* two individuals. You know who you are.
*
* The core GICv4 code is designed to *avoid* exposing too much of the
* core GIC code (that would in turn leak into the hypervisor code),
* and instead provide a hypervisor agnostic interface to the HW (of
* course, the astute reader will quickly realize that hypervisor
* agnostic actually means KVM-specific - what were you thinking?).
*
* In order to achieve a modicum of isolation, we try to hide most of
* the GICv4 "stuff" behind normal irqchip operations:
*
* - Any guest-visible VLPI is backed by a Linux interrupt (and a
* physical LPI which gets unmapped when the guest maps the
* VLPI). This allows the same DevID/EventID pair to be either
* mapped to the LPI (host) or the VLPI (guest). Note that this is
* exclusive, and you cannot have both.
*
* - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
*
* - Guest INT/CLEAR commands are implemented through
* irq_set_irqchip_state().
*
* - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
* issuing an INV after changing a priority) gets shoved into the
* irq_set_vcpu_affinity() method. While this is quite horrible
* (let's face it, this is the irqchip version of an ioctl), it
* confines the crap to a single location. And map/unmap really is
* about setting the affinity of a VLPI to a vcpu, so only INV is
* majorly out of place. So there.
*
* A number of commands are simply not provided by this interface, as
* they do not make direct sense. For example, MAPD is purely local to
* the virtual ITS (because it references a virtual device, and the
* physical ITS is still very much in charge of the physical
* device). Same goes for things like MAPC (the physical ITS deals
* with the actual vPE affinity, and not the braindead concept of
* collection). SYNC is not provided either, as each and every command
* is followed by a VSYNC. This could be relaxed in the future, should
* this be seen as a bottleneck (yes, this means *never*).
*
* But handling VLPIs is only one side of the job of the GICv4
* code. The other (darker) side is to take care of the doorbell
* interrupts which are delivered when a VLPI targeting a non-running
* vcpu is being made pending.
*
* The choice made here is that each vcpu (VPE in old northern GICv4
* dialect) gets a single doorbell LPI, no matter how many interrupts
* are targeting it. This has a nice property, which is that the
* interrupt becomes a handle for the VPE, and that the hypervisor
* code can manipulate it through the normal interrupt API:
*
* - VMs (or rather the VM abstraction that matters to the GIC)
* contain an irq domain where each interrupt maps to a VPE. In
* turn, this domain sits on top of the normal LPI allocator, and a
* specially crafted irq_chip implementation.
*
* - mask/unmask do what is expected on the doorbell interrupt.
*
* - irq_set_affinity is used to move a VPE from one redistributor to
* another.
*
* - irq_set_vcpu_affinity once again gets hijacked for the purpose of
* creating a new sub-API, namely scheduling/descheduling a VPE
* (which involves programming GICR_V{PROP,PEND}BASER) and
* performing INVALL operations.
*/
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
int vpe_base_irq, i;
vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
task_pid_nr(current));
if (!vm->fwnode)
goto err;
vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
vm->fwnode, vpe_domain_ops,
vm);
if (!vm->domain)
goto err;
for (i = 0; i < vm->nr_vpes; i++) {
vm->vpes[i]->its_vm = vm;
vm->vpes[i]->idai = true;
}
vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
NUMA_NO_NODE, vm,
false, NULL);
if (vpe_base_irq <= 0)
goto err;
for (i = 0; i < vm->nr_vpes; i++)
vm->vpes[i]->irq = vpe_base_irq + i;
return 0;
err:
if (vm->domain)
irq_domain_remove(vm->domain);
if (vm->fwnode)
irq_domain_free_fwnode(vm->fwnode);
return -ENOMEM;
}
void its_free_vcpu_irqs(struct its_vm *vm)
{
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
}
static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
{
return irq_set_vcpu_affinity(vpe->irq, info);
}
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
struct its_cmd_info info;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
return its_send_vpe_cmd(vpe, &info);
}
int its_invall_vpe(struct its_vpe *vpe)
{
struct its_cmd_info info = {
.cmd_type = INVALL_VPE,
};
return its_send_vpe_cmd(vpe, &info);
}
int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = MAP_VLPI,
.map = map,
};
/*
* The host will never see that interrupt firing again, so it
* is vital that we don't do any lazy masking.
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
return irq_set_vcpu_affinity(irq, &info);
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = GET_VLPI,
.map = map,
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_unmap_vlpi(int irq)
{
irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
return irq_set_vcpu_affinity(irq, NULL);
}
int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
struct its_cmd_info info = {
.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
.config = config,
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
vpe_domain_ops = ops;
return 0;
}
pr_err("ITS: No GICv4 VPE domain allocated\n");
return -ENODEV;
}
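To make the calling convention above concrete, here is a minimal, hypothetical sketch of how a hypervisor back-end might drive this interface. It builds only on the functions defined in this file and the declarations in <linux/irqchip/arm-gic-v4.h>; the its_vlpi_map field names and the choice of VPE 0 are assumptions for illustration, not something mandated by this merge:

/*
 * Illustrative only: back a VM's VPEs with doorbell LPIs, forward one
 * host LPI to the guest as a VLPI, and make VPE 0 runnable.
 * Error unwinding is omitted for brevity.
 */
static int example_gicv4_setup(struct its_vm *vm, int host_irq, u32 vintid)
{
    struct its_vlpi_map map = {
        .vm     = vm,           /* assumed field names */
        .vpe    = vm->vpes[0],
        .vintid = vintid,
    };
    int ret;

    /* Allocate one doorbell LPI per VPE behind vm->domain */
    ret = its_alloc_vcpu_irqs(vm);
    if (ret)
        return ret;

    /* Turn the host LPI into a guest-visible VLPI */
    ret = its_map_vlpi(host_irq, &map);
    if (ret)
        return ret;

    /* Caller must not be preemptible when scheduling a VPE */
    return its_schedule_vpe(vm->vpes[0], true);
}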
@@ -344,6 +344,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 writel_relaxed(val | bit, reg);
 gic_unlock_irqrestore(flags);
+irq_data_update_effective_affinity(d, cpumask_of(cpu));
 return IRQ_SET_MASK_OK_DONE;
 }
 #endif
@@ -413,7 +415,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
 chained_irq_exit(chip, desc);
 }
-static struct irq_chip gic_chip = {
+static const struct irq_chip gic_chip = {
 .irq_mask = gic_mask_irq,
 .irq_unmask = gic_unmask_irq,
 .irq_eoi = gic_eoi_irq,
@@ -969,6 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
 handle_fasteoi_irq, NULL, NULL);
 irq_set_probe(irq);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 }
 return 0;
 }
...
@@ -165,6 +165,8 @@ static int hip04_irq_set_affinity(struct irq_data *d,
 writel_relaxed(val | bit, reg);
 raw_spin_unlock(&irq_controller_lock);
+irq_data_update_effective_affinity(d, cpumask_of(cpu));
 return IRQ_SET_MASK_OK;
 }
 #endif
@@ -312,6 +314,7 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
 irq_set_chip_and_handler(irq, &hip04_irq_chip,
 handle_fasteoi_irq);
 irq_set_probe(irq);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 }
 irq_set_chip_data(irq, d->host_data);
 return 0;
...
@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
 int i;
 if (!parent) {
-pr_err("%s: no parent, giving up\n", node->full_name);
+pr_err("%pOF: no parent, giving up\n", node);
 return -ENODEV;
 }
 parent_domain = irq_find_host(parent);
 if (!parent_domain) {
-pr_err("%s: unable to get parent domain\n", node->full_name);
+pr_err("%pOF: unable to get parent domain\n", node);
 return -ENXIO;
 }
...
@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
 irqc->base = of_iomap(node, 0);
 if (!irqc->base) {
-pr_err("%s: unable to map registers\n", node->full_name);
+pr_err("%pOF: unable to map registers\n", node);
 kfree(irqc);
 return -EINVAL;
 }
...
(collapsed diff not shown)
@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data,
 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+irq_data_update_effective_affinity(data, cpumask_of(cpu));
 return 0;
 }
 #else
@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq,
 else
 irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
 handle_edge_irq);
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 return 0;
 }
...
@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 cpumask_t tmp = CPU_MASK_NONE;
 unsigned long flags;
-int i;
+int i, cpu;
 cpumask_and(&tmp, cpumask, cpu_online_mask);
 if (cpumask_empty(&tmp))
 return -EINVAL;
+cpu = cpumask_first(&tmp);
 /* Assumption : cpumask refers to a single CPU */
 spin_lock_irqsave(&gic_lock, flags);
 /* Re-route this IRQ */
-gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
+gic_map_to_vpe(irq, mips_cm_vp_id(cpu));
 /* Update the pcpu_masks */
 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
 clear_bit(irq, pcpu_masks[i].pcpu_mask);
-set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
+set_bit(irq, pcpu_masks[cpu].pcpu_mask);
 cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
+irq_data_update_effective_affinity(d, cpumask_of(cpu));
 spin_unlock_irqrestore(&gic_lock, flags);
 return IRQ_SET_MASK_OK_NOCOPY;
@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 if (err)
 return err;
+irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 return gic_shared_irq_domain_map(d, virq, hwirq, 0);
 }
...
@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
 .xlate = mmp_irq_domain_xlate,
 };
-static struct mmp_intc_conf mmp_conf = {
+static const struct mmp_intc_conf mmp_conf = {
 .conf_enable = 0x51,
 .conf_disable = 0x0,
 .conf_mask = 0x7f,
 };
-static struct mmp_intc_conf mmp2_conf = {
+static const struct mmp_intc_conf mmp2_conf = {
 .conf_enable = 0x20,
 .conf_disable = 0x0,
 .conf_mask = 0x7f,
...
@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
 chip_data->intpol_words[i] = size / 4;
 chip_data->intpol_bases[i] = of_iomap(node, i);
 if (ret || !chip_data->intpol_bases[i]) {
-pr_err("%s: couldn't map region %d\n",
-node->full_name, i);
+pr_err("%pOF: couldn't map region %d\n", node, i);
 ret = -ENODEV;
 goto out_free_intpol;
 }
...
@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
 &icoll_irq_domain_ops, NULL);
 if (!icoll_domain)
-panic("%s: unable to create irq domain", np->full_name);
+panic("%pOF: unable to create irq domain", np);
 }
 static void __iomem * __init icoll_init_iobase(struct device_node *np)
@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
 icoll_base = of_io_request_and_map(np, 0, np->name);
 if (IS_ERR(icoll_base))
-panic("%s: unable to map resource", np->full_name);
+panic("%pOF: unable to map resource", np);
 return icoll_base;
 }
...
@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,
 base = of_iomap(node, 0);
 if (!base) {
-pr_err("%s: Unable to map registers\n", node->full_name);
+pr_err("%pOF: Unable to map registers\n", node);
 return -ENOMEM;
 }
@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
 nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
 writel_relaxed(0, base + EXTI_RTSR);
-pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti);
+pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
 domain = irq_domain_add_linear(node, nr_exti,
 &irq_exti_domain_ops, NULL);
@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
 handle_edge_irq, clr, 0, 0);
 if (ret) {
-pr_err("%s: Could not allocate generic interrupt chip.\n",
-node->full_name);
+pr_err("%pOF: Could not allocate generic interrupt chip.\n",
+node);
 goto out_free_domain;
 }
...
...@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node, ...@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
{ {
sun4i_irq_base = of_iomap(node, 0); sun4i_irq_base = of_iomap(node, 0);
if (!sun4i_irq_base) if (!sun4i_irq_base)
panic("%s: unable to map IC registers\n", panic("%pOF: unable to map IC registers\n",
node->full_name); node);
/* Disable all interrupts */ /* Disable all interrupts */
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
...@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node, ...@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
&sun4i_irq_ops, NULL); &sun4i_irq_ops, NULL);
if (!sun4i_irq_domain) if (!sun4i_irq_domain)
panic("%s: unable to create IRQ domain\n", node->full_name); panic("%pOF: unable to create IRQ domain\n", node);
set_handle_irq(sun4i_handle_irq); set_handle_irq(sun4i_handle_irq);
......
...@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node, ...@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
int err; int err;
if (!parent) { if (!parent) {
pr_err("%s: no parent, giving up\n", node->full_name); pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV; return -ENODEV;
} }
parent_domain = irq_find_host(parent); parent_domain = irq_find_host(parent);
if (!parent_domain) { if (!parent_domain) {
pr_err("%s: unable to obtain parent domain\n", node->full_name); pr_err("%pOF: unable to obtain parent domain\n", node);
return -ENXIO; return -ENXIO;
} }
...@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node, ...@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
} }
if (!num_ictlrs) { if (!num_ictlrs) {
pr_err("%s: no valid regions, giving up\n", node->full_name); pr_err("%pOF: no valid regions, giving up\n", node);
err = -ENOMEM; err = -ENOMEM;
goto out_free; goto out_free;
} }
WARN(num_ictlrs != soc->num_ictlrs, WARN(num_ictlrs != soc->num_ictlrs,
"%s: Found %u interrupt controllers in DT; expected %u.\n", "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
node->full_name, num_ictlrs, soc->num_ictlrs); node, num_ictlrs, soc->num_ictlrs);
domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
node, &tegra_ictlr_domain_ops, node, &tegra_ictlr_domain_ops,
lic); lic);
if (!domain) { if (!domain) {
pr_err("%s: failed to allocated domain\n", node->full_name); pr_err("%pOF: failed to allocated domain\n", node);
err = -ENOMEM; err = -ENOMEM;
goto out_unmap; goto out_unmap;
} }
tegra_ictlr_syscore_init(); tegra_ictlr_syscore_init();
pr_info("%s: %d interrupts forwarded to %s\n", pr_info("%pOF: %d interrupts forwarded to %pOF\n",
node->full_name, num_ictlrs * 32, parent->full_name); node, num_ictlrs * 32, parent);
return 0; return 0;
......
/*
* Driver for UniPhier AIDET (ARM Interrupt Detector)
*
* Copyright (C) 2017 Socionext Inc.
* Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define UNIPHIER_AIDET_NR_IRQS 256
#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
struct uniphier_aidet_priv {
struct irq_domain *domain;
void __iomem *reg_base;
spinlock_t lock;
u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
};
static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
unsigned int reg, u32 mask, u32 val)
{
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&priv->lock, flags);
tmp = readl_relaxed(priv->reg_base + reg);
tmp &= ~mask;
tmp |= mask & val;
writel_relaxed(tmp, priv->reg_base + reg);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
unsigned long index, unsigned int val)
{
unsigned int reg;
u32 mask;
reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
mask = BIT(index % 32);
uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
}
static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
{
struct uniphier_aidet_priv *priv = data->chip_data;
unsigned int val;
/* enable inverter for active low triggers */
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
val = 0;
break;
case IRQ_TYPE_EDGE_FALLING:
val = 1;
type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
val = 1;
type = IRQ_TYPE_LEVEL_HIGH;
break;
default:
return -EINVAL;
}
uniphier_aidet_detconf_update(priv, data->hwirq, val);
return irq_chip_set_type_parent(data, type);
}
static struct irq_chip uniphier_aidet_irq_chip = {
.name = "AIDET",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = uniphier_aidet_irq_set_type,
};
static int uniphier_aidet_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (WARN_ON(fwspec->param_count < 2))
return -EINVAL;
*out_hwirq = fwspec->param[0];
*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *arg)
{
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq;
unsigned int type;
int ret;
if (nr_irqs != 1)
return -EINVAL;
ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
if (ret)
return ret;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
break;
case IRQ_TYPE_EDGE_FALLING:
type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
type = IRQ_TYPE_LEVEL_HIGH;
break;
default:
return -EINVAL;
}
if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
return -ENXIO;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&uniphier_aidet_irq_chip,
domain->host_data);
if (ret)
return ret;
/* parent is GIC */
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0; /* SPI */
parent_fwspec.param[1] = hwirq;
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}
static const struct irq_domain_ops uniphier_aidet_domain_ops = {
.alloc = uniphier_aidet_domain_alloc,
.free = irq_domain_free_irqs_common,
.translate = uniphier_aidet_domain_translate,
};
static int uniphier_aidet_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *parent_np;
struct irq_domain *parent_domain;
struct uniphier_aidet_priv *priv;
struct resource *res;
parent_np = of_irq_find_parent(dev->of_node);
if (!parent_np)
return -ENXIO;
parent_domain = irq_find_host(parent_np);
of_node_put(parent_np);
if (!parent_domain)
return -EPROBE_DEFER;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
spin_lock_init(&priv->lock);
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_AIDET_NR_IRQS,
of_node_to_fwnode(dev->of_node),
&uniphier_aidet_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
return 0;
}
static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
{
struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
int i;
for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
priv->saved_vals[i] = readl_relaxed(
priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
return 0;
}
static int __maybe_unused uniphier_aidet_resume(struct device *dev)
{
struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
int i;
for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
writel_relaxed(priv->saved_vals[i],
priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
return 0;
}
static const struct dev_pm_ops uniphier_aidet_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
uniphier_aidet_resume)
};
static const struct of_device_id uniphier_aidet_match[] = {
{ .compatible = "socionext,uniphier-ld4-aidet" },
{ .compatible = "socionext,uniphier-pro4-aidet" },
{ .compatible = "socionext,uniphier-sld8-aidet" },
{ .compatible = "socionext,uniphier-pro5-aidet" },
{ .compatible = "socionext,uniphier-pxs2-aidet" },
{ .compatible = "socionext,uniphier-ld11-aidet" },
{ .compatible = "socionext,uniphier-ld20-aidet" },
{ .compatible = "socionext,uniphier-pxs3-aidet" },
{ /* sentinel */ }
};
static struct platform_driver uniphier_aidet_driver = {
.probe = uniphier_aidet_probe,
.driver = {
.name = "uniphier-aidet",
.of_match_table = uniphier_aidet_match,
.pm = &uniphier_aidet_pm_ops,
},
};
builtin_platform_driver(uniphier_aidet_driver);
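For orientation, a minimal consumer-side sketch (device, handler and names are illustrative, not part of this series): a peripheral whose line is active low can simply be requested with IRQF_TRIGGER_LOW; uniphier_aidet_irq_set_type() then enables the inverter for that hwirq and hands IRQ_TYPE_LEVEL_HIGH down to the GIC.
/* Hypothetical consumer; the AIDET sits transparently between it and the GIC. */
static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}
static int my_dev_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
        /* Active-low request: the AIDET inverts it, the GIC sees level-high */
        return devm_request_irq(&pdev->dev, irq, my_dev_isr,
                                IRQF_TRIGGER_LOW, "my-dev", pdev);
}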
...@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
if (irqc->intr_mask >> nr_irq) if (irqc->intr_mask >> nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n", pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc->full_name, nr_irq, irqc->intr_mask); intc, nr_irq, irqc->intr_mask);
/* /*
......
...@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq, ...@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
return 0; return 0;
} }
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
return xtensa_irq_map(d, irq, hw); return xtensa_irq_map(d, irq, hw);
} }
...@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d) ...@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
static int xtensa_mx_irq_set_affinity(struct irq_data *d, static int xtensa_mx_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force) const struct cpumask *dest, bool force)
{ {
unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask); int cpu = cpumask_any_and(dest, cpu_online_mask);
unsigned mask = 1u << cpu;
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE)); set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return 0; return 0;
} }
......
...@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); ...@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data); extern void irq_chip_ack_parent(struct irq_data *data);
...@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) ...@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
static inline static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{ {
return d->common->effective_affinity; if (!cpumask_empty(d->common->effective_affinity))
return d->common->effective_affinity;
return d->common->affinity;
} }
static inline void irq_data_update_effective_affinity(struct irq_data *d, static inline void irq_data_update_effective_affinity(struct irq_data *d,
const struct cpumask *m) const struct cpumask *m)
......
#ifndef _LINUX_IRQ_SIM_H
#define _LINUX_IRQ_SIM_H
/*
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/irq_work.h>
#include <linux/device.h>
/*
* Provides a framework for allocating simulated interrupts which can be
* requested like normal irqs and enqueued from process context.
*/
struct irq_sim_work_ctx {
struct irq_work work;
int irq;
};
struct irq_sim_irq_ctx {
int irqnum;
bool enabled;
};
struct irq_sim {
struct irq_sim_work_ctx work_ctx;
int irq_base;
unsigned int irq_count;
struct irq_sim_irq_ctx *irqs;
};
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
unsigned int num_irqs);
void irq_sim_fini(struct irq_sim *sim);
void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
#endif /* _LINUX_IRQ_SIM_H */
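A minimal usage sketch of this API (the test function and names below are illustrative, assuming a caller that has a struct device for the devres variant):
/* Sketch: allocate two simulated interrupts and fire one from process context. */
static irqreturn_t sim_test_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}
static int sim_test(struct device *dev)
{
        static struct irq_sim sim;
        int irq, ret;
        ret = devm_irq_sim_init(dev, &sim, 2);
        if (ret)
                return ret;
        irq = irq_sim_irqnum(&sim, 0);
        ret = request_irq(irq, sim_test_handler, 0, "sim-test", NULL);
        if (ret)
                return ret;
        irq_sim_fire(&sim, 0);  /* queues irq_work; the handler runs asynchronously */
        return 0;
}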
...@@ -27,6 +27,8 @@ struct gic_kvm_info { ...@@ -27,6 +27,8 @@ struct gic_kvm_info {
unsigned int maint_irq; unsigned int maint_irq;
/* Virtual control interface */ /* Virtual control interface */
struct resource vctrl; struct resource vctrl;
/* vlpi support */
bool has_v4;
}; };
const struct gic_kvm_info *gic_get_kvm_info(void); const struct gic_kvm_info *gic_get_kvm_info(void);
......
...@@ -204,6 +204,7 @@ ...@@ -204,6 +204,7 @@
#define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1) #define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4) #define GICR_TYPER_LAST (1U << 4)
#define GIC_V3_REDIST_SIZE 0x20000 #define GIC_V3_REDIST_SIZE 0x20000
...@@ -211,6 +212,69 @@ ...@@ -211,6 +212,69 @@
#define LPI_PROP_GROUP1 (1 << 1) #define LPI_PROP_GROUP1 (1 << 1)
#define LPI_PROP_ENABLED (1 << 0) #define LPI_PROP_ENABLED (1 << 0)
/*
* Re-Distributor registers, offsets from VLPI_base
*/
#define GICR_VPROPBASER 0x0070
#define GICR_VPROPBASER_IDBITS_MASK 0x1f
#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
#define GICR_VPROPBASER_SHAREABILITY_MASK \
GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
#define GICR_VPROPBASER_CACHEABILITY_MASK \
GICR_VPROPBASER_INNER_CACHEABILITY_MASK
#define GICR_VPROPBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
#define GICR_VPENDBASER 0x0078
#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
#define GICR_VPENDBASER_SHAREABILITY_MASK \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
#define GICR_VPENDBASER_CACHEABILITY_MASK \
GICR_VPENDBASER_INNER_CACHEABILITY_MASK
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
#define GICR_VPENDBASER_Dirty (1ULL << 60)
#define GICR_VPENDBASER_PendingLast (1ULL << 61)
#define GICR_VPENDBASER_IDAI (1ULL << 62)
#define GICR_VPENDBASER_Valid (1ULL << 63)
/* /*
* ITS registers, offsets from ITS_base * ITS registers, offsets from ITS_base
*/ */
...@@ -234,15 +298,21 @@ ...@@ -234,15 +298,21 @@
#define GITS_TRANSLATER 0x10040 #define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT (1U << 31) #define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0) #define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24 #define GITS_TYPER_HWCOLLCNT_SHIFT 24
#define GITS_TYPER_VMOVP (1ULL << 37)
#define GITS_IIDR_REV_SHIFT 12 #define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) #define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
...@@ -341,6 +411,18 @@ ...@@ -341,6 +411,18 @@
#define GITS_CMD_CLEAR 0x04 #define GITS_CMD_CLEAR 0x04
#define GITS_CMD_SYNC 0x05 #define GITS_CMD_SYNC 0x05
/*
* GICv4 ITS specific commands
*/
#define GITS_CMD_GICv4(x) ((x) | 0x20)
#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP is the odd one, as it doesn't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
/* /*
* ITS error numbers * ITS error numbers
*/ */
...@@ -487,6 +569,8 @@ struct rdists { ...@@ -487,6 +569,8 @@ struct rdists {
struct page *prop_page; struct page *prop_page;
int id_bits; int id_bits;
u64 flags; u64 flags;
bool has_vlpis;
bool has_direct_lpi;
}; };
struct irq_domain; struct irq_domain;
......
/*
* Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
#define __LINUX_IRQCHIP_ARM_GIC_V4_H
struct its_vpe;
/* Embedded in kvm.arch */
struct its_vm {
struct fwnode_handle *fwnode;
struct irq_domain *domain;
struct page *vprop_page;
struct its_vpe **vpes;
int nr_vpes;
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
};
/* Embedded in kvm_vcpu.arch */
struct its_vpe {
struct page *vpt_page;
struct its_vm *its_vm;
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
/* VPE proxy mapping */
int vpe_proxy_event;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
* programming of the ITS.
*/
u16 col_idx;
/* Unique (system-wide) VPE identifier */
u16 vpe_id;
/* Implementation Defined Area Invalid */
bool idai;
/* Pending VLPIs on schedule out? */
bool pending_last;
};
/*
* struct its_vlpi_map: structure describing the mapping of a
* VLPI. Only to be interpreted in the context of a physical interrupt
* it complements. To be used as the vcpu_info passed to
* irq_set_vcpu_affinity().
*
* @vm: Pointer to the GICv4 notion of a VM
* @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
* @vintid: Virtual LPI number
* @db_enabled: Is the VPE doorbell to be generated?
*/
struct its_vlpi_map {
struct its_vm *vm;
struct its_vpe *vpe;
u32 vintid;
bool db_enabled;
};
enum its_vcpu_info_cmd_type {
MAP_VLPI,
GET_VLPI,
PROP_UPDATE_VLPI,
PROP_UPDATE_AND_INV_VLPI,
SCHEDULE_VPE,
DESCHEDULE_VPE,
INVALL_VPE,
};
struct its_cmd_info {
enum its_vcpu_info_cmd_type cmd_type;
union {
struct its_vlpi_map *map;
u8 config;
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
#endif
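As a rough sketch of the intended calling pattern (identifiers are illustrative and not taken from the KVM code): the hypervisor fills an its_vlpi_map for a host interrupt and hands it to its_map_vlpi(), which forwards it through irq_set_vcpu_affinity() as described above.
/* Sketch: map host interrupt host_irq to virtual LPI vintid on the first VPE. */
static int map_forwarded_irq(struct its_vm *vm, int host_irq, u32 vintid)
{
        struct its_vlpi_map map = {
                .vm         = vm,
                .vpe        = vm->vpes[0],
                .vintid     = vintid,
                .db_enabled = true,     /* doorbell the VPE if it is not scheduled */
        };
        return its_map_vlpi(host_irq, &map);
}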
...@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain, ...@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
extern void irq_domain_free_irqs_top(struct irq_domain *domain, extern void irq_domain_free_irqs_top(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs); unsigned int virq, unsigned int nr_irqs);
extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base, unsigned int irq_base,
unsigned int nr_irqs, void *arg); unsigned int nr_irqs, void *arg);
......
...@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP ...@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP
config IRQ_DOMAIN config IRQ_DOMAIN
bool bool
# Support for simulated interrupts
config IRQ_SIM
bool
select IRQ_WORK
# Support for hierarchical irq domains # Support for hierarchical irq domains
config IRQ_DOMAIN_HIERARCHY config IRQ_DOMAIN_HIERARCHY
bool bool
select IRQ_DOMAIN select IRQ_DOMAIN
# Support for hierarchical fasteoi+edge and fasteoi+level handlers
config IRQ_FASTEOI_HIERARCHY_HANDLERS
bool
# Generic IRQ IPI support # Generic IRQ IPI support
config GENERIC_IRQ_IPI config GENERIC_IRQ_IPI
bool bool
......
...@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o ...@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o
obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_IRQ_SIM) += irq_sim.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
......
...@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void) ...@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void)
} }
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
* handle_fasteoi_ack_irq - irq handler for edge hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_ack() function
* called.
*/
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
preflow_handler(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
/**
* handle_fasteoi_mask_irq - irq handler for level hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_mask_ack() function
* called.
*/
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
preflow_handler(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
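A hedged sketch of how a stacked irqchip could select these flow handlers when allocating an interrupt on top of a transparent parent (the chip, domain callback and fwspec handling below are hypothetical):
/* Sketch: an edge interrupt stacked on a transparent parent uses fasteoi+ack. */
static struct irq_chip my_stacked_chip = {
        .name             = "my-stacked",
        .irq_mask         = irq_chip_mask_parent,
        .irq_unmask       = irq_chip_unmask_parent,
        .irq_ack          = irq_chip_ack_parent,
        .irq_eoi          = irq_chip_eoi_parent,
        .irq_set_affinity = irq_chip_set_affinity_parent,
};
static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq = fwspec->param[0];
        /* handle_fasteoi_mask_irq would be the level-triggered counterpart */
        irq_domain_set_info(domain, virq, hwirq, &my_stacked_chip, NULL,
                            handle_fasteoi_ack_irq, NULL, NULL);
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, fwspec);
}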
/** /**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL) * NULL)
...@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data) ...@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data)
else else
data->chip->irq_unmask(data); data->chip->irq_unmask(data);
} }
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
/** /**
* irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
...@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data) ...@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data)
else else
data->chip->irq_mask(data); data->chip->irq_mask(data);
} }
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
/** /**
* irq_chip_ack_parent - Acknowledge the parent interrupt * irq_chip_ack_parent - Acknowledge the parent interrupt
...@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data, ...@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
return -ENOSYS; return -ENOSYS;
} }
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
/** /**
* irq_chip_set_type_parent - Set IRQ type on the parent interrupt * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
*/ */
#include <linux/irqdomain.h> #include <linux/irqdomain.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/uaccess.h>
#include "internals.h" #include "internals.h"
...@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file) ...@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file)
return single_open(file, irq_debug_show, inode->i_private); return single_open(file, irq_debug_show, inode->i_private);
} }
static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct irq_desc *desc = file_inode(file)->i_private;
char buf[8] = { 0, };
size_t size;
size = min(sizeof(buf) - 1, count);
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
if (!strncmp(buf, "trigger", size)) {
unsigned long flags;
int err;
/* Try the HW interface first */
err = irq_set_irqchip_state(irq_desc_get_irq(desc),
IRQCHIP_STATE_PENDING, true);
if (!err)
return count;
/*
* Otherwise, try to inject via the resend interface,
* which may or may not succeed.
*/
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_settings_is_level(desc)) {
/* Can't do level, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
check_irq_resend(desc);
err = 0;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
return err ? err : count;
}
return count;
}
static const struct file_operations dfs_irq_ops = { static const struct file_operations dfs_irq_ops = {
.open = irq_debug_open, .open = irq_debug_open,
.write = irq_debug_write,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = single_release, .release = single_release,
...@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) ...@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
return; return;
sprintf(name, "%d", irq); sprintf(name, "%d", irq);
desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc, desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
&dfs_irq_ops); &dfs_irq_ops);
} }
......
...@@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) ...@@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
#define for_each_action_of_desc(desc, act) \ #define for_each_action_of_desc(desc, act) \
for (act = desc->act; act; act = act->next) for (act = desc->action; act; act = act->next)
struct irq_desc * struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
......
/*
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/irq_sim.h>
#include <linux/irq.h>
struct irq_sim_devres {
struct irq_sim *sim;
};
static void irq_sim_irqmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = false;
}
static void irq_sim_irqunmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = true;
}
static struct irq_chip irq_sim_irqchip = {
.name = "irq_sim",
.irq_mask = irq_sim_irqmask,
.irq_unmask = irq_sim_irqunmask,
};
static void irq_sim_handle_irq(struct irq_work *work)
{
struct irq_sim_work_ctx *work_ctx;
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
handle_simple_irq(irq_to_desc(work_ctx->irq));
}
/**
* irq_sim_init - Initialize the interrupt simulator: allocate a range of
* dummy interrupts.
*
* @sim: The interrupt simulator object to initialize.
* @num_irqs: Number of interrupts to allocate
*
* Returns 0 on success and a negative error number on failure.
*/
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
{
int i;
sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
if (!sim->irqs)
return -ENOMEM;
sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (sim->irq_base < 0) {
kfree(sim->irqs);
return sim->irq_base;
}
for (i = 0; i < num_irqs; i++) {
sim->irqs[i].irqnum = sim->irq_base + i;
sim->irqs[i].enabled = false;
irq_set_chip(sim->irq_base + i, &irq_sim_irqchip);
irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]);
irq_set_handler(sim->irq_base + i, &handle_simple_irq);
irq_modify_status(sim->irq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
sim->irq_count = num_irqs;
return 0;
}
EXPORT_SYMBOL_GPL(irq_sim_init);
/**
* irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt
* descriptors and allocated memory.
*
* @sim: The interrupt simulator to tear down.
*/
void irq_sim_fini(struct irq_sim *sim)
{
irq_work_sync(&sim->work_ctx.work);
irq_free_descs(sim->irq_base, sim->irq_count);
kfree(sim->irqs);
}
EXPORT_SYMBOL_GPL(irq_sim_fini);
static void devm_irq_sim_release(struct device *dev, void *res)
{
struct irq_sim_devres *this = res;
irq_sim_fini(this->sim);
}
/**
* devm_irq_sim_init - Initialize the interrupt simulator for a managed device.
*
* @dev: Device to initialize the simulator object for.
* @sim: The interrupt simulator object to initialize.
* @num_irqs: Number of interrupts to allocate
*
* Returns 0 on success and a negative error number on failure.
*/
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
unsigned int num_irqs)
{
struct irq_sim_devres *dr;
int rv;
dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rv = irq_sim_init(sim, num_irqs);
if (rv) {
devres_free(dr);
return rv;
}
dr->sim = sim;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_irq_sim_init);
/**
* irq_sim_fire - Enqueue an interrupt.
*
* @sim: The interrupt simulator object.
* @offset: Offset of the simulated interrupt which should be fired.
*/
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
if (sim->irqs[offset].enabled) {
sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
irq_work_queue(&sim->work_ctx.work);
}
}
EXPORT_SYMBOL_GPL(irq_sim_fire);
/**
* irq_sim_irqnum - Get the allocated number of a dummy interrupt.
*
* @sim: The interrupt simulator object.
* @offset: Offset of the simulated interrupt for which to retrieve
* the number.
*/
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset)
{
return sim->irqs[offset].irqnum;
}
EXPORT_SYMBOL_GPL(irq_sim_irqnum);
...@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain) ...@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
} }
EXPORT_SYMBOL_GPL(irq_set_default_host); EXPORT_SYMBOL_GPL(irq_set_default_host);
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
}
}
static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = irq_data->irq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
}
}
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{ {
struct irq_data *irq_data = irq_get_irq_data(irq); struct irq_data *irq_data = irq_get_irq_data(irq);
...@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) ...@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
domain->mapcount--; domain->mapcount--;
/* Clear reverse map for this hwirq */ /* Clear reverse map for this hwirq */
if (hwirq < domain->revmap_size) { irq_domain_clear_mapping(domain, hwirq);
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
}
} }
int irq_domain_associate(struct irq_domain *domain, unsigned int virq, int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
...@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, ...@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
} }
domain->mapcount++; domain->mapcount++;
if (hwirq < domain->revmap_size) { irq_domain_set_mapping(domain, hwirq, irq_data);
domain->linear_revmap[hwirq] = virq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
}
mutex_unlock(&irq_domain_mutex); mutex_unlock(&irq_domain_mutex);
irq_clear_status_flags(virq, IRQ_NOREQUEST); irq_clear_status_flags(virq, IRQ_NOREQUEST);
...@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq) ...@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
for (data = irq_get_irq_data(virq); data; data = data->parent_data) { for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
struct irq_domain *domain = data->domain; struct irq_domain *domain = data->domain;
irq_hw_number_t hwirq = data->hwirq;
domain->mapcount++; domain->mapcount++;
if (hwirq < domain->revmap_size) { irq_domain_set_mapping(domain, data->hwirq, data);
domain->linear_revmap[hwirq] = virq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, data);
mutex_unlock(&revmap_trees_mutex);
}
/* If not already assigned, give the domain the chip's name */ /* If not already assigned, give the domain the chip's name */
if (!domain->name && data->chip) if (!domain->name && data->chip)
...@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq) ...@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
irq_hw_number_t hwirq = data->hwirq; irq_hw_number_t hwirq = data->hwirq;
domain->mapcount--; domain->mapcount--;
if (hwirq < domain->revmap_size) { irq_domain_clear_mapping(domain, hwirq);
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
}
} }
} }
...@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain, ...@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base, unsigned int irq_base,
unsigned int nr_irqs) unsigned int nr_irqs)
{ {
domain->ops->free(domain, irq_base, nr_irqs); if (domain->ops->free)
domain->ops->free(domain, irq_base, nr_irqs);
} }
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
...@@ -1448,6 +1449,175 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, ...@@ -1448,6 +1449,175 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return ret; return ret;
} }
/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
void **slot;
if (d->hwirq < d->domain->revmap_size)
return; /* Not using radix tree. */
/* Fix up the revmap. */
mutex_lock(&revmap_trees_mutex);
slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
if (slot)
radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
mutex_unlock(&revmap_trees_mutex);
}
/**
* irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
* @domain: Domain to push.
* @virq: Irq to push the domain in to.
* @arg: Passed to the irq_domain_ops alloc() function.
*
* For an already existing irqdomain hierarchy, as might be obtained
* via a call to pci_enable_msix(), add an additional domain to the
* head of the processing chain. Must be called before request_irq()
* has been called.
*/
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
struct irq_data *child_irq_data;
struct irq_data *root_irq_data = irq_get_irq_data(virq);
struct irq_desc *desc;
int rv = 0;
/*
* Check that no action has been set, which indicates the virq
* is in a state where this function doesn't have to deal with
* races between interrupt handling and maintaining the
* hierarchy. This will catch gross misuse. Attempting to
* make the check race free would require holding locks across
* calls to struct irq_domain_ops->alloc(), which could lead
* to deadlock, so we just do a simple check before starting.
*/
desc = irq_to_desc(virq);
if (!desc)
return -EINVAL;
if (WARN_ON(desc->action))
return -EBUSY;
if (domain == NULL)
return -EINVAL;
if (WARN_ON(!irq_domain_is_hierarchy(domain)))
return -EINVAL;
if (!root_irq_data)
return -EINVAL;
if (domain->parent != root_irq_data->domain)
return -EINVAL;
child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
irq_data_get_node(root_irq_data));
if (!child_irq_data)
return -ENOMEM;
mutex_lock(&irq_domain_mutex);
/* Copy the original irq_data. */
*child_irq_data = *root_irq_data;
/*
* Overwrite the root_irq_data, which is embedded in struct
* irq_desc, with values for this domain.
*/
root_irq_data->parent_data = child_irq_data;
root_irq_data->domain = domain;
root_irq_data->mask = 0;
root_irq_data->hwirq = 0;
root_irq_data->chip = NULL;
root_irq_data->chip_data = NULL;
/* May (probably does) set hwirq, chip, etc. */
rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (rv) {
/* Restore the original irq_data. */
*root_irq_data = *child_irq_data;
goto error;
}
irq_domain_fix_revmap(child_irq_data);
irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
error:
mutex_unlock(&irq_domain_mutex);
return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);
/**
* irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
* @domain: Domain to remove.
* @virq: Irq to remove the domain from.
*
* Undo the effects of a call to irq_domain_push_irq(). Must be
* called either before request_irq() or after free_irq().
*/
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
struct irq_data *root_irq_data = irq_get_irq_data(virq);
struct irq_data *child_irq_data;
struct irq_data *tmp_irq_data;
struct irq_desc *desc;
/*
* Check that no action is set, which indicates the virq is in
* a state where this function doesn't have to deal with races
* between interrupt handling and maintaining the hierarchy.
* This will catch gross misuse. Attempting to make the check
* race free would require holding locks across calls to
* struct irq_domain_ops->free(), which could lead to
* deadlock, so we just do a simple check before starting.
*/
desc = irq_to_desc(virq);
if (!desc)
return -EINVAL;
if (WARN_ON(desc->action))
return -EBUSY;
if (domain == NULL)
return -EINVAL;
if (!root_irq_data)
return -EINVAL;
tmp_irq_data = irq_domain_get_irq_data(domain, virq);
/* We can only "pop" if this domain is at the top of the list */
if (WARN_ON(root_irq_data != tmp_irq_data))
return -EINVAL;
if (WARN_ON(root_irq_data->domain != domain))
return -EINVAL;
child_irq_data = root_irq_data->parent_data;
if (WARN_ON(!child_irq_data))
return -EINVAL;
mutex_lock(&irq_domain_mutex);
root_irq_data->parent_data = NULL;
irq_domain_clear_mapping(domain, root_irq_data->hwirq);
irq_domain_free_irqs_hierarchy(domain, virq, 1);
/* Restore the original irq_data. */
*root_irq_data = *child_irq_data;
irq_domain_fix_revmap(root_irq_data);
mutex_unlock(&irq_domain_mutex);
kfree(child_irq_data);
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
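A minimal sketch of the intended push/pop pattern, assuming a driver that already owns a virq (for instance from pci_enable_msix()) and wants to interpose its own translation domain; all identifiers below are illustrative:
/* Sketch: interpose my_domain above the existing hierarchy of virq. */
static int attach_translator(struct irq_domain *my_domain, int virq, void *arg)
{
        /* must run before request_irq() on virq */
        return irq_domain_push_irq(my_domain, virq, arg);
}
static void detach_translator(struct irq_domain *my_domain, int virq)
{
        /* only valid before request_irq() or after free_irq() */
        irq_domain_pop_irq(my_domain, virq);
}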
/** /**
* irq_domain_free_irqs - Free IRQ number and associated data structures * irq_domain_free_irqs - Free IRQ number and associated data structures
* @virq: base IRQ number * @virq: base IRQ number
......
...@@ -400,8 +400,18 @@ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) ...@@ -400,8 +400,18 @@ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
return -EINVAL; return -EINVAL;
data = irq_desc_get_irq_data(desc); data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data); do {
if (chip && chip->irq_set_vcpu_affinity) chip = irq_data_get_irq_chip(data);
if (chip && chip->irq_set_vcpu_affinity)
break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
data = data->parent_data;
#else
data = NULL;
#endif
} while (data);
if (data)
ret = chip->irq_set_vcpu_affinity(data, vcpu_info); ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
irq_put_desc_unlock(desc, flags); irq_put_desc_unlock(desc, flags);
......
...@@ -61,12 +61,12 @@ static int show_irq_affinity(int type, struct seq_file *m) ...@@ -61,12 +61,12 @@ static int show_irq_affinity(int type, struct seq_file *m)
case EFFECTIVE: case EFFECTIVE:
case EFFECTIVE_LIST: case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
mask = desc->irq_common_data.effective_affinity; mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
break; break;
#else
return -EINVAL;
#endif #endif
}; default:
return -EINVAL;
}
switch (type) { switch (type) {
case AFFINITY_LIST: case AFFINITY_LIST:
......