Commit 93cc1228 authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "The interrupt subsystem delivers this time:

   - Refactoring of the GIC-V3 driver to prepare for the GIC-V4 support

   - Initial GIC-V4 support

   - Consolidation of the FSL MSI support

   - Utilize the effective affinity interface in various ARM irqchip
     drivers

   - Yet another interrupt chip driver (UniPhier AIDET)

   - Bulk conversion of the irq chip drivers to use %pOF

   - The usual small fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (77 commits)
  irqchip/ls-scfg-msi: Add MSI affinity support
  irqchip/ls-scfg-msi: Add LS1043a v1.1 MSI support
  irqchip/ls-scfg-msi: Add LS1046a MSI support
  arm64: dts: ls1046a: Add MSI dts node
  arm64: dts: ls1043a: Share all MSIs
  arm: dts: ls1021a: Share all MSIs
  arm64: dts: ls1043a: Fix typo of MSI compatible string
  arm: dts: ls1021a: Fix typo of MSI compatible string
  irqchip/ls-scfg-msi: Fix typo of MSI compatible strings
  irqchip/irq-bcm7120-l2: Use correct I/O accessors for irq_fwd_mask
  irqchip/mmp: Make mmp_intc_conf const
  irqchip/gic: Make irq_chip const
  irqchip/gic-v3: Advertise GICv4 support to KVM
  irqchip/gic-v4: Enable low-level GICv4 operations
  irqchip/gic-v4: Add some basic documentation
  irqchip/gic-v4: Add VLPI configuration interface
  irqchip/gic-v4: Add VPE command interface
  irqchip/gic-v4: Add per-VM VPE domain creation
  irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs
  irqchip/gic-v3-its: Allow doorbell interrupts to be injected/cleared
  ...
parents dd90cccf 9fbd7fd2
@@ -4,8 +4,10 @@ Required properties:
 - compatible: should be "fsl,<soc-name>-msi" to identify
         Layerscape PCIe MSI controller block such as:
-        "fsl,1s1021a-msi"
-        "fsl,1s1043a-msi"
+        "fsl,ls1021a-msi"
+        "fsl,ls1043a-msi"
+        "fsl,ls1046a-msi"
+        "fsl,ls1043a-v1.1-msi"
 - msi-controller: indicates that this is a PCIe MSI controller node
 - reg: physical base address of the controller and length of memory mapped.
 - interrupts: an interrupt to the parent interrupt controller.
@@ -23,7 +25,7 @@ MSI controller node
 Examples:
         msi1: msi-controller@1571000 {
-                compatible = "fsl,1s1043a-msi";
+                compatible = "fsl,ls1043a-msi";
                 reg = <0x0 0x1571000 0x0 0x8>,
                 msi-controller;
                 interrupts = <0 116 0x4>;
...
UniPhier AIDET
UniPhier AIDET (ARM Interrupt Detector) is an add-on block for the ARM GIC
(Generic Interrupt Controller). The GIC itself can handle only high-level and
rising-edge interrupts, so the AIDET provides a logic inverter to support
low-level and falling-edge interrupts.
Required properties:
- compatible: Should be one of the following:
"socionext,uniphier-ld4-aidet" - for LD4 SoC
"socionext,uniphier-pro4-aidet" - for Pro4 SoC
"socionext,uniphier-sld8-aidet" - for sLD8 SoC
"socionext,uniphier-pro5-aidet" - for Pro5 SoC
"socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
"socionext,uniphier-ld11-aidet" - for LD11 SoC
"socionext,uniphier-ld20-aidet" - for LD20 SoC
"socionext,uniphier-pxs3-aidet" - for PXs3 SoC
- reg: Specifies offset and length of the register set for the device.
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
source. The value should be 2. The first cell defines the interrupt number
(corresponds to the SPI interrupt number of GIC). The second cell specifies
the trigger type as defined in interrupts.txt in this directory.
Example:
aidet: aidet@5fc20000 {
compatible = "socionext,uniphier-pro4-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
#interrupt-cells = <2>;
};
@@ -312,6 +312,7 @@ IRQ
   devm_irq_alloc_descs_from()
   devm_irq_alloc_generic_chip()
   devm_irq_setup_generic_chip()
+  devm_irq_sim_init()
 LED
   devm_led_classdev_register()
...
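Aside: devm_irq_sim_init() is the managed form of the new IRQ simulator API. A minimal, hypothetical usage sketch follows (driver names are invented; the signature is as merged this cycle in include/linux/irq_sim.h):

#include <linux/device.h>
#include <linux/irq_sim.h>
#include <linux/slab.h>

struct demo_priv {
        struct irq_sim sim;
};

static int demo_probe(struct device *dev)
{
        struct demo_priv *priv;
        int ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Eight simulated interrupt lines, torn down on driver detach. */
        ret = devm_irq_sim_init(dev, &priv->sim, 8);
        if (ret < 0)
                return ret;

        /*
         * irq_sim_irqnum(&priv->sim, 0) yields the Linux IRQ number for
         * line 0, and irq_sim_fire() raises it from driver code.
         */
        return 0;
}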
@@ -1993,6 +1993,7 @@ F:      arch/arm64/boot/dts/socionext/
 F:      drivers/bus/uniphier-system-bus.c
 F:      drivers/clk/uniphier/
 F:      drivers/i2c/busses/i2c-uniphier*
+F:      drivers/irqchip/irq-uniphier-aidet.c
 F:      drivers/pinctrl/uniphier/
 F:      drivers/reset/reset-uniphier.c
 F:      drivers/tty/serial/8250/8250_uniphier.c
...
@@ -129,14 +129,14 @@ gic: interrupt-controller@1400000 {
         };
 
         msi1: msi-controller@1570e00 {
-                compatible = "fsl,1s1021a-msi";
+                compatible = "fsl,ls1021a-msi";
                 reg = <0x0 0x1570e00 0x0 0x8>;
                 msi-controller;
                 interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
         };
 
         msi2: msi-controller@1570e08 {
-                compatible = "fsl,1s1021a-msi";
+                compatible = "fsl,ls1021a-msi";
                 reg = <0x0 0x1570e08 0x0 0x8>;
                 msi-controller;
                 interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,7 +699,7 @@ pcie@3400000 {
         bus-range = <0x0 0xff>;
         ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000   /* downstream I/O */
                   0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-        msi-parent = <&msi1>;
+        msi-parent = <&msi1>, <&msi2>;
         #interrupt-cells = <1>;
         interrupt-map-mask = <0 0 0 7>;
         interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -722,7 +722,7 @@ pcie@3500000 {
         bus-range = <0x0 0xff>;
         ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000   /* downstream I/O */
                   0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-        msi-parent = <&msi2>;
+        msi-parent = <&msi1>, <&msi2>;
         #interrupt-cells = <1>;
         interrupt-map-mask = <0 0 0 7>;
         interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
...
@@ -275,6 +275,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 #define gicr_read_pendbaser(c)          __gic_readq_nonatomic(c)
 #define gicr_write_pendbaser(v, c)      __gic_writeq_nonatomic(v, c)
 
+/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c)                readl_relaxed(c)
+#define gic_write_lpir(v, c)            writel_relaxed(lower_32_bits(v), c)
+
 /*
  * GITS_TYPER is an ID register and doesn't need atomicity.
  */
@@ -291,5 +297,33 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
  */
 #define gits_write_cwriter(v, c)        __gic_writeq_nonatomic(v, c)
 
+/*
+ * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_vpropbaser(v, c)     __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+{
+        u32 tmp;
+
+        tmp = readl_relaxed(addr + 4);
+        if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+                tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+                writel_relaxed(tmp, addr + 4);
+        }
+
+        /*
+         * Use the fact that __gic_writeq_nonatomic writes the second
+         * half of the 64bit quantity after the first.
+         */
+        __gic_writeq_nonatomic(val, addr);
+}
+
+#define gits_read_vpendbaser(c)         __gic_readq_nonatomic(c)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
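Aside: the comment in gits_write_vpendbaser() leans on the write ordering of __gic_writeq_nonatomic(), which is defined earlier in this header. A sketch of that helper, reproduced here for context (not part of this hunk):

static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
{
        writel_relaxed((u32)val, addr);             /* low word first... */
        writel_relaxed((u32)(val >> 32), addr + 4); /* ...high word second */
}

Because the high word (which holds the Valid bit) is written last, clearing Valid up front guarantees the rest of the register never changes while Valid is still set.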
@@ -39,6 +39,7 @@ config ARCH_HIP04
         select HAVE_ARM_ARCH_TIMER
         select MCPM if SMP
         select MCPM_QUAD_CLUSTER if SMP
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
         help
           Support for Hisilicon HiP04 SoC family
...
@@ -653,21 +653,21 @@ sata: sata@3200000 {
         };
 
         msi1: msi-controller1@1571000 {
-                compatible = "fsl,1s1043a-msi";
+                compatible = "fsl,ls1043a-msi";
                 reg = <0x0 0x1571000 0x0 0x8>;
                 msi-controller;
                 interrupts = <0 116 0x4>;
         };
 
         msi2: msi-controller2@1572000 {
-                compatible = "fsl,1s1043a-msi";
+                compatible = "fsl,ls1043a-msi";
                 reg = <0x0 0x1572000 0x0 0x8>;
                 msi-controller;
                 interrupts = <0 126 0x4>;
         };
 
         msi3: msi-controller3@1573000 {
-                compatible = "fsl,1s1043a-msi";
+                compatible = "fsl,ls1043a-msi";
                 reg = <0x0 0x1573000 0x0 0x8>;
                 msi-controller;
                 interrupts = <0 160 0x4>;
@@ -689,7 +689,7 @@ pcie@3400000 {
         bus-range = <0x0 0xff>;
         ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000   /* downstream I/O */
                   0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-        msi-parent = <&msi1>;
+        msi-parent = <&msi1>, <&msi2>, <&msi3>;
         #interrupt-cells = <1>;
         interrupt-map-mask = <0 0 0 7>;
         interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
@@ -714,7 +714,7 @@ pcie@3500000 {
         bus-range = <0x0 0xff>;
         ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000   /* downstream I/O */
                   0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-        msi-parent = <&msi2>;
+        msi-parent = <&msi1>, <&msi2>, <&msi3>;
         #interrupt-cells = <1>;
         interrupt-map-mask = <0 0 0 7>;
         interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
@@ -739,7 +739,7 @@ pcie@3600000 {
         bus-range = <0x0 0xff>;
         ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000   /* downstream I/O */
                   0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-        msi-parent = <&msi3>;
+        msi-parent = <&msi1>, <&msi2>, <&msi3>;
         #interrupt-cells = <1>;
         interrupt-map-mask = <0 0 0 7>;
         interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
...
@@ -630,6 +630,37 @@ sata: sata@3200000 {
                 interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                 clocks = <&clockgen 4 1>;
         };
+
+        msi1: msi-controller@1580000 {
+                compatible = "fsl,ls1046a-msi";
+                msi-controller;
+                reg = <0x0 0x1580000 0x0 0x10000>;
+                interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+        };
+
+        msi2: msi-controller@1590000 {
+                compatible = "fsl,ls1046a-msi";
+                msi-controller;
+                reg = <0x0 0x1590000 0x0 0x10000>;
+                interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+        };
+
+        msi3: msi-controller@15a0000 {
+                compatible = "fsl,ls1046a-msi";
+                msi-controller;
+                reg = <0x0 0x15a0000 0x0 0x10000>;
+                interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+        };
 };
 
 reserved-memory {
...
@@ -116,6 +116,8 @@ static inline void gic_write_bpr1(u32 val)
 #define gic_read_typer(c)               readq_relaxed(c)
 #define gic_write_irouter(v, c)         writeq_relaxed(v, c)
+#define gic_read_lpir(c)                readq_relaxed(c)
+#define gic_write_lpir(v, c)            writeq_relaxed(v, c)
 
 #define gic_flush_dcache_to_poc(a,l)    __flush_dcache_area((a), (l))
@@ -133,5 +135,10 @@ static inline void gic_write_bpr1(u32 val)
 #define gicr_write_pendbaser(v, c)      writeq_relaxed(v, c)
 #define gicr_read_pendbaser(c)          readq_relaxed(c)
 
+#define gits_write_vpropbaser(v, c)     writeq_relaxed(v, c)
+
+#define gits_write_vpendbaser(v, c)     writeq_relaxed(v, c)
+#define gits_read_vpendbaser(c)         readq_relaxed(c)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
@@ -26,6 +26,7 @@ config METAG
         select HAVE_SYSCALL_TRACEPOINTS
         select HAVE_UNDERSCORE_SYMBOL_PREFIX
         select IRQ_DOMAIN
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
         select MODULES_USE_ELF_RELA
         select OF
         select OF_EARLY_FLATTREE
...
@@ -7,6 +7,7 @@ config ARM_GIC
         select IRQ_DOMAIN
         select IRQ_DOMAIN_HIERARCHY
         select MULTI_IRQ_HANDLER
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
         bool
@@ -34,6 +35,7 @@ config ARM_GIC_V3
         select MULTI_IRQ_HANDLER
         select IRQ_DOMAIN_HIERARCHY
         select PARTITION_PERCPU
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_V3_ITS
         bool
@@ -64,6 +66,7 @@ config ARMADA_370_XP_IRQ
         bool
         select GENERIC_IRQ_CHIP
         select PCI_MSI if PCI
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ALPINE_MSI
         bool
@@ -93,11 +96,13 @@ config BCM6345_L1_IRQ
         bool
         select GENERIC_IRQ_CHIP
         select IRQ_DOMAIN
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config BCM7038_L1_IRQ
         bool
         select GENERIC_IRQ_CHIP
         select IRQ_DOMAIN
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config BCM7120_L2_IRQ
         bool
@@ -136,6 +141,7 @@ config IRQ_MIPS_CPU
         select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
         select IRQ_DOMAIN
         select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config CLPS711X_IRQCHIP
         bool
@@ -217,6 +223,7 @@ config VERSATILE_FPGA_IRQ_NR
 config XTENSA_MX
         bool
         select IRQ_DOMAIN
+        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config XILINX_INTC
         bool
@@ -306,3 +313,11 @@ config QCOM_IRQ_COMBINER
         help
           Say yes here to add support for the IRQ combiner devices embedded
           in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+        bool "UniPhier AIDET support" if COMPILE_TEST
+        depends on ARCH_UNIPHIER || COMPILE_TEST
+        default ARCH_UNIPHIER
+        select IRQ_DOMAIN_HIERARCHY
+        help
+          Support for the UniPhier AIDET (ARM Interrupt Detector).
@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM)                += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)             += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)               += irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)                += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)            += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)            += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_PARTITION_PERCPU)          += irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)      += irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)                  += irq-nvic.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC)                 += irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)               += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
 obj-$(CONFIG_STM32_EXTI)                += irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER)         += qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET)        += irq-uniphier-aidet.o
@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {
 static struct msi_domain_info armada_370_xp_msi_domain_info = {
         .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-                   MSI_FLAG_MULTI_PCI_MSI),
+                   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
         .chip   = &armada_370_xp_msi_irq_chip,
 };
@@ -330,6 +330,8 @@ static int armada_xp_set_affinity(struct irq_data *d,
         writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
         raw_spin_unlock(&irq_controller_lock);
 
+        irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
         return IRQ_SET_MASK_OK;
 }
 #endif
@@ -363,6 +365,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
         } else {
                 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                          handle_level_irq);
+                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
         }
         irq_set_probe(virq);
...
@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,
         base = of_iomap(node, 0);
         if (!base)
-                panic("%s: unable to map IC registers\n",
-                      node->full_name);
+                panic("%pOF: unable to map IC registers\n", node);
 
         intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
                                             &armctrl_ops, NULL);
         if (!intc.domain)
-                panic("%s: unable to create IRQ domain\n", node->full_name);
+                panic("%pOF: unable to create IRQ domain\n", node);
 
         for (b = 0; b < NR_BANKS; b++) {
                 intc.pending[b] = base + reg_pending[b];
@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
                 int parent_irq = irq_of_parse_and_map(node, 0);
 
                 if (!parent_irq) {
-                        panic("%s: unable to get parent interrupt.\n",
-                              node->full_name);
+                        panic("%pOF: unable to get parent interrupt.\n",
+                              node);
                 }
                 irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
         } else {
...
@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 {
         intc.base = of_iomap(node, 0);
         if (!intc.base) {
-                panic("%s: unable to map local interrupt registers\n",
-                      node->full_name);
+                panic("%pOF: unable to map local interrupt registers\n", node);
         }
 
         bcm2835_init_local_timer_frequency();
@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
                                             &bcm2836_arm_irqchip_intc_ops,
                                             NULL);
         if (!intc.domain)
-                panic("%s: unable to create IRQ domain\n", node->full_name);
+                panic("%pOF: unable to create IRQ domain\n", node);
 
         bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
                                          &bcm2836_arm_irqchip_timer);
...
@@ -231,6 +231,8 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
         }
         raw_spin_unlock_irqrestore(&intc->lock, flags);
 
+        irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
+
         return IRQ_SET_MASK_OK_NOCOPY;
 }
 
@@ -291,6 +293,7 @@ static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
         irq_set_chip_and_handler(virq,
                 &bcm6345_l1_irq_chip, handle_percpu_irq);
         irq_set_chip_data(virq, d->host_data);
+        irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
         return 0;
 }
...
@@ -212,6 +212,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
         __bcm7038_l1_unmask(d, first_cpu);
 
         raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+        irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
 
         return 0;
 }
@@ -299,6 +301,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
 {
         irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
         irq_set_chip_data(virq, d->host_data);
+        irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
         return 0;
 }
...
@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
         if (ret < 0)
                 goto out_free_l1_data;
 
-        for (idx = 0; idx < data->n_words; idx++) {
-                __raw_writel(data->irq_fwd_mask[idx],
-                             data->pair_base[idx] +
-                             data->en_offset[idx]);
-        }
-
         for (irq = 0; irq < data->num_parent_irqs; irq++) {
                 ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
                 if (ret)
@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
                 gc->reg_base = data->pair_base[idx];
                 ct->regs.mask = data->en_offset[idx];
 
+                /* gc->reg_base is defined and so is gc->writel */
+                irq_reg_writel(gc, data->irq_fwd_mask[idx],
+                               data->en_offset[idx]);
+
                 ct->chip.irq_mask = irq_gc_mask_clr_bit;
                 ct->chip.irq_unmask = irq_gc_mask_set_bit;
                 ct->chip.irq_ack = irq_gc_noop;
...
@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
         int err;
 
         if (!parent) {
-                pr_err("%s: no parent, giving up\n", node->full_name);
+                pr_err("%pOF: no parent, giving up\n", node);
                 return -ENODEV;
         }
 
         parent_domain = irq_find_host(parent);
         if (!parent_domain) {
-                pr_err("%s: unable to obtain parent domain\n", node->full_name);
+                pr_err("%pOF: unable to obtain parent domain\n", node);
                 return -ENXIO;
         }
 
@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
                                       node, &crossbar_domain_ops,
                                       NULL);
         if (!domain) {
-                pr_err("%s: failed to allocated domain\n", node->full_name);
+                pr_err("%pOF: failed to allocated domain\n", node);
                 return -ENOMEM;
         }
...
@@ -78,7 +78,7 @@ static int __init digicolor_of_init(struct device_node *node,
         reg_base = of_iomap(node, 0);
         if (!reg_base) {
-                pr_err("%s: unable to map IC registers\n", node->full_name);
+                pr_err("%pOF: unable to map IC registers\n", node);
                 return -ENXIO;
         }
 
@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,
         ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
         if (IS_ERR(ucregs)) {
-                pr_err("%s: unable to map UC registers\n", node->full_name);
+                pr_err("%pOF: unable to map UC registers\n", node);
                 return PTR_ERR(ucregs);
         }
         /* channel 1, regular IRQs */
@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
         digicolor_irq_domain =
                 irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
         if (!digicolor_irq_domain) {
-                pr_err("%s: unable to create IRQ domain\n", node->full_name);
+                pr_err("%pOF: unable to create IRQ domain\n", node);
                 return -ENOMEM;
         }
@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
                                      "digicolor_irq", handle_level_irq,
                                      clr, 0, 0);
         if (ret) {
-                pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
+                pr_err("%pOF: unable to allocate IRQ gc\n", node);
                 return ret;
         }
...
@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
         /* Map the parent interrupt for the chained handler */
         irq = irq_of_parse_and_map(np, 0);
         if (irq <= 0) {
-                pr_err("%s: unable to parse irq\n", np->full_name);
+                pr_err("%pOF: unable to parse irq\n", np);
                 return -EINVAL;
         }
 
         ret = of_address_to_resource(np, 0, &r);
         if (ret) {
-                pr_err("%s: unable to get resource\n", np->full_name);
+                pr_err("%pOF: unable to get resource\n", np);
                 return ret;
         }
 
         if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
-                pr_err("%s: unable to request mem region\n", np->full_name);
+                pr_err("%pOF: unable to request mem region\n", np);
                 return -ENOMEM;
         }
 
         iobase = ioremap(r.start, resource_size(&r));
         if (!iobase) {
-                pr_err("%s: unable to map resource\n", np->full_name);
+                pr_err("%pOF: unable to map resource\n", np);
                 ret = -ENOMEM;
                 goto err_release;
         }
@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
         domain = irq_domain_add_linear(np, nrirqs,
                                        &irq_generic_chip_ops, NULL);
         if (!domain) {
-                pr_err("%s: unable to add irq domain\n", np->full_name);
+                pr_err("%pOF: unable to add irq domain\n", np);
                 ret = -ENOMEM;
                 goto err_unmap;
         }
@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
                                      handle_level_irq, clr, 0,
                                      IRQ_GC_INIT_MASK_CACHE);
         if (ret) {
-                pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+                pr_err("%pOF: unable to alloc irq domain gc\n", np);
                 goto err_unmap;
         }
...
@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
                 if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
                         continue;
 
-                pr_info("PCI/MSI: %s domain created\n", np->full_name);
+                pr_info("PCI/MSI: %pOF domain created\n", np);
         }
 
         return 0;
...
 /*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,6 +36,7 @@
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
@@ -48,6 +49,19 @@
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING     (1 << 0)
 
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS              lpi_id_bits
+#define LPI_PROPBASE_SZ         ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ         ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO   0xa0
+
 /*
  * Collection structure - just an ID, and a redistributor address to
  * ping. We use one per CPU as a bag of interrupts assigned to this
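Aside: as a quick sanity check of the sizing arithmetic above (an editorial worked example, assuming lpi_id_bits == 16, the cap this driver applies on typical hardware):

/*
 * With lpi_id_bits == 16:
 *   LPI_PROPBASE_SZ = ALIGN(1 << 16, SZ_64K)       = 64KB
 *                     (one config byte for each of the 65536 LPIs)
 *   LPI_PENDBASE_SZ = ALIGN((1 << 16) / 8, SZ_64K) = 64KB
 *                     (one pending bit per LPI)
 */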
@@ -88,6 +102,7 @@ struct its_node {
         u32                     ite_size;
         u32                     device_ids;
         int                     numa_node;
+        bool                    is_v4;
 };
 
 #define ITS_ITT_ALIGN           SZ_256
@@ -100,11 +115,17 @@ struct event_lpi_map {
         u16                     *col_map;
         irq_hw_number_t         lpi_base;
         int                     nr_lpis;
+        struct mutex            vlpi_lock;
+        struct its_vm           *vm;
+        struct its_vlpi_map     *vlpi_maps;
+        int                     nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
  */
 struct its_device {
         struct list_head        entry;
@@ -115,13 +136,33 @@ struct its_device {
         u32                     device_id;
 };
 
+static struct {
+        raw_spinlock_t          lock;
+        struct its_device       *dev;
+        struct its_vpe          **vpes;
+        int                     next_victim;
+} vpe_proxy;
+
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
+/*
+ * We have a maximum number of 16 ITSs in the whole system if we're
+ * using the ITSList mechanism
+ */
+#define ITS_LIST_MAX            16
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
 #define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base()      (gic_data_rdist_rd_base() + SZ_128K)
 
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                                u32 event)
@@ -142,6 +183,11 @@ struct its_cmd_desc {
                         u32 event_id;
                 } its_inv_cmd;
 
+                struct {
+                        struct its_device *dev;
+                        u32 event_id;
+                } its_clear_cmd;
+
                 struct {
                         struct its_device *dev;
                         u32 event_id;
@@ -177,6 +223,38 @@ struct its_cmd_desc {
                 struct {
                         struct its_collection *col;
                 } its_invall_cmd;
+
+                struct {
+                        struct its_vpe *vpe;
+                } its_vinvall_cmd;
+
+                struct {
+                        struct its_vpe *vpe;
+                        struct its_collection *col;
+                        bool valid;
+                } its_vmapp_cmd;
+
+                struct {
+                        struct its_vpe *vpe;
+                        struct its_device *dev;
+                        u32 virt_id;
+                        u32 event_id;
+                        bool db_enabled;
+                } its_vmapti_cmd;
+
+                struct {
+                        struct its_vpe *vpe;
+                        struct its_device *dev;
+                        u32 event_id;
+                        bool db_enabled;
+                } its_vmovi_cmd;
+
+                struct {
+                        struct its_vpe *vpe;
+                        struct its_collection *col;
+                        u16 seq_num;
+                        u16 its_list;
+                } its_vmovp_cmd;
         };
 };
 
@@ -193,6 +271,9 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
                                                     struct its_cmd_desc *);
 
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+                                              struct its_cmd_desc *);
+
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
         u64 mask = GENMASK_ULL(h, l);
@@ -245,6 +326,46 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
         its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+        its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+        its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+        its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+        its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+        its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+        its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+        its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+        its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
         /* Let's fixup BE commands */
@@ -358,6 +479,40 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
         return col;
 }
 
+static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+                                                struct its_cmd_desc *desc)
+{
+        struct its_collection *col;
+
+        col = dev_event_to_col(desc->its_int_cmd.dev,
+                               desc->its_int_cmd.event_id);
+
+        its_encode_cmd(cmd, GITS_CMD_INT);
+        its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+        its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+        its_fixup_cmd(cmd);
+
+        return col;
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+                                                  struct its_cmd_desc *desc)
+{
+        struct its_collection *col;
+
+        col = dev_event_to_col(desc->its_clear_cmd.dev,
+                               desc->its_clear_cmd.event_id);
+
+        its_encode_cmd(cmd, GITS_CMD_CLEAR);
+        its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+        its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+        its_fixup_cmd(cmd);
+
+        return col;
+}
+
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
 {
@@ -369,6 +524,94 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
         return NULL;
 }
 
+static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+                                             struct its_cmd_desc *desc)
+{
+        its_encode_cmd(cmd, GITS_CMD_VINVALL);
+        its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+        its_fixup_cmd(cmd);
+
+        return desc->its_vinvall_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+                                           struct its_cmd_desc *desc)
+{
+        unsigned long vpt_addr;
+
+        vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+
+        its_encode_cmd(cmd, GITS_CMD_VMAPP);
+        its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+        its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+        its_encode_vpt_addr(cmd, vpt_addr);
+        its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+        its_fixup_cmd(cmd);
+
+        return desc->its_vmapp_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+                                            struct its_cmd_desc *desc)
+{
+        u32 db;
+
+        if (desc->its_vmapti_cmd.db_enabled)
+                db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+        else
+                db = 1023;
+
+        its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+        its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+        its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+        its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+        its_encode_db_phys_id(cmd, db);
+        its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+        its_fixup_cmd(cmd);
+
+        return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+                                           struct its_cmd_desc *desc)
+{
+        u32 db;
+
+        if (desc->its_vmovi_cmd.db_enabled)
+                db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+        else
+                db = 1023;
+
+        its_encode_cmd(cmd, GITS_CMD_VMOVI);
+        its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+        its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+        its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+        its_encode_db_phys_id(cmd, db);
+        its_encode_db_valid(cmd, true);
+
+        its_fixup_cmd(cmd);
+
+        return desc->its_vmovi_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+                                           struct its_cmd_desc *desc)
+{
+        its_encode_cmd(cmd, GITS_CMD_VMOVP);
+        its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+        its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+        its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+        its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+
+        its_fixup_cmd(cmd);
+
+        return desc->its_vmovp_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                  struct its_cmd_block *ptr)
 {
@@ -453,7 +696,13 @@ static void its_wait_for_range_completion(struct its_node *its,
         while (1) {
                 rd_idx = readl_relaxed(its->base + GITS_CREADR);
-                if (rd_idx >= to_idx || rd_idx < from_idx)
+
+                /* Direct case */
+                if (from_idx < to_idx && rd_idx >= to_idx)
+                        break;
+
+                /* Wrapped case */
+                if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
                         break;
 
                 count--;
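Aside: rd_idx, from_idx and to_idx are byte offsets into a circular command queue, so after a wrap from_idx can exceed to_idx and a single range test no longer works. A worked example with made-up offsets:

/*
 * Direct case:  from_idx = 0x100, to_idx = 0x300.
 *   The writer did not wrap; completion once rd_idx reaches 0x300.
 *
 * Wrapped case: from_idx = 0xffe0, to_idx = 0x40 (64KB queue).
 *   The writer wrapped past the end of the queue. Any rd_idx in
 *   [0x40, 0xffe0) means the reader has wrapped too and consumed
 *   everything up to to_idx; rd_idx >= 0xffe0 or rd_idx < 0x40
 *   means it is still catching up, so we keep waiting.
 */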
@@ -466,42 +715,84 @@ static void its_wait_for_range_completion(struct its_node *its,
         }
 }
 
-static void its_send_single_command(struct its_node *its,
-                                    its_cmd_builder_t builder,
-                                    struct its_cmd_desc *desc)
-{
-        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
-        struct its_collection *sync_col;
-        unsigned long flags;
-
-        raw_spin_lock_irqsave(&its->lock, flags);
-
-        cmd = its_allocate_entry(its);
-        if (!cmd) {             /* We're soooooo screewed... */
-                pr_err_ratelimited("ITS can't allocate, dropping command\n");
-                raw_spin_unlock_irqrestore(&its->lock, flags);
-                return;
-        }
-        sync_col = builder(cmd, desc);
-        its_flush_cmd(its, cmd);
-
-        if (sync_col) {
-                sync_cmd = its_allocate_entry(its);
-                if (!sync_cmd) {
-                        pr_err_ratelimited("ITS can't SYNC, skipping\n");
-                        goto post;
-                }
-                its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
-                its_encode_target(sync_cmd, sync_col->target_address);
-                its_fixup_cmd(sync_cmd);
-                its_flush_cmd(its, sync_cmd);
-        }
-
-post:
-        next_cmd = its_post_commands(its);
-        raw_spin_unlock_irqrestore(&its->lock, flags);
-
-        its_wait_for_range_completion(its, cmd, next_cmd);
-}
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)      \
+void name(struct its_node *its,                                        \
+          buildtype builder,                                           \
+          struct its_cmd_desc *desc)                                   \
+{                                                                      \
+        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;               \
+        synctype *sync_obj;                                            \
+        unsigned long flags;                                           \
+                                                                       \
+        raw_spin_lock_irqsave(&its->lock, flags);                      \
+                                                                       \
+        cmd = its_allocate_entry(its);                                 \
+        if (!cmd) {             /* We're soooooo screewed... */        \
+                raw_spin_unlock_irqrestore(&its->lock, flags);         \
+                return;                                                \
+        }                                                              \
+        sync_obj = builder(cmd, desc);                                 \
+        its_flush_cmd(its, cmd);                                       \
+                                                                       \
+        if (sync_obj) {                                                \
+                sync_cmd = its_allocate_entry(its);                    \
+                if (!sync_cmd)                                         \
+                        goto post;                                     \
+                                                                       \
+                buildfn(sync_cmd, sync_obj);                           \
+                its_flush_cmd(its, sync_cmd);                          \
+        }                                                              \
+                                                                       \
+post:                                                                  \
+        next_cmd = its_post_commands(its);                             \
+        raw_spin_unlock_irqrestore(&its->lock, flags);                 \
+                                                                       \
+        its_wait_for_range_completion(its, cmd, next_cmd);             \
+}
+
+static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+                               struct its_collection *sync_col)
+{
+        its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+        its_encode_target(sync_cmd, sync_col->target_address);
+
+        its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+                             struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+                                struct its_vpe *sync_vpe)
+{
+        its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+        its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+        its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+                             struct its_vpe, its_build_vsync_cmd)
+
+static void its_send_int(struct its_device *dev, u32 event_id)
+{
+        struct its_cmd_desc desc;
+
+        desc.its_int_cmd.dev = dev;
+        desc.its_int_cmd.event_id = event_id;
+
+        its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
+
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+        struct its_cmd_desc desc;
+
+        desc.its_clear_cmd.dev = dev;
+        desc.its_clear_cmd.event_id = event_id;
+
+        its_send_single_command(dev->its, its_build_clear_cmd, &desc);
+}
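Aside, in case the macro obscures it: each BUILD_SINGLE_CMD_FUNC instantiation above produces an ordinary function, and the two differ only in the type the builder returns and in the trailing synchronization command (SYNC on a collection vs VSYNC on a vPE). A rough sketch of what they expand to:

/* Physical variant: emits the command, then a SYNC on the collection. */
static void its_send_single_command(struct its_node *its,
                                    its_cmd_builder_t builder,
                                    struct its_cmd_desc *desc);

/* GICv4 variant: emits the command, then a VSYNC on the vPE. */
static void its_send_single_vcommand(struct its_node *its,
                                     its_cmd_vbuilder_t builder,
                                     struct its_cmd_desc *desc);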
 static void its_send_inv(struct its_device *dev, u32 event_id)
@@ -577,6 +868,106 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
         its_send_single_command(its, its_build_invall_cmd, &desc);
 }
 
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+        struct its_cmd_desc desc;
+
+        desc.its_vmapti_cmd.vpe = map->vpe;
+        desc.its_vmapti_cmd.dev = dev;
+        desc.its_vmapti_cmd.virt_id = map->vintid;
+        desc.its_vmapti_cmd.event_id = id;
+        desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+        its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+        struct its_cmd_desc desc;
+
+        desc.its_vmovi_cmd.vpe = map->vpe;
+        desc.its_vmovi_cmd.dev = dev;
+        desc.its_vmovi_cmd.event_id = id;
+        desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+        its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+{
+        struct its_cmd_desc desc;
+        struct its_node *its;
+
+        desc.its_vmapp_cmd.vpe = vpe;
+        desc.its_vmapp_cmd.valid = valid;
+
+        list_for_each_entry(its, &its_nodes, entry) {
+                if (!its->is_v4)
+                        continue;
+
+                desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+                its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+        }
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+        struct its_cmd_desc desc;
+        struct its_node *its;
+        unsigned long flags;
+        int col_id = vpe->col_idx;
+
+        desc.its_vmovp_cmd.vpe = vpe;
+        desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+        if (!its_list_map) {
+                its = list_first_entry(&its_nodes, struct its_node, entry);
+                desc.its_vmovp_cmd.seq_num = 0;
+                desc.its_vmovp_cmd.col = &its->collections[col_id];
+                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+                return;
+        }
+
+        /*
+         * Yet another marvel of the architecture. If using the
+         * its_list "feature", we need to make sure that all ITSs
+         * receive all VMOVP commands in the same order. The only way
+         * to guarantee this is to make vmovp a serialization point.
+         *
+         * Wall <-- Head.
+         */
+        raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+        /* Emit VMOVPs */
+        list_for_each_entry(its, &its_nodes, entry) {
+                if (!its->is_v4)
+                        continue;
+
+                desc.its_vmovp_cmd.col = &its->collections[col_id];
+                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+        }
+
+        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_vpe *vpe)
+{
+        struct its_cmd_desc desc;
+        struct its_node *its;
+
+        desc.its_vinvall_cmd.vpe = vpe;
+
+        list_for_each_entry(its, &its_nodes, entry) {
+                if (!its->is_v4)
+                        continue;
+                its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+        }
+}
+
 /*
  * irqchip functions - assumes MSI, mostly.
  */
@@ -587,17 +978,26 @@ static inline u32 its_get_event_id(struct irq_data *d)
         return d->hwirq - its_dev->event_map.lpi_base;
 }
 
-static void lpi_set_config(struct irq_data *d, bool enable)
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 {
-        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-        irq_hw_number_t hwirq = d->hwirq;
-        u32 id = its_get_event_id(d);
-        u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+        irq_hw_number_t hwirq;
+        struct page *prop_page;
+        u8 *cfg;
+
+        if (irqd_is_forwarded_to_vcpu(d)) {
+                struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+                u32 event = its_get_event_id(d);
 
-        if (enable)
-                *cfg |= LPI_PROP_ENABLED;
-        else
-                *cfg &= ~LPI_PROP_ENABLED;
+                prop_page = its_dev->event_map.vm->vprop_page;
+                hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+        } else {
+                prop_page = gic_rdists->prop_page;
+                hwirq = d->hwirq;
+        }
+
+        cfg = page_address(prop_page) + hwirq - 8192;
+        *cfg &= ~clr;
+        *cfg |= set | LPI_PROP_GROUP1;
 
         /*
          * Make the above write visible to the redistributors.
@@ -608,17 +1008,53 @@ static void lpi_set_config(struct irq_data *d, bool enable)
                 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
         else
                 dsb(ishst);
-        its_send_inv(its_dev, id);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+        lpi_write_config(d, clr, set);
+        its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        u32 event = its_get_event_id(d);
+
+        if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+                return;
+
+        its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+        /*
+         * More fun with the architecture:
+         *
+         * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+         * value or to 1023, depending on the enable bit. But that
+         * would be issuing a mapping for an /existing/ DevID+EventID
+         * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+         * to the /same/ vPE, using this opportunity to adjust the
+         * doorbell. Mouahahahaha. We loves it, Precious.
+         */
+        its_send_vmovi(its_dev, event);
 }
 
 static void its_mask_irq(struct irq_data *d)
 {
-        lpi_set_config(d, false);
+        if (irqd_is_forwarded_to_vcpu(d))
+                its_vlpi_set_doorbell(d, false);
+
+        lpi_update_config(d, LPI_PROP_ENABLED, 0);
 }
 
 static void its_unmask_irq(struct irq_data *d)
 {
-        lpi_set_config(d, true);
+        if (irqd_is_forwarded_to_vcpu(d))
+                its_vlpi_set_doorbell(d, true);
+
+        lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -630,6 +1066,10 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
         struct its_collection *target_col;
         u32 id = its_get_event_id(d);
 
+        /* A forwarded interrupt should use irq_set_vcpu_affinity */
+        if (irqd_is_forwarded_to_vcpu(d))
+                return -EINVAL;
+
         /* lpi cannot be routed to a redistributor that is on a foreign node */
         if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                 if (its_dev->its->numa_node >= 0) {
@@ -649,6 +1089,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                 target_col = &its_dev->its->collections[cpu];
                 its_send_movi(its_dev, target_col, id);
                 its_dev->event_map.col_map[id] = cpu;
+                irq_data_update_effective_affinity(d, cpumask_of(cpu));
         }
 
         return IRQ_SET_MASK_OK_DONE;
@@ -670,16 +1111,191 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
         iommu_dma_map_msi_msg(d->irq, msg);
 }
 
-static struct irq_chip its_irq_chip = {
-        .name                   = "ITS",
-        .irq_mask               = its_mask_irq,
-        .irq_unmask             = its_unmask_irq,
-        .irq_eoi                = irq_chip_eoi_parent,
-        .irq_set_affinity       = its_set_affinity,
-        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
-};
+static int its_irq_set_irqchip_state(struct irq_data *d,
+                                     enum irqchip_irq_state which,
+                                     bool state)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        u32 event = its_get_event_id(d);
+
+        if (which != IRQCHIP_STATE_PENDING)
+                return -EINVAL;
+
+        if (state)
+                its_send_int(its_dev, event);
+        else
+                its_send_clear(its_dev, event);
+
+        return 0;
+}
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        u32 event = its_get_event_id(d);
+        int ret = 0;
+
+        if (!info->map)
+                return -EINVAL;
+
+        mutex_lock(&its_dev->event_map.vlpi_lock);
+
+        if (!its_dev->event_map.vm) {
+                struct its_vlpi_map *maps;
+
+                maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+                               GFP_KERNEL);
+                if (!maps) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                its_dev->event_map.vm = info->map->vm;
+                its_dev->event_map.vlpi_maps = maps;
+        } else if (its_dev->event_map.vm != info->map->vm) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        /* Get our private copy of the mapping information */
+        its_dev->event_map.vlpi_maps[event] = *info->map;
+
+        if (irqd_is_forwarded_to_vcpu(d)) {
+                /* Already mapped, move it around */
+                its_send_vmovi(its_dev, event);
+        } else {
+                /* Drop the physical mapping */
+                its_send_discard(its_dev, event);
+
+                /* and install the virtual one */
+                its_send_vmapti(its_dev, event);
+                irqd_set_forwarded_to_vcpu(d);
+
+                /* Increment the number of VLPIs */
+                its_dev->event_map.nr_vlpis++;
+        }
+
+out:
+        mutex_unlock(&its_dev->event_map.vlpi_lock);
+        return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        u32 event = its_get_event_id(d);
+        int ret = 0;
+
+        mutex_lock(&its_dev->event_map.vlpi_lock);
+
+        if (!its_dev->event_map.vm ||
+            !its_dev->event_map.vlpi_maps[event].vm) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        /* Copy our mapping information to the incoming request */
+        *info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+        mutex_unlock(&its_dev->event_map.vlpi_lock);
+        return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        u32 event = its_get_event_id(d);
+        int ret = 0;
+
+        mutex_lock(&its_dev->event_map.vlpi_lock);
+
+        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        /* Drop the virtual mapping */
+        its_send_discard(its_dev, event);
+
+        /* and restore the physical one */
+        irqd_clr_forwarded_to_vcpu(d);
+        its_send_mapti(its_dev, d->hwirq, event);
+        lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+                                    LPI_PROP_ENABLED |
+                                    LPI_PROP_GROUP1));
+
+        /*
+         * Drop the refcount and make the device available again if
+         * this was the last VLPI.
+         */
+        if (!--its_dev->event_map.nr_vlpis) {
+                its_dev->event_map.vm = NULL;
+                kfree(its_dev->event_map.vlpi_maps);
+        }
+
+out:
+        mutex_unlock(&its_dev->event_map.vlpi_lock);
+        return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+                return -EINVAL;
+
+        if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+                lpi_update_config(d, 0xff, info->config);
+        else
+                lpi_write_config(d, 0xff, info->config);
+        its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+        return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+        struct its_cmd_info *info = vcpu_info;
+
+        /* Need a v4 ITS */
+        if (!its_dev->its->is_v4)
+                return -EINVAL;
+
+        /* Unmap request? */
+        if (!info)
+                return its_vlpi_unmap(d);
+
+        switch (info->cmd_type) {
+        case MAP_VLPI:
+                return its_vlpi_map(d, info);
+
+        case GET_VLPI:
+                return its_vlpi_get(d, info);
+
+        case PROP_UPDATE_VLPI:
+        case PROP_UPDATE_AND_INV_VLPI:
+                return its_vlpi_prop_update(d, info);
+
+        default:
+                return -EINVAL;
+        }
+}
+
+static struct irq_chip its_irq_chip = {
+        .name                   = "ITS",
+        .irq_mask               = its_mask_irq,
+        .irq_unmask             = its_unmask_irq,
+        .irq_eoi                = irq_chip_eoi_parent,
+        .irq_set_affinity       = its_set_affinity,
+        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
+        .irq_set_irqchip_state  = its_irq_set_irqchip_state,
+        .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
+};
/*
* How we allocate LPIs:
*
* The GIC has id_bits bits for interrupt identifiers. From there, we
@@ -695,7 +1311,6 @@ static struct irq_chip its_irq_chip = {
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static u32 lpi_id_bits;
static DEFINE_SPINLOCK(lpi_lock);
static int its_lpi_to_chunk(int lpi)
@@ -766,16 +1381,15 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
return bitmap;
}
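For reference, the chunk arithmetic used by this allocator reduces to the following standalone sketch; IRQS_PER_CHUNK (32) and the 8192 LPI base match the driver's constants at this point and are restated here as assumptions:

/*
 * Standalone sketch of the LPI chunk arithmetic. The 8192 base and
 * the 32-LPI chunk size mirror the driver's values; treat them as
 * illustrative assumptions, not part of the diff.
 */
#include <assert.h>
#include <stdio.h>

#define IRQS_PER_CHUNK_SHIFT 5
#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)

static int lpi_to_chunk(int lpi)
{
    return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int chunk_to_lpi(int chunk)
{
    return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

int main(void)
{
    /* LPI 8192 is the very first chunk... */
    assert(lpi_to_chunk(8192) == 0);
    /* ...and chunk 3 starts 96 IRQs into the LPI space. */
    assert(chunk_to_lpi(3) == 8288);
    printf("chunk of LPI 8300: %d\n", lpi_to_chunk(8300));
    return 0;
}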
static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
int lpi;

spin_lock(&lpi_lock);

for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
int chunk = its_lpi_to_chunk(lpi);

BUG_ON(chunk > lpi_chunks);
if (test_bit(chunk, lpi_bitmap)) {
clear_bit(chunk, lpi_bitmap);
@@ -786,28 +1400,40 @@ static void its_lpi_free(struct event_lpi_map *map)
spin_unlock(&lpi_lock);

kfree(bitmap);
}
/*
* We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
* deal with (one configuration byte per interrupt). PENDBASE has to
* be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
*/
#define LPI_NRBITS lpi_id_bits
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
#define LPI_PROP_DEFAULT_PRIO 0xa0

static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;

prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;

/* Priority 0xa0, Group-1, disabled */
memset(page_address(prop_page),
LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
LPI_PROPBASE_SZ);

/* Make sure the GIC will observe the written configuration */
gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

return prop_page;
}
static void its_free_prop_table(struct page *prop_page)
{
free_pages((unsigned long)page_address(prop_page),
get_order(LPI_PROPBASE_SZ));
}
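A worked example of the sizing above, assuming lpi_id_bits is capped at 16: the property table needs one byte per possible LPI (64KB, naturally 64K-aligned), while the pending table needs one bit per LPI (8KB, padded out to the 64K alignment). The kernel macros are open-coded so the sketch runs standalone:

/*
 * Worked example of the PROPBASE/PENDBASE sizing, assuming
 * lpi_id_bits == 16. ALIGN() is open-coded so the snippet runs
 * outside the kernel.
 */
#include <stdio.h>

#define SZ_64K 0x10000UL
#define BIT(n) (1UL << (n))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long id_bits = 16;
    /* One config byte per LPI: 64KB, already 64K aligned. */
    unsigned long propbase_sz = ALIGN(BIT(id_bits), SZ_64K);
    /* One pending bit per LPI: 8KB, padded out to 64KB. */
    unsigned long pendbase_sz = ALIGN(BIT(id_bits) / 8, SZ_64K);

    printf("PROPBASE: %lu KB, PENDBASE: %lu KB\n",
           propbase_sz / 1024, pendbase_sz / 1024);
    return 0;
}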
static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;

lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
return -ENOMEM;
@@ -816,14 +1442,6 @@ static int __init its_alloc_lpi_tables(void)
paddr = page_to_phys(gic_rdists->prop_page);
pr_info("GIC: using LPI property table @%pa\n", &paddr);
/* Priority 0xa0, Group-1, disabled */
memset(page_address(gic_rdists->prop_page),
LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
return its_lpi_init(lpi_id_bits);
}
@@ -962,10 +1580,13 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
return 0;
}
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
u32 psz, u32 *order)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
u32 ids = its->device_ids;
u32 new_order = *order;
@@ -1004,8 +1625,9 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
&its->phys_base, its_base_type_string[type],
its->device_ids, ids);
}

*order = new_order;
@@ -1053,11 +1675,16 @@ static int its_alloc_tables(struct its_node *its)
u32 order = get_order(psz);
bool indirect = false;

switch (type) {
case GITS_BASER_TYPE_NONE:
continue;

case GITS_BASER_TYPE_DEVICE:
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
psz, &order);
break;
}

err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
if (err < 0) {
@@ -1084,6 +1711,30 @@ static int its_alloc_collections(struct its_node *its)
return 0;
}
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
/*
* The pending pages have to be at least 64kB aligned,
* hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
*/
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
if (!pend_page)
return NULL;
/* Make sure the GIC will observe the zero-ed page */
gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
return pend_page;
}
static void its_free_pending_table(struct page *pt)
{
free_pages((unsigned long)page_address(pt),
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1094,21 +1745,14 @@ static void its_cpu_init_lpis(void)
pend_page = gic_data_rdist()->pend_page;
if (!pend_page) {
phys_addr_t paddr;

pend_page = its_allocate_pending_table(GFP_NOWAIT);
if (!pend_page) {
pr_err("Failed to allocate PENDBASE for CPU%d\n",
smp_processor_id());
return;
}

paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
smp_processor_id(), &paddr);
@@ -1259,26 +1903,19 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
{
struct page *page;
u32 esz, idx;
__le64 *table;

/* Don't allow device id that exceeds single, flat table limit */
esz = GITS_BASER_ENTRY_SIZE(baser->val);
if (!(baser->val & GITS_BASER_INDIRECT))
return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

/* Compute 1st level table index & check if that exceeds table limit */
idx = id >> ilog2(baser->psz / esz);
if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
return false;
@@ -1307,11 +1944,52 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
return true;
}
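The indirect case splits the ID into a level-1 index and a slot within a level-2 page; the following standalone sketch walks through the arithmetic with an assumed 64KB ITS page and 8-byte entries:

/*
 * Sketch of the flat vs. indirect table bounds check, assuming a
 * 64KB page size (psz) and 8-byte entries (esz) purely for
 * illustration.
 */
#include <stdio.h>

int main(void)
{
    unsigned int psz = 0x10000; /* 64KB ITS page */
    unsigned int esz = 8;       /* bytes per table entry */
    unsigned int id = 100000;

    /* Each L2 page holds psz/esz entries (8192 here)... */
    unsigned int ids_per_l2 = psz / esz;
    /* ...so the L1 index is just the ID divided by that. */
    unsigned int l1_idx = id / ids_per_l2;

    printf("ID %u lives in L2 page %u, slot %u\n",
           id, l1_idx, id % ids_per_l2);
    return 0;
}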
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
struct its_baser *baser;
baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
return (ilog2(dev_id) < its->device_ids);
return its_alloc_table_entry(baser, dev_id);
}
static bool its_alloc_vpe_table(u32 vpe_id)
{
struct its_node *its;
/*
* Make sure the L2 tables are allocated on *all* v4 ITSs. We
* could try and only do it on ITSs corresponding to devices
* that have interrupts targeted at this VPE, but the
* complexity becomes crazy (and you have tons of memory
* anyway, right?).
*/
list_for_each_entry(its, &its_nodes, entry) {
struct its_baser *baser;
if (!its->is_v4)
continue;
baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
if (!baser)
return false;
if (!its_alloc_table_entry(baser, vpe_id))
return false;
}
return true;
}
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs, bool alloc_lpis)
{
struct its_device *dev;
unsigned long *lpi_map = NULL;
unsigned long flags;
u16 *col_map = NULL;
void *itt;
@@ -1333,11 +2011,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
if (alloc_lpis) {
lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
col_map = kzalloc(sizeof(*col_map) * nr_lpis,
GFP_KERNEL);
} else {
col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
nr_lpis = 0;
lpi_base = 0;
}

if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
kfree(itt);
kfree(lpi_map);
@@ -1354,6 +2039,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
mutex_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -1412,6 +2098,16 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
msi_info = msi_get_domain_info(domain);
its = msi_info->data;
if (!gic_rdists->has_direct_lpi &&
vpe_proxy.dev &&
vpe_proxy.dev->its == its &&
dev_id == vpe_proxy.dev->device_id) {
/* Bad luck. Get yourself a better implementation */
WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
dev_id);
return -EINVAL;
}
its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
@@ -1423,7 +2119,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
goto out;
}
its_dev = its_create_device(its, dev_id, nvec, true);
if (!its_dev)
return -ENOMEM;
@@ -1481,6 +2177,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq, &its_irq_chip, its_dev);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
pr_debug("ID:%d pID:%d vID:%d\n", pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq - its_dev->event_map.lpi_base), (int)(hwirq - its_dev->event_map.lpi_base),
(int) hwirq, virq + i); (int) hwirq, virq + i);
...@@ -1495,13 +2192,16 @@ static void its_irq_domain_activate(struct irq_domain *domain, ...@@ -1495,13 +2192,16 @@ static void its_irq_domain_activate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d); u32 event = its_get_event_id(d);
const struct cpumask *cpu_mask = cpu_online_mask; const struct cpumask *cpu_mask = cpu_online_mask;
int cpu;
/* get the cpu_mask of local node */
if (its_dev->its->numa_node >= 0)
cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
cpu = cpumask_first(cpu_mask);
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
@@ -1539,7 +2239,10 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
its_lpi_free_chunks(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -1556,6 +2259,451 @@ static const struct irq_domain_ops its_domain_ops = {
.deactivate = its_irq_domain_deactivate,
};
/*
* This is insane.
*
* If a GICv4 doesn't implement Direct LPIs (which is extremely
* likely), the only way to perform an invalidate is to use a fake
* device to issue an INV command, implying that the LPI has first
* been mapped to some event on that device. Since this is not exactly
* cheap, we try to keep that mapping around as long as possible, and
* only issue an UNMAP if we're short on available slots.
*
* Broken by design(tm).
*/
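As a toy model of the slot-recycling policy described above (a fixed pool of ITT slots, a round-robin next_victim cursor, eviction of the current owner), consider this standalone sketch; the slot count and VPE numbering are made up for the demo:

/*
 * Toy model of the proxy slot recycling: NR_SLOTS and the VPE ids
 * are invented; only the eviction policy mirrors the code below.
 */
#include <stdio.h>

#define NR_SLOTS 4

static int slots[NR_SLOTS]; /* 0 == free, else VPE id */
static int next_victim;

static void proxy_map(int vpe)
{
    if (slots[next_victim])
        printf("evicting vpe %d from slot %d\n",
               slots[next_victim], next_victim);
    slots[next_victim] = vpe;
    next_victim = (next_victim + 1) % NR_SLOTS;
}

int main(void)
{
    for (int vpe = 1; vpe <= 6; vpe++)
        proxy_map(vpe); /* vpe 5 and 6 evict 1 and 2 */
    return 0;
}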
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
/* Already unmapped? */
if (vpe->vpe_proxy_event == -1)
return;
its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
/*
* We don't track empty slots at all, so let's move the
* next_victim pointer if we can quickly reuse that slot
* instead of nuking an existing entry. Not clear that this is
* always a win though, and this might just generate a ripple
* effect... Let's just hope VPEs don't migrate too often.
*/
if (vpe_proxy.vpes[vpe_proxy.next_victim])
vpe_proxy.next_victim = vpe->vpe_proxy_event;
vpe->vpe_proxy_event = -1;
}
static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
if (!gic_rdists->has_direct_lpi) {
unsigned long flags;
raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
its_vpe_db_proxy_unmap_locked(vpe);
raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
}
static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
/* Already mapped? */
if (vpe->vpe_proxy_event != -1)
return;
/* This slot was already allocated. Kick the other VPE out. */
if (vpe_proxy.vpes[vpe_proxy.next_victim])
its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
/* Map the new VPE instead */
vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
vpe->vpe_proxy_event = vpe_proxy.next_victim;
vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}
static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
{
unsigned long flags;
struct its_collection *target_col;
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
return;
}
raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
its_vpe_db_proxy_map_locked(vpe);
target_col = &vpe_proxy.dev->its->collections[to];
its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
static int its_vpe_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
int cpu = cpumask_first(mask_val);
/*
* Changing affinity is mega expensive, so let's be as lazy as
* we can and only do it if we really have to. Also, if mapped
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
*/
if (vpe->col_idx != cpu) {
int from = vpe->col_idx;
vpe->col_idx = cpu;
its_send_vmovp(vpe);
its_vpe_db_proxy_move(vpe, from, cpu);
}
return IRQ_SET_MASK_OK_DONE;
}
static void its_vpe_schedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;
/* Schedule the VPE */
val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
GENMASK_ULL(51, 12);
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
val |= GICR_VPROPBASER_RaWb;
val |= GICR_VPROPBASER_InnerShareable;
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
val |= GICR_VPENDBASER_RaWaWb;
val |= GICR_VPENDBASER_NonShareable;
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
* easily. So in the end, vpe->pending_last is only an
* indication that the vcpu has something pending, not one
* that the pending table is empty. A good implementation
* would be able to read its coarse map pretty quickly anyway,
* making this a tolerable issue.
*/
val |= GICR_VPENDBASER_PendingLast;
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
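The VPROPBASER packing above boils down to ORing an aligned table address into bits [51:12] with the ID width in the low field; a standalone worked example, where the address and ID width are sample values and the 5-bit IDbits mask is assumed to match GICR_VPROPBASER_IDBITS_MASK:

/*
 * Bit-packing sketch for the VPROPBASER value: physical address in
 * bits [51:12], (id_bits - 1) in the low 5 bits. All values are
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    uint64_t vprop_pa = 0x12345678000ULL; /* 4K-aligned table */
    uint64_t id_bits = 16;
    uint64_t val;

    val = vprop_pa & GENMASK_ULL(51, 12); /* table address */
    val |= (id_bits - 1) & 0x1f;          /* IDbits field */

    printf("VPROPBASER = 0x%llx\n", (unsigned long long)val);
    return 0;
}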
static void its_vpe_deschedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
/* We're being scheduled out */
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
if (!clean) {
count--;
cpu_relax();
udelay(1);
}
} while (!clean && count);
if (unlikely(!clean && !count)) {
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
vpe->idai = false;
vpe->pending_last = true;
} else {
vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}
}
static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_cmd_info *info = vcpu_info;
switch (info->cmd_type) {
case SCHEDULE_VPE:
its_vpe_schedule(vpe);
return 0;
case DESCHEDULE_VPE:
its_vpe_deschedule(vpe);
return 0;
case INVALL_VPE:
its_send_vinvall(vpe);
return 0;
default:
return -EINVAL;
}
}
static void its_vpe_send_cmd(struct its_vpe *vpe,
void (*cmd)(struct its_device *, u32))
{
unsigned long flags;
raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
its_vpe_db_proxy_map_locked(vpe);
cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
static void its_vpe_send_inv(struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
}
static void its_vpe_mask_irq(struct irq_data *d)
{
/*
* We need to unmask the LPI, which is described by the parent
* irq_data. Instead of calling into the parent (which won't
* exactly do the right thing, let's simply use the
* parent_data pointer. Yes, I'm naughty.
*/
lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
its_vpe_send_inv(d);
}
static void its_vpe_unmask_irq(struct irq_data *d)
{
/* Same hack as above... */
lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
its_vpe_send_inv(d);
}
static int its_vpe_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool state)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
if (state) {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
}
} else {
if (state)
its_vpe_send_cmd(vpe, its_send_int);
else
its_vpe_send_cmd(vpe, its_send_clear);
}
return 0;
}
static struct irq_chip its_vpe_irq_chip = {
.name = "GICv4-vpe",
.irq_mask = its_vpe_mask_irq,
.irq_unmask = its_vpe_unmask_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_vpe_set_affinity,
.irq_set_irqchip_state = its_vpe_set_irqchip_state,
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
{
ida_simple_remove(&its_vpeid_ida, id);
}
static int its_vpe_init(struct its_vpe *vpe)
{
struct page *vpt_page;
int vpe_id;
/* Allocate vpe_id */
vpe_id = its_vpe_id_alloc();
if (vpe_id < 0)
return vpe_id;
/* Allocate VPT */
vpt_page = its_allocate_pending_table(GFP_KERNEL);
if (!vpt_page) {
its_vpe_id_free(vpe_id);
return -ENOMEM;
}
if (!its_alloc_vpe_table(vpe_id)) {
its_vpe_id_free(vpe_id);
its_free_pending_table(vpt_page); /* vpe->vpt_page is not set yet */
return -ENOMEM;
}
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
vpe->vpe_proxy_event = -1;
return 0;
}
static void its_vpe_teardown(struct its_vpe *vpe)
{
its_vpe_db_proxy_unmap(vpe);
its_vpe_id_free(vpe->vpe_id);
its_free_pending_table(vpe->vpt_page);
}
static void its_vpe_irq_domain_free(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs)
{
struct its_vm *vm = domain->host_data;
int i;
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
for (i = 0; i < nr_irqs; i++) {
struct irq_data *data = irq_domain_get_irq_data(domain,
virq + i);
struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
BUG_ON(vm != vpe->its_vm);
clear_bit(data->hwirq, vm->db_bitmap);
its_vpe_teardown(vpe);
irq_domain_reset_irq_data(data);
}
if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
its_free_prop_table(vm->vprop_page);
}
}
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct its_vm *vm = args;
unsigned long *bitmap;
struct page *vprop_page;
int base, nr_ids, i, err = 0;
BUG_ON(!vm);
bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
if (nr_ids < nr_irqs) {
its_lpi_free_chunks(bitmap, base, nr_ids);
return -ENOMEM;
}
vprop_page = its_allocate_prop_table(GFP_KERNEL);
if (!vprop_page) {
its_lpi_free_chunks(bitmap, base, nr_ids);
return -ENOMEM;
}
vm->db_bitmap = bitmap;
vm->db_lpi_base = base;
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
for (i = 0; i < nr_irqs; i++) {
vm->vpes[i]->vpe_db_lpi = base + i;
err = its_vpe_init(vm->vpes[i]);
if (err)
break;
err = its_irq_gic_domain_alloc(domain, virq + i,
vm->vpes[i]->vpe_db_lpi);
if (err)
break;
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
&its_vpe_irq_chip, vm->vpes[i]);
set_bit(i, bitmap);
}
if (err) {
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i - 1);
its_lpi_free_chunks(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
return err;
}
static void its_vpe_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
its_send_vmapp(vpe, true);
its_send_vinvall(vpe);
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
its_send_vmapp(vpe, false);
}
static const struct irq_domain_ops its_vpe_domain_ops = {
.alloc = its_vpe_irq_domain_alloc,
.free = its_vpe_irq_domain_free,
.activate = its_vpe_irq_domain_activate,
.deactivate = its_vpe_irq_domain_deactivate,
};
static int its_force_quiescent(void __iomem *base)
{
u32 count = 1000000; /* 1s */
@@ -1571,7 +2719,7 @@ static int its_force_quiescent(void __iomem *base)
return 0;

/* Disable the generation of all interrupts to this ITS */
val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
writel_relaxed(val, base + GITS_CTLR);

/* Poll GITS_CTLR and wait until ITS becomes quiescent */
@@ -1672,13 +2820,92 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
return 0;
}
static int its_init_vpe_domain(void)
{
struct its_node *its;
u32 devid;
int entries;
if (gic_rdists->has_direct_lpi) {
pr_info("ITS: Using DirectLPI for VPE invalidation\n");
return 0;
}
/* Any ITS will do, even if not v4 */
its = list_first_entry(&its_nodes, struct its_node, entry);
entries = roundup_pow_of_two(nr_cpu_ids);
vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
GFP_KERNEL);
if (!vpe_proxy.vpes) {
pr_err("ITS: Can't allocate GICv4 proxy device array\n");
return -ENOMEM;
}
/* Use the last possible DevID */
devid = GENMASK(its->device_ids - 1, 0);
vpe_proxy.dev = its_create_device(its, devid, entries, false);
if (!vpe_proxy.dev) {
kfree(vpe_proxy.vpes);
pr_err("ITS: Can't allocate GICv4 proxy device\n");
return -ENOMEM;
}
BUG_ON(entries != vpe_proxy.dev->nr_ites);
raw_spin_lock_init(&vpe_proxy.lock);
vpe_proxy.next_victim = 0;
pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
devid, vpe_proxy.dev->nr_ites);
return 0;
}
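The "last possible DevID" above comes from GENMASK(); a standalone worked example, assuming a 20-bit DevID space purely for illustration:

/*
 * GENMASK worked example for the proxy DevID, assuming a 20-bit
 * DevID space. The macro is open-coded for a 32-bit type.
 */
#include <stdio.h>

#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
    unsigned int device_ids = 20;

    /* Highest encodable DevID: bits [19:0] all set == 0xfffff */
    printf("devid = 0x%x\n", GENMASK(device_ids - 1, 0));
    return 0;
}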
static int __init its_compute_its_list_map(struct resource *res,
void __iomem *its_base)
{
int its_number;
u32 ctlr;
/*
* This is assumed to be done early enough that we're
* guaranteed to be single-threaded, hence no
* locking. Should this change, we should address
* this.
*/
its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
if (its_number >= ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
&res->start);
return -EINVAL;
}
ctlr = readl_relaxed(its_base + GITS_CTLR);
ctlr &= ~GITS_CTLR_ITS_NUMBER;
ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
writel_relaxed(ctlr, its_base + GITS_CTLR);
ctlr = readl_relaxed(its_base + GITS_CTLR);
if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
its_number = ctlr & GITS_CTLR_ITS_NUMBER;
its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
}
if (test_and_set_bit(its_number, &its_list_map)) {
pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
&res->start, its_number);
return -EINVAL;
}
return its_number;
}
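The claim protocol above is: pick the first free ITSList number, write it to GITS_CTLR, then believe whatever reads back (the field may not be writable on a given implementation). A standalone simulation, with a plain variable standing in for the register:

/*
 * Simulation of the ITSList claim logic. The 16-entry limit matches
 * ITS_LIST_MAX; the "hardware" is a plain variable that ignores the
 * write, standing in for a read-only GITS_CTLR field.
 */
#include <stdio.h>

#define ITS_LIST_MAX 16

static unsigned int its_list_map;
static unsigned int hw_its_number = 7; /* pretend it's read-only */

int main(void)
{
    int wanted = 0;

    /* find_first_zero_bit() equivalent */
    while (wanted < ITS_LIST_MAX && (its_list_map & (1U << wanted)))
        wanted++;
    if (wanted >= ITS_LIST_MAX)
        return 1;

    /* Our fake ITS ignored the write, so trust the read-back */
    int got = hw_its_number;

    if (its_list_map & (1U << got)) {
        printf("duplicate ITSList entry %d\n", got);
        return 1;
    }
    its_list_map |= 1U << got;
    printf("wanted %d, using %d\n", wanted, got);
    return 0;
}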
static int __init its_probe_one(struct resource *res,
struct fwnode_handle *handle, int numa_node)
{
struct its_node *its;
void __iomem *its_base;
u32 val, ctlr;
u64 baser, tmp, typer;
int err;

its_base = ioremap(res->start, resource_size(res));
@@ -1711,9 +2938,24 @@ static int __init its_probe_one(struct resource *res,
raw_spin_lock_init(&its->lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
its->base = its_base;
its->phys_base = res->start;
its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
if (its->is_v4) {
if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base);
if (err < 0)
goto out_free_its;
pr_info("ITS@%pa: Using ITS number %d\n",
&res->start, err);
} else {
pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
}
}
its->numa_node = numa_node;

its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -1760,7 +3002,11 @@ static int __init its_probe_one(struct resource *res,
}

gits_write_cwriter(0, its->base + GITS_CWRITER);
ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr |= GITS_CTLR_ENABLE;
if (its->is_v4)
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
if (err)
@@ -1816,13 +3062,13 @@ static int __init its_of_probe(struct device_node *node)
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_property_read_bool(np, "msi-controller")) {
pr_warn("%pOF: no msi-controller property, ITS ignored\n",
np);
continue;
}

if (of_address_to_resource(np, 0, &res)) {
pr_warn("%pOF: no regs?\n", np);
continue;
}
@@ -1984,6 +3230,9 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *parent_domain)
{
struct device_node *of_node;
struct its_node *its;
bool has_v4 = false;
int err;
its_parent = parent_domain;
of_node = to_of_node(handle);
@@ -1998,5 +3247,20 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}

gic_rdists = rdists;
err = its_alloc_lpi_tables();
if (err)
return err;
list_for_each_entry(its, &its_nodes, entry)
has_v4 |= its->is_v4;
if (has_v4 && rdists->has_vlpis) {
if (its_init_vpe_domain() ||
its_init_v4(parent_domain, &its_vpe_domain_ops)) {
rdists->has_vlpis = false;
pr_err("ITS: Disabling GICv4 support\n");
}
}
return 0;
}
/*
* Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -423,24 +423,14 @@ static void __init gic_dist_init(void)
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
int ret = -ENODEV;
int i;

for (i = 0; i < gic_data.nr_redist_regions; i++) {
void __iomem *ptr = gic_data.redist_regions[i].redist_base;
u64 typer;
u32 reg;

reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -452,15 +442,9 @@ static int gic_populate_rdist(void)
do {
typer = gic_read_typer(ptr + GICR_TYPER);
ret = fn(gic_data.redist_regions + i, ptr);
if (!ret)
return 0;

if (gic_data.redist_regions[i].single_redist)
break;
@@ -475,12 +459,71 @@ static int gic_populate_rdist(void)
} while (!(typer & GICR_TYPER_LAST));
}
return ret ? -ENODEV : 0;
}
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
unsigned long mpidr = cpu_logical_map(smp_processor_id());
u64 typer;
u32 aff;
/*
* Convert affinity to a 32bit value that can be matched to
* GICR_TYPER bits [63:32].
*/
aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
u64 offset = ptr - region->redist_base;
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = region->phys_base + offset;
pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
smp_processor_id(), mpidr,
(int)(region - gic_data.redist_regions),
&gic_data_rdist()->phys_base);
return 0;
}
/* Try next one */
return 1;
}
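A worked example of the affinity packing that __gic_populate_rdist() matches against GICR_TYPER[63:32]; the MPIDR value is arbitrary, and the level shifts follow the architectural layout (Aff3 lives at bits [39:32]):

/*
 * Standalone worked example of the MPIDR -> 32-bit affinity packing.
 * The MPIDR value is made up; the field layout is architectural.
 */
#include <stdint.h>
#include <stdio.h>

#define MPIDR_LEVEL_SHIFT(l) (((l) == 3) ? 32 : (l) * 8)
#define MPIDR_AFFINITY_LEVEL(m, l) \
    (((m) >> MPIDR_LEVEL_SHIFT(l)) & 0xff)

int main(void)
{
    uint64_t mpidr = 0x0000000000020103ULL; /* Aff2=2, Aff1=1, Aff0=3 */
    uint32_t aff;

    aff = (uint32_t)(MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
                     MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
                     MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
                     MPIDR_AFFINITY_LEVEL(mpidr, 0));

    /* Matched against GICR_TYPER bits [63:32]. */
    printf("aff = 0x%08x\n", aff); /* prints 0x00020103 */
    return 0;
}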
static int gic_populate_rdist(void)
{
if (gic_iterate_rdists(__gic_populate_rdist) == 0)
return 0;
/* We couldn't even deal with ourselves... */
WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
smp_processor_id(),
(unsigned long)cpu_logical_map(smp_processor_id()));
return -ENODEV;
}
static int __gic_update_vlpi_properties(struct redist_region *region,
void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
return 1;
}
static void gic_update_vlpi_properties(void)
{
gic_iterate_rdists(__gic_update_vlpi_properties);
pr_info("%sVLPI support, %sdirect LPI support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
!gic_data.rdists.has_direct_lpi ? "no " : "");
}
static void gic_cpu_sys_reg_init(void)
{
/*
@@ -677,6 +720,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
else
gic_dist_wait_for_rwp();
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
}
#else
@@ -775,6 +820,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}

/* LPIs */
if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -953,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
@@ -961,6 +1009,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
set_handle_irq(gic_handle_irq);
gic_update_vlpi_properties();
if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
its_init(handle, &gic_data.rdists, gic_data.domain);
@@ -1067,7 +1117,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(cpu == -1))
continue;
pr_cont("%s[%d] ", cpu_node->full_name, cpu); pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
}
@@ -1122,6 +1172,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (!ret)
gic_v3_kvm_info.vcpu = r;
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_set_kvm_info(&gic_v3_kvm_info);
}
@@ -1135,15 +1186,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *parent)
dist_base = of_iomap(node, 0);
if (!dist_base) {
pr_err("%s: unable to map gic dist registers\n", pr_err("%pOF: unable to map gic dist registers\n", node);
node->full_name);
return -ENXIO;
}

err = gic_validate_dist_version(dist_base);
if (err) {
pr_err("%s: no distributor detected, giving up\n", pr_err("%pOF: no distributor detected, giving up\n", node);
node->full_name);
goto out_unmap_dist;
}
@@ -1163,8 +1212,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *parent)
ret = of_address_to_resource(node, 1 + i, &res);
rdist_regs[i].redist_base = of_iomap(node, 1 + i);
if (ret || !rdist_regs[i].redist_base) {
pr_err("%s: couldn't map region %d\n", pr_err("%pOF: couldn't map region %d\n", node, i);
node->full_name, i);
err = -ENODEV;
goto out_unmap_rdist;
}
@@ -1418,6 +1466,7 @@ static void __init gic_acpi_setup_kvm_info(void)
vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
}
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_set_kvm_info(&gic_v3_kvm_info);
}
...
/*
* Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/sched.h>
#include <linux/irqchip/arm-gic-v4.h>
/*
* WARNING: The blurb below assumes that you understand the
* intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
* translated into GICv4 commands. So it effectively targets at most
* two individuals. You know who you are.
*
* The core GICv4 code is designed to *avoid* exposing too much of the
* core GIC code (that would in turn leak into the hypervisor code),
* and instead provide a hypervisor agnostic interface to the HW (of
* course, the astute reader will quickly realize that hypervisor
* agnostic actually means KVM-specific - what were you thinking?).
*
* In order to achieve a modicum of isolation, we try to hide most of
* the GICv4 "stuff" behind normal irqchip operations:
*
* - Any guest-visible VLPI is backed by a Linux interrupt (and a
* physical LPI which gets unmapped when the guest maps the
* VLPI). This allows the same DevID/EventID pair to be either
* mapped to the LPI (host) or the VLPI (guest). Note that this is
* exclusive, and you cannot have both.
*
* - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
*
* - Guest INT/CLEAR commands are implemented through
* irq_set_irqchip_state().
*
* - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
* issuing an INV after changing a priority) gets shoved into the
* irq_set_vcpu_affinity() method. While this is quite horrible
* (let's face it, this is the irqchip version of an ioctl), it
* confines the crap to a single location. And map/unmap really is
* about setting the affinity of a VLPI to a vcpu, so only INV is
* majorly out of place. So there.
*
* A number of commands are simply not provided by this interface, as
* they do not make direct sense. For example, MAPD is purely local to
* the virtual ITS (because it references a virtual device, and the
* physical ITS is still very much in charge of the physical
* device). Same goes for things like MAPC (the physical ITS deals
* with the actual vPE affinity, and not the braindead concept of
* collection). SYNC is not provided either, as each and every command
* is followed by a VSYNC. This could be relaxed in the future, should
* this be seen as a bottleneck (yes, this means *never*).
*
* But handling VLPIs is only one side of the job of the GICv4
* code. The other (darker) side is to take care of the doorbell
* interrupts which are delivered when a VLPI targeting a non-running
* vcpu is being made pending.
*
* The choice made here is that each vcpu (VPE in old northern GICv4
* dialect) gets a single doorbell LPI, no matter how many interrupts
* are targeting it. This has a nice property, which is that the
* interrupt becomes a handle for the VPE, and that the hypervisor
* code can manipulate it through the normal interrupt API:
*
* - VMs (or rather the VM abstraction that matters to the GIC)
* contain an irq domain where each interrupt maps to a VPE. In
* turn, this domain sits on top of the normal LPI allocator, and a
* specially crafted irq_chip implementation.
*
* - mask/unmask do what is expected on the doorbell interrupt.
*
* - irq_set_affinity is used to move a VPE from one redistributor to
* another.
*
* - irq_set_vcpu_affinity once again gets hijacked for the purpose of
* creating a new sub-API, namely scheduling/descheduling a VPE
* (which involves programming GICR_V{PROP,PEND}BASER) and
* performing INVALL operations.
*/
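To make the contract above concrete, here is a hedged sketch of the hypervisor-side call flow for a two-vCPU VM, using only the entry points defined in this file; demo_vlpi_setup, the INTID and the properties value are invented for illustration, the struct field names are those introduced by this series, and a real caller (i.e. KVM) would also wire up the doorbell handlers and eventually tear things down with its_unmap_vlpi()/its_free_vcpu_irqs():

static struct its_vpe vpe0, vpe1;
static struct its_vpe *vpes[] = { &vpe0, &vpe1 };

static struct its_vm demo_vm = {
	.vpes		= vpes,
	.nr_vpes	= 2,
};

static int demo_vlpi_setup(int host_irq)
{
	struct its_vlpi_map map = {
		.vm		= &demo_vm,
		.vpe_idx	= 0,	/* route to vpe0 */
		.vintid		= 27,	/* guest-visible INTID */
		.properties	= 0xa0,	/* default priority, disabled */
		.db_enabled	= true,
	};
	int ret;

	/* Allocate one doorbell per VPE and the per-VM VPE domain */
	ret = its_alloc_vcpu_irqs(&demo_vm);
	if (ret)
		return ret;

	/* Turn the host LPI into a VLPI injected into the guest */
	return its_map_vlpi(host_irq, &map);
}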
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
int vpe_base_irq, i;
vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
task_pid_nr(current));
if (!vm->fwnode)
goto err;
vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
vm->fwnode, vpe_domain_ops,
vm);
if (!vm->domain)
goto err;
for (i = 0; i < vm->nr_vpes; i++) {
vm->vpes[i]->its_vm = vm;
vm->vpes[i]->idai = true;
}
vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
NUMA_NO_NODE, vm,
false, NULL);
if (vpe_base_irq <= 0)
goto err;
for (i = 0; i < vm->nr_vpes; i++)
vm->vpes[i]->irq = vpe_base_irq + i;
return 0;
err:
if (vm->domain)
irq_domain_remove(vm->domain);
if (vm->fwnode)
irq_domain_free_fwnode(vm->fwnode);
return -ENOMEM;
}
void its_free_vcpu_irqs(struct its_vm *vm)
{
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
}
static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
{
return irq_set_vcpu_affinity(vpe->irq, info);
}
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
struct its_cmd_info info;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
return its_send_vpe_cmd(vpe, &info);
}
int its_invall_vpe(struct its_vpe *vpe)
{
struct its_cmd_info info = {
.cmd_type = INVALL_VPE,
};
return its_send_vpe_cmd(vpe, &info);
}
int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = MAP_VLPI,
.map = map,
};
/*
* The host will never see that interrupt firing again, so it
* is vital that we don't do any lazy masking.
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
return irq_set_vcpu_affinity(irq, &info);
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = GET_VLPI,
.map = map,
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_unmap_vlpi(int irq)
{
irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
return irq_set_vcpu_affinity(irq, NULL);
}
int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
struct its_cmd_info info = {
.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
.config = config,
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
vpe_domain_ops = ops;
return 0;
}
pr_err("ITS: No GICv4 VPE domain allocated\n");
return -ENODEV;
}
@@ -344,6 +344,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
writel_relaxed(val | bit, reg);
gic_unlock_irqrestore(flags);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
}
#endif
@@ -413,7 +415,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
static const struct irq_chip gic_chip = {
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
@@ -969,6 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}
return 0;
}
...
@@ -165,6 +165,8 @@ static int hip04_irq_set_affinity(struct irq_data *d,
writel_relaxed(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
#endif
@@ -312,6 +314,7 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_fasteoi_irq);
irq_set_probe(irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}
irq_set_chip_data(irq, d->host_data);
return 0;
...
@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
int i;

if (!parent) {
pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV;
}

parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: unable to get parent domain\n", node);
return -ENXIO;
}
...
@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
irqc->base = of_iomap(node, 0);
if (!irqc->base) {
pr_err("%pOF: unable to map registers\n", node);
kfree(irqc);
return -EINVAL;
}
...
@@ -17,13 +17,32 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>

#define MSI_IRQS_PER_MSIR 32
#define MSI_MSIR_OFFSET 4
#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
struct ls_scfg_msi_cfg {
u32 ibs_shift; /* Shift of interrupt bit select */
u32 msir_irqs; /* The irq number per MSIR */
u32 msir_base; /* The base address of MSIR */
};
struct ls_scfg_msir {
struct ls_scfg_msi *msi_data;
unsigned int index;
unsigned int gic_irq;
unsigned int bit_start;
unsigned int bit_end;
unsigned int srs; /* Shared interrupt register select */
void __iomem *reg;
};
struct ls_scfg_msi {
spinlock_t lock;
@@ -32,8 +51,11 @@ struct ls_scfg_msi {
struct irq_domain *msi_domain;
void __iomem *regs;
phys_addr_t msiir_addr;
struct ls_scfg_msi_cfg *cfg;
u32 msir_num;
struct ls_scfg_msir *msir;
u32 irqs_num;
unsigned long *used;
};
static struct irq_chip ls_scfg_msi_irq_chip = { static struct irq_chip ls_scfg_msi_irq_chip = {
...@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = { ...@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
.chip = &ls_scfg_msi_irq_chip, .chip = &ls_scfg_msi_irq_chip,
}; };
static int msi_affinity_flag = 1;
static int __init early_parse_ls_scfg_msi(char *p)
{
if (p && strncmp(p, "no-affinity", 11) == 0)
msi_affinity_flag = 0;
else
msi_affinity_flag = 1;
return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{ {
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
msg->address_hi = upper_32_bits(msi_data->msiir_addr); msg->address_hi = upper_32_bits(msi_data->msiir_addr);
msg->address_lo = lower_32_bits(msi_data->msiir_addr); msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq << MSI_IBS_SHIFT; msg->data = data->hwirq;
if (msi_affinity_flag)
msg->data |= cpumask_first(data->common->affinity);
} }
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force) const struct cpumask *mask, bool force)
{ {
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
u32 cpu;
if (!msi_affinity_flag)
return -EINVAL;
if (!force)
cpu = cpumask_any_and(mask, cpu_online_mask);
else
cpu = cpumask_first(mask);
if (cpu >= msi_data->msir_num)
return -EINVAL;
if (msi_data->msir[cpu].gic_irq <= 0) {
pr_warn("cannot bind the irq to cpu%d\n", cpu);
return -EINVAL; return -EINVAL;
}
cpumask_copy(irq_data->common->affinity, mask);
return IRQ_SET_MASK_OK;
} }
static struct irq_chip ls_scfg_msi_parent_chip = { static struct irq_chip ls_scfg_msi_parent_chip = {
...@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, ...@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
WARN_ON(nr_irqs != 1); WARN_ON(nr_irqs != 1);
spin_lock(&msi_data->lock); spin_lock(&msi_data->lock);
pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
if (pos < MSI_MAX_IRQS) if (pos < msi_data->irqs_num)
__set_bit(pos, msi_data->used); __set_bit(pos, msi_data->used);
else else
err = -ENOSPC; err = -ENOSPC;
...@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain, ...@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
int pos; int pos;
pos = d->hwirq; pos = d->hwirq;
if (pos < 0 || pos >= MSI_MAX_IRQS) { if (pos < 0 || pos >= msi_data->irqs_num) {
pr_err("failed to teardown msi. Invalid hwirq %d\n", pos); pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
return; return;
} }
...@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = { ...@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
static void ls_scfg_msi_irq_handler(struct irq_desc *desc) static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{ {
struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
struct ls_scfg_msi *msi_data = msir->msi_data;
unsigned long val; unsigned long val;
int pos, virq; int pos, size, virq, hwirq;
chained_irq_enter(irq_desc_get_chip(desc), desc); chained_irq_enter(irq_desc_get_chip(desc), desc);
val = ioread32be(msi_data->regs + MSIR); val = ioread32be(msir->reg);
for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
virq = irq_find_mapping(msi_data->parent, (31 - pos)); pos = msir->bit_start;
size = msir->bit_end + 1;
for_each_set_bit_from(pos, &val, size) {
hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
msir->srs;
virq = irq_find_mapping(msi_data->parent, hwirq);
if (virq) if (virq)
generic_handle_irq(virq); generic_handle_irq(virq);
} }
...@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data) ...@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{ {
/* Initialize MSI domain parent */ /* Initialize MSI domain parent */
msi_data->parent = irq_domain_add_linear(NULL, msi_data->parent = irq_domain_add_linear(NULL,
MSI_MAX_IRQS, msi_data->irqs_num,
&ls_scfg_msi_domain_ops, &ls_scfg_msi_domain_ops,
msi_data); msi_data);
if (!msi_data->parent) { if (!msi_data->parent) {
...@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data) ...@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
return 0; return 0;
} }
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
struct ls_scfg_msir *msir;
int virq, i, hwirq;
virq = platform_get_irq(msi_data->pdev, index);
if (virq <= 0)
return -ENODEV;
msir = &msi_data->msir[index];
msir->index = index;
msir->msi_data = msi_data;
msir->gic_irq = virq;
msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
msir->bit_start = 32 - ((msir->index + 1) *
MSI_LS1043V1_1_IRQS_PER_MSIR);
msir->bit_end = msir->bit_start +
MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
} else {
msir->bit_start = 0;
msir->bit_end = msi_data->cfg->msir_irqs - 1;
}
irq_set_chained_handler_and_data(msir->gic_irq,
ls_scfg_msi_irq_handler,
msir);
if (msi_affinity_flag) {
/* Associate MSIR interrupt to the cpu */
irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
msir->srs = 0; /* This value is determined by the CPU */
} else
msir->srs = index;
/* Release the hwirqs corresponding to this MSIR */
if (!msi_affinity_flag || msir->index == 0) {
for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
hwirq = i << msi_data->cfg->ibs_shift | msir->index;
bitmap_clear(msi_data->used, hwirq, 1);
}
}
return 0;
}
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
struct ls_scfg_msi *msi_data = msir->msi_data;
int i, hwirq;
if (msir->gic_irq > 0)
irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
hwirq = i << msi_data->cfg->ibs_shift | msir->index;
bitmap_set(msi_data->used, hwirq, 1);
}
return 0;
}
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
.ibs_shift = 3,
.msir_irqs = MSI_IRQS_PER_MSIR,
.msir_base = MSI_MSIR_OFFSET,
};
static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
.ibs_shift = 2,
.msir_irqs = MSI_IRQS_PER_MSIR,
.msir_base = MSI_MSIR_OFFSET,
};
static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
.ibs_shift = 2,
.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};
static const struct of_device_id ls_scfg_msi_id[] = {
/* The following two misspelled compatibles are obsolete */
{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
static int ls_scfg_msi_probe(struct platform_device *pdev) static int ls_scfg_msi_probe(struct platform_device *pdev)
{ {
const struct of_device_id *match;
struct ls_scfg_msi *msi_data; struct ls_scfg_msi *msi_data;
struct resource *res; struct resource *res;
int ret; int i, ret;
match = of_match_device(ls_scfg_msi_id, &pdev->dev);
if (!match)
return -ENODEV;
msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data) if (!msi_data)
return -ENOMEM; return -ENOMEM;
msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
msi_data->regs = devm_ioremap_resource(&pdev->dev, res); msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi_data->regs)) { if (IS_ERR(msi_data->regs)) {
...@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev) ...@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
} }
msi_data->msiir_addr = res->start; msi_data->msiir_addr = res->start;
msi_data->irq = platform_get_irq(pdev, 0);
if (msi_data->irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
return -ENODEV;
}
msi_data->pdev = pdev; msi_data->pdev = pdev;
spin_lock_init(&msi_data->lock); spin_lock_init(&msi_data->lock);
msi_data->irqs_num = MSI_IRQS_PER_MSIR *
(1 << msi_data->cfg->ibs_shift);
msi_data->used = devm_kcalloc(&pdev->dev,
BITS_TO_LONGS(msi_data->irqs_num),
sizeof(*msi_data->used),
GFP_KERNEL);
if (!msi_data->used)
return -ENOMEM;
/*
* Reserve all the hwirqs
* The available hwirqs will be released in ls_scfg_msi_setup_hwirq()
*/
bitmap_set(msi_data->used, 0, msi_data->irqs_num);
msi_data->msir_num = of_irq_count(pdev->dev.of_node);
if (msi_affinity_flag) {
u32 cpu_num;
cpu_num = num_possible_cpus();
if (msi_data->msir_num >= cpu_num)
msi_data->msir_num = cpu_num;
else
msi_affinity_flag = 0;
}
msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
sizeof(*msi_data->msir),
GFP_KERNEL);
if (!msi_data->msir)
return -ENOMEM;
for (i = 0; i < msi_data->msir_num; i++)
ls_scfg_msi_setup_hwirq(msi_data, i);
ret = ls_scfg_msi_domains_init(msi_data); ret = ls_scfg_msi_domains_init(msi_data);
if (ret) if (ret)
return ret; return ret;
irq_set_chained_handler_and_data(msi_data->irq,
ls_scfg_msi_irq_handler,
msi_data);
platform_set_drvdata(pdev, msi_data); platform_set_drvdata(pdev, msi_data);
return 0; return 0;
...@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev) ...@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
static int ls_scfg_msi_remove(struct platform_device *pdev) static int ls_scfg_msi_remove(struct platform_device *pdev)
{ {
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
int i;
irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); for (i = 0; i < msi_data->msir_num; i++)
ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
irq_domain_remove(msi_data->msi_domain); irq_domain_remove(msi_data->msi_domain);
irq_domain_remove(msi_data->parent); irq_domain_remove(msi_data->parent);
...@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev) ...@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
return 0; return 0;
} }
static const struct of_device_id ls_scfg_msi_id[] = {
{ .compatible = "fsl,1s1021a-msi", },
{ .compatible = "fsl,1s1043a-msi", },
{},
};
static struct platform_driver ls_scfg_msi_driver = { static struct platform_driver ls_scfg_msi_driver = {
.driver = { .driver = {
.name = "ls-scfg-msi", .name = "ls-scfg-msi",
......
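A minimal sketch of the hwirq numbering the rewritten driver uses (illustrative only, not driver code): the MSI message data is built as (interrupt bit select << ibs_shift) | SRS, and the chained handler reverses that from the position of each set bit it finds in an MSIR. Assuming the 32-bits-per-MSIR layout with ibs_shift = 2:

#include <assert.h>

#define IBS_SHIFT	2	/* assumed: LS1046A-style configuration */
#define BIT_END		31	/* full 32-bit MSIR, bits 0..31 */

/* Device side: MSI message data programmed for interrupt bit select 'ibs'
 * targeting shared register select 'srs'. */
static unsigned int msi_data_for(unsigned int ibs, unsigned int srs)
{
	return (ibs << IBS_SHIFT) | srs;
}

/* Handler side: hwirq recovered from a set bit at position 'pos' in an
 * MSIR read, mirroring the for_each_set_bit_from() loop above. */
static unsigned int hwirq_from_bit(unsigned int pos, unsigned int srs)
{
	return ((BIT_END - pos) << IBS_SHIFT) | srs;
}

int main(void)
{
	unsigned int ibs, srs = 1;

	/* A write for 'ibs' sets bit (BIT_END - ibs); decoding that bit
	 * must reproduce the original hwirq. */
	for (ibs = 0; ibs <= BIT_END; ibs++)
		assert(hwirq_from_bit(BIT_END - ibs, srs) == msi_data_for(ibs, srs));
	return 0;
}

When the affinity scheme is active (the default unless lsmsi=no-affinity is given on the kernel command line), the driver programs srs = 0 at setup time and ORs the target CPU number into the message data instead, so the same low bits steer the write to the MSIR owned by that CPU.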
...@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data, ...@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data,
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
irq_data_update_effective_affinity(data, cpumask_of(cpu));
return 0; return 0;
} }
#else #else
...@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq, ...@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq,
else else
irq_set_chip_and_handler(irq, &meta_intc_edge_chip, irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
handle_edge_irq); handle_edge_irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
return 0; return 0;
} }
......
...@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, ...@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
cpumask_t tmp = CPU_MASK_NONE; cpumask_t tmp = CPU_MASK_NONE;
unsigned long flags; unsigned long flags;
int i; int i, cpu;
cpumask_and(&tmp, cpumask, cpu_online_mask); cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpumask_empty(&tmp)) if (cpumask_empty(&tmp))
return -EINVAL; return -EINVAL;
cpu = cpumask_first(&tmp);
/* Assumption : cpumask refers to a single CPU */ /* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags); spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */ /* Re-route this IRQ */
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); gic_map_to_vpe(irq, mips_cm_vp_id(cpu));
/* Update the pcpu_masks */ /* Update the pcpu_masks */
for (i = 0; i < min(gic_vpes, NR_CPUS); i++) for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask); clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); set_bit(irq, pcpu_masks[cpu].pcpu_mask);
cpumask_copy(irq_data_get_affinity_mask(d), cpumask); cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags); spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY; return IRQ_SET_MASK_OK_NOCOPY;
...@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, ...@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (err) if (err)
return err; return err;
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
return gic_shared_irq_domain_map(d, virq, hwirq, 0); return gic_shared_irq_domain_map(d, virq, hwirq, 0);
} }
......
...@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = { ...@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
.xlate = mmp_irq_domain_xlate, .xlate = mmp_irq_domain_xlate,
}; };
static struct mmp_intc_conf mmp_conf = { static const struct mmp_intc_conf mmp_conf = {
.conf_enable = 0x51, .conf_enable = 0x51,
.conf_disable = 0x0, .conf_disable = 0x0,
.conf_mask = 0x7f, .conf_mask = 0x7f,
}; };
static struct mmp_intc_conf mmp2_conf = { static const struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20, .conf_enable = 0x20,
.conf_disable = 0x0, .conf_disable = 0x0,
.conf_mask = 0x7f, .conf_mask = 0x7f,
......
...@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, ...@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
chip_data->intpol_words[i] = size / 4; chip_data->intpol_words[i] = size / 4;
chip_data->intpol_bases[i] = of_iomap(node, i); chip_data->intpol_bases[i] = of_iomap(node, i);
if (ret || !chip_data->intpol_bases[i]) { if (ret || !chip_data->intpol_bases[i]) {
pr_err("%s: couldn't map region %d\n", pr_err("%pOF: couldn't map region %d\n", node, i);
node->full_name, i);
ret = -ENODEV; ret = -ENODEV;
goto out_free_intpol; goto out_free_intpol;
} }
......
...@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np, ...@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
&icoll_irq_domain_ops, NULL); &icoll_irq_domain_ops, NULL);
if (!icoll_domain) if (!icoll_domain)
panic("%s: unable to create irq domain", np->full_name); panic("%pOF: unable to create irq domain", np);
} }
static void __iomem * __init icoll_init_iobase(struct device_node *np) static void __iomem * __init icoll_init_iobase(struct device_node *np)
...@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np) ...@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
icoll_base = of_io_request_and_map(np, 0, np->name); icoll_base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(icoll_base)) if (IS_ERR(icoll_base))
panic("%s: unable to map resource", np->full_name); panic("%pOF: unable to map resource", np);
return icoll_base; return icoll_base;
} }
......
...@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node, ...@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,
base = of_iomap(node, 0); base = of_iomap(node, 0);
if (!base) { if (!base) {
pr_err("%s: Unable to map registers\n", node->full_name); pr_err("%pOF: Unable to map registers\n", node);
return -ENOMEM; return -ENOMEM;
} }
...@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node, ...@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
nr_exti = fls(readl_relaxed(base + EXTI_RTSR)); nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
writel_relaxed(0, base + EXTI_RTSR); writel_relaxed(0, base + EXTI_RTSR);
pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti); pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
domain = irq_domain_add_linear(node, nr_exti, domain = irq_domain_add_linear(node, nr_exti,
&irq_exti_domain_ops, NULL); &irq_exti_domain_ops, NULL);
...@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node, ...@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
handle_edge_irq, clr, 0, 0); handle_edge_irq, clr, 0, 0);
if (ret) { if (ret) {
pr_err("%s: Could not allocate generic interrupt chip.\n", pr_err("%pOF: Could not allocate generic interrupt chip.\n",
node->full_name); node);
goto out_free_domain; goto out_free_domain;
} }
......
...@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node, ...@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
{ {
sun4i_irq_base = of_iomap(node, 0); sun4i_irq_base = of_iomap(node, 0);
if (!sun4i_irq_base) if (!sun4i_irq_base)
panic("%s: unable to map IC registers\n", panic("%pOF: unable to map IC registers\n",
node->full_name); node);
/* Disable all interrupts */ /* Disable all interrupts */
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
...@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node, ...@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
&sun4i_irq_ops, NULL); &sun4i_irq_ops, NULL);
if (!sun4i_irq_domain) if (!sun4i_irq_domain)
panic("%s: unable to create IRQ domain\n", node->full_name); panic("%pOF: unable to create IRQ domain\n", node);
set_handle_irq(sun4i_handle_irq); set_handle_irq(sun4i_handle_irq);
......
...@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node, ...@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
int err; int err;
if (!parent) { if (!parent) {
pr_err("%s: no parent, giving up\n", node->full_name); pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV; return -ENODEV;
} }
parent_domain = irq_find_host(parent); parent_domain = irq_find_host(parent);
if (!parent_domain) { if (!parent_domain) {
pr_err("%s: unable to obtain parent domain\n", node->full_name); pr_err("%pOF: unable to obtain parent domain\n", node);
return -ENXIO; return -ENXIO;
} }
...@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node, ...@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
} }
if (!num_ictlrs) { if (!num_ictlrs) {
pr_err("%s: no valid regions, giving up\n", node->full_name); pr_err("%pOF: no valid regions, giving up\n", node);
err = -ENOMEM; err = -ENOMEM;
goto out_free; goto out_free;
} }
WARN(num_ictlrs != soc->num_ictlrs, WARN(num_ictlrs != soc->num_ictlrs,
"%s: Found %u interrupt controllers in DT; expected %u.\n", "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
node->full_name, num_ictlrs, soc->num_ictlrs); node, num_ictlrs, soc->num_ictlrs);
domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
node, &tegra_ictlr_domain_ops, node, &tegra_ictlr_domain_ops,
lic); lic);
if (!domain) { if (!domain) {
pr_err("%s: failed to allocated domain\n", node->full_name); pr_err("%pOF: failed to allocated domain\n", node);
err = -ENOMEM; err = -ENOMEM;
goto out_unmap; goto out_unmap;
} }
tegra_ictlr_syscore_init(); tegra_ictlr_syscore_init();
pr_info("%s: %d interrupts forwarded to %s\n", pr_info("%pOF: %d interrupts forwarded to %pOF\n",
node->full_name, num_ictlrs * 32, parent->full_name); node, num_ictlrs * 32, parent);
return 0; return 0;
......
/*
* Driver for UniPhier AIDET (ARM Interrupt Detector)
*
* Copyright (C) 2017 Socionext Inc.
* Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define UNIPHIER_AIDET_NR_IRQS 256
#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
struct uniphier_aidet_priv {
struct irq_domain *domain;
void __iomem *reg_base;
spinlock_t lock;
u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
};
static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
unsigned int reg, u32 mask, u32 val)
{
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&priv->lock, flags);
tmp = readl_relaxed(priv->reg_base + reg);
tmp &= ~mask;
tmp |= mask & val;
writel_relaxed(tmp, priv->reg_base + reg);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
unsigned long index, unsigned int val)
{
unsigned int reg;
u32 mask;
reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
mask = BIT(index % 32);
uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
}
static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
{
struct uniphier_aidet_priv *priv = data->chip_data;
unsigned int val;
/* enable inverter for active low triggers */
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
val = 0;
break;
case IRQ_TYPE_EDGE_FALLING:
val = 1;
type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
val = 1;
type = IRQ_TYPE_LEVEL_HIGH;
break;
default:
return -EINVAL;
}
uniphier_aidet_detconf_update(priv, data->hwirq, val);
return irq_chip_set_type_parent(data, type);
}
static struct irq_chip uniphier_aidet_irq_chip = {
.name = "AIDET",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = uniphier_aidet_irq_set_type,
};
static int uniphier_aidet_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (WARN_ON(fwspec->param_count < 2))
return -EINVAL;
*out_hwirq = fwspec->param[0];
*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *arg)
{
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq;
unsigned int type;
int ret;
if (nr_irqs != 1)
return -EINVAL;
ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
if (ret)
return ret;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
break;
case IRQ_TYPE_EDGE_FALLING:
type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
type = IRQ_TYPE_LEVEL_HIGH;
break;
default:
return -EINVAL;
}
if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
return -ENXIO;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&uniphier_aidet_irq_chip,
domain->host_data);
if (ret)
return ret;
/* parent is GIC */
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0; /* SPI */
parent_fwspec.param[1] = hwirq;
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}
static const struct irq_domain_ops uniphier_aidet_domain_ops = {
.alloc = uniphier_aidet_domain_alloc,
.free = irq_domain_free_irqs_common,
.translate = uniphier_aidet_domain_translate,
};
static int uniphier_aidet_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *parent_np;
struct irq_domain *parent_domain;
struct uniphier_aidet_priv *priv;
struct resource *res;
parent_np = of_irq_find_parent(dev->of_node);
if (!parent_np)
return -ENXIO;
parent_domain = irq_find_host(parent_np);
of_node_put(parent_np);
if (!parent_domain)
return -EPROBE_DEFER;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
spin_lock_init(&priv->lock);
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_AIDET_NR_IRQS,
of_node_to_fwnode(dev->of_node),
&uniphier_aidet_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
return 0;
}
static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
{
struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
int i;
for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
priv->saved_vals[i] = readl_relaxed(
priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
return 0;
}
static int __maybe_unused uniphier_aidet_resume(struct device *dev)
{
struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
int i;
for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
writel_relaxed(priv->saved_vals[i],
priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
return 0;
}
static const struct dev_pm_ops uniphier_aidet_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
uniphier_aidet_resume)
};
static const struct of_device_id uniphier_aidet_match[] = {
{ .compatible = "socionext,uniphier-ld4-aidet" },
{ .compatible = "socionext,uniphier-pro4-aidet" },
{ .compatible = "socionext,uniphier-sld8-aidet" },
{ .compatible = "socionext,uniphier-pro5-aidet" },
{ .compatible = "socionext,uniphier-pxs2-aidet" },
{ .compatible = "socionext,uniphier-ld11-aidet" },
{ .compatible = "socionext,uniphier-ld20-aidet" },
{ .compatible = "socionext,uniphier-pxs3-aidet" },
{ /* sentinel */ }
};
static struct platform_driver uniphier_aidet_driver = {
.probe = uniphier_aidet_probe,
.driver = {
.name = "uniphier-aidet",
.of_match_table = uniphier_aidet_match,
.pm = &uniphier_aidet_pm_ops,
},
};
builtin_platform_driver(uniphier_aidet_driver);
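To make the DETCONF indexing above concrete, a hedged arithmetic sketch (standalone, not driver code): hwirq N lands in the 32-bit register at DETCONF + (N / 32) * 4, at bit N % 32.

#include <assert.h>
#include <stdint.h>

#define DETCONF	0x04	/* inverter register base, as in the driver */

/* Compute the register offset and bit mask for a given hwirq. */
static void detconf_slot(unsigned int hwirq, unsigned int *reg, uint32_t *mask)
{
	*reg = DETCONF + hwirq / 32 * 4;
	*mask = UINT32_C(1) << (hwirq % 32);
}

int main(void)
{
	unsigned int reg;
	uint32_t mask;

	detconf_slot(85, &reg, &mask);	/* 85 = 2 * 32 + 21 */
	assert(reg == 0x0c && mask == (UINT32_C(1) << 21));
	return 0;
}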
...@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
if (irqc->intr_mask >> nr_irq) if (irqc->intr_mask >> nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n", pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc->full_name, nr_irq, irqc->intr_mask); intc, nr_irq, irqc->intr_mask);
/* /*
......
...@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq, ...@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
return 0; return 0;
} }
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
return xtensa_irq_map(d, irq, hw); return xtensa_irq_map(d, irq, hw);
} }
...@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d) ...@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
static int xtensa_mx_irq_set_affinity(struct irq_data *d, static int xtensa_mx_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force) const struct cpumask *dest, bool force)
{ {
unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask); int cpu = cpumask_any_and(dest, cpu_online_mask);
unsigned mask = 1u << cpu;
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE)); set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return 0; return 0;
} }
......
...@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); ...@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data); extern void irq_chip_ack_parent(struct irq_data *data);
...@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) ...@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
static inline static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{ {
if (!cpumask_empty(d->common->effective_affinity))
return d->common->effective_affinity; return d->common->effective_affinity;
return d->common->affinity;
} }
static inline void irq_data_update_effective_affinity(struct irq_data *d, static inline void irq_data_update_effective_affinity(struct irq_data *d,
const struct cpumask *m) const struct cpumask *m)
......
#ifndef _LINUX_IRQ_SIM_H
#define _LINUX_IRQ_SIM_H
/*
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/irq_work.h>
#include <linux/device.h>
/*
* Provides a framework for allocating simulated interrupts which can be
* requested like normal irqs and enqueued from process context.
*/
struct irq_sim_work_ctx {
struct irq_work work;
int irq;
};
struct irq_sim_irq_ctx {
int irqnum;
bool enabled;
};
struct irq_sim {
struct irq_sim_work_ctx work_ctx;
int irq_base;
unsigned int irq_count;
struct irq_sim_irq_ctx *irqs;
};
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
unsigned int num_irqs);
void irq_sim_fini(struct irq_sim *sim);
void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
#endif /* _LINUX_IRQ_SIM_H */
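A hedged usage sketch for this API (the module and variable names are illustrative): a test driver embeds a struct irq_sim, initializes it, requests one of the simulated lines like any other irq, and fires it from process context.

#include <linux/interrupt.h>
#include <linux/irq_sim.h>
#include <linux/module.h>

static struct irq_sim demo_sim;

static irqreturn_t demo_handler(int irq, void *data)
{
	pr_info("simulated irq %d fired\n", irq);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	int ret, irq;

	ret = irq_sim_init(&demo_sim, 4);	/* four dummy lines */
	if (ret)
		return ret;

	irq = irq_sim_irqnum(&demo_sim, 0);	/* linux irq of line 0 */
	ret = request_irq(irq, demo_handler, 0, "irq_sim_demo", NULL);
	if (ret) {
		irq_sim_fini(&demo_sim);
		return ret;
	}

	irq_sim_fire(&demo_sim, 0);	/* handler runs via irq_work */
	return 0;
	/* teardown (free_irq() + irq_sim_fini()) elided */
}
module_init(demo_init);
MODULE_LICENSE("GPL");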
...@@ -27,6 +27,8 @@ struct gic_kvm_info { ...@@ -27,6 +27,8 @@ struct gic_kvm_info {
unsigned int maint_irq; unsigned int maint_irq;
/* Virtual control interface */ /* Virtual control interface */
struct resource vctrl; struct resource vctrl;
/* vlpi support */
bool has_v4;
}; };
const struct gic_kvm_info *gic_get_kvm_info(void); const struct gic_kvm_info *gic_get_kvm_info(void);
......
...@@ -204,6 +204,7 @@ ...@@ -204,6 +204,7 @@
#define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1) #define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4) #define GICR_TYPER_LAST (1U << 4)
#define GIC_V3_REDIST_SIZE 0x20000 #define GIC_V3_REDIST_SIZE 0x20000
...@@ -211,6 +212,69 @@ ...@@ -211,6 +212,69 @@
#define LPI_PROP_GROUP1 (1 << 1) #define LPI_PROP_GROUP1 (1 << 1)
#define LPI_PROP_ENABLED (1 << 0) #define LPI_PROP_ENABLED (1 << 0)
/*
* Re-Distributor registers, offsets from VLPI_base
*/
#define GICR_VPROPBASER 0x0070
#define GICR_VPROPBASER_IDBITS_MASK 0x1f
#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
#define GICR_VPROPBASER_SHAREABILITY_MASK \
GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
#define GICR_VPROPBASER_CACHEABILITY_MASK \
GICR_VPROPBASER_INNER_CACHEABILITY_MASK
#define GICR_VPROPBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
#define GICR_VPENDBASER 0x0078
#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
#define GICR_VPENDBASER_SHAREABILITY_MASK \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
#define GICR_VPENDBASER_CACHEABILITY_MASK \
GICR_VPENDBASER_INNER_CACHEABILITY_MASK
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
#define GICR_VPENDBASER_Dirty (1ULL << 60)
#define GICR_VPENDBASER_PendingLast (1ULL << 61)
#define GICR_VPENDBASER_IDAI (1ULL << 62)
#define GICR_VPENDBASER_Valid (1ULL << 63)
/* /*
* ITS registers, offsets from ITS_base * ITS registers, offsets from ITS_base
*/ */
...@@ -234,15 +298,21 @@ ...@@ -234,15 +298,21 @@
#define GITS_TRANSLATER 0x10040 #define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT (1U << 31) #define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0) #define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24 #define GITS_TYPER_HWCOLLCNT_SHIFT 24
#define GITS_TYPER_VMOVP (1ULL << 37)
#define GITS_IIDR_REV_SHIFT 12 #define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) #define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
...@@ -341,6 +411,18 @@ ...@@ -341,6 +411,18 @@
#define GITS_CMD_CLEAR 0x04 #define GITS_CMD_CLEAR 0x04
#define GITS_CMD_SYNC 0x05 #define GITS_CMD_SYNC 0x05
/*
* GICv4 ITS specific commands
*/
#define GITS_CMD_GICv4(x) ((x) | 0x20)
#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP is the odd one, as it doesn't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
/* /*
* ITS error numbers * ITS error numbers
*/ */
...@@ -487,6 +569,8 @@ struct rdists { ...@@ -487,6 +569,8 @@ struct rdists {
struct page *prop_page; struct page *prop_page;
int id_bits; int id_bits;
u64 flags; u64 flags;
bool has_vlpis;
bool has_direct_lpi;
}; };
struct irq_domain; struct irq_domain;
......
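A short hedged sketch of how the new capability bits get consumed (mirroring what the GICv3/ITS driver does when it populates struct rdists; the function name is illustrative):

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative: probe-time report of the GICv4 feature bits. */
static void demo_report_v4_caps(u64 gits_typer, u64 gicr_typer)
{
	bool its_vlpis = !!(gits_typer & GITS_TYPER_VLPIS);
	bool rd_vlpis = !!(gicr_typer & GICR_TYPER_VLPIS);
	bool rd_direct = !!(gicr_typer & GICR_TYPER_DirectLPIS);

	pr_info("ITS vLPIs: %d, RD vLPIs: %d, RD direct LPI: %d\n",
		its_vlpis, rd_vlpis, rd_direct);
}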
/*
* Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
#define __LINUX_IRQCHIP_ARM_GIC_V4_H
struct its_vpe;
/* Embedded in kvm.arch */
struct its_vm {
struct fwnode_handle *fwnode;
struct irq_domain *domain;
struct page *vprop_page;
struct its_vpe **vpes;
int nr_vpes;
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
};
/* Embedded in kvm_vcpu.arch */
struct its_vpe {
struct page *vpt_page;
struct its_vm *its_vm;
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
/* VPE proxy mapping */
int vpe_proxy_event;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
* programming of the ITS.
*/
u16 col_idx;
/* Unique (system-wide) VPE identifier */
u16 vpe_id;
/* Implementation Defined Area Invalid */
bool idai;
/* Pending VLPIs on schedule out? */
bool pending_last;
};
/*
* struct its_vlpi_map: structure describing the mapping of a
* VLPI. Only to be interpreted in the context of a physical interrupt
* it complements. To be used as the vcpu_info passed to
* irq_set_vcpu_affinity().
*
* @vm: Pointer to the GICv4 notion of a VM
* @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
* @vintid: Virtual LPI number
* @db_enabled: Is the VPE doorbell to be generated?
*/
struct its_vlpi_map {
struct its_vm *vm;
struct its_vpe *vpe;
u32 vintid;
bool db_enabled;
};
enum its_vcpu_info_cmd_type {
MAP_VLPI,
GET_VLPI,
PROP_UPDATE_VLPI,
PROP_UPDATE_AND_INV_VLPI,
SCHEDULE_VPE,
DESCHEDULE_VPE,
INVALL_VPE,
};
struct its_cmd_info {
enum its_vcpu_info_cmd_type cmd_type;
union {
struct its_vlpi_map *map;
u8 config;
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
#endif
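A hedged sketch of how a hypervisor-side consumer might drive this interface (KVM is the intended user; error handling, locking, and the surrounding vgic plumbing are elided, and the function names are illustrative). It assumes its_alloc_vcpu_irqs() has already populated the its_vm:

#include <linux/irqchip/arm-gic-v4.h>
#include <linux/types.h>

/* Illustrative: forward a host interrupt as a VLPI to a guest vcpu. */
static int demo_forward_vlpi(struct its_vm *vm, struct its_vpe *vpe,
			     int host_irq, u32 vintid)
{
	struct its_vlpi_map map = {
		.vm		= vm,
		.vpe		= vpe,
		.vintid		= vintid,	/* LPI number the guest sees */
		.db_enabled	= true,		/* doorbell while not resident */
	};

	return its_map_vlpi(host_irq, &map);
}

/* Illustrative: residency control around vcpu entry/exit. */
static int demo_vcpu_load(struct its_vpe *vpe)
{
	return its_schedule_vpe(vpe, true);
}

static int demo_vcpu_put(struct its_vpe *vpe)
{
	return its_schedule_vpe(vpe, false);
}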
...@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain, ...@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
extern void irq_domain_free_irqs_top(struct irq_domain *domain, extern void irq_domain_free_irqs_top(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs); unsigned int virq, unsigned int nr_irqs);
extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base, unsigned int irq_base,
unsigned int nr_irqs, void *arg); unsigned int nr_irqs, void *arg);
......
...@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP ...@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP
config IRQ_DOMAIN config IRQ_DOMAIN
bool bool
# Support for simulated interrupts
config IRQ_SIM
bool
select IRQ_WORK
# Support for hierarchical irq domains # Support for hierarchical irq domains
config IRQ_DOMAIN_HIERARCHY config IRQ_DOMAIN_HIERARCHY
bool bool
select IRQ_DOMAIN select IRQ_DOMAIN
# Support for hierarchical fasteoi+edge and fasteoi+level handlers
config IRQ_FASTEOI_HIERARCHY_HANDLERS
bool
# Generic IRQ IPI support # Generic IRQ IPI support
config GENERIC_IRQ_IPI config GENERIC_IRQ_IPI
bool bool
......
...@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o ...@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o
obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_IRQ_SIM) += irq_sim.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
......
...@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void) ...@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void)
} }
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
* handle_fasteoi_ack_irq - irq handler for edge hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_ack() function
* called.
*/
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
preflow_handler(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
/**
* handle_fasteoi_mask_irq - irq handler for level hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_mask_ack() function
* called.
*/
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
preflow_handler(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
/** /**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL) * NULL)
...@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data) ...@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data)
else else
data->chip->irq_unmask(data); data->chip->irq_unmask(data);
} }
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
/** /**
* irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
...@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data) ...@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data)
else else
data->chip->irq_mask(data); data->chip->irq_mask(data);
} }
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
/** /**
* irq_chip_ack_parent - Acknowledge the parent interrupt * irq_chip_ack_parent - Acknowledge the parent interrupt
...@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data, ...@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
return -ENOSYS; return -ENOSYS;
} }
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
/** /**
* irq_chip_set_type_parent - Set IRQ type on the parent interrupt * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
......
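For irqchip authors, a hedged sketch of what the two new handlers above are for: a child domain stacked on a GIC-like transparent parent selects IRQ_FASTEOI_HIERARCHY_HANDLERS and installs the ack variant for edge interrupts, so the child chip's ->irq_ack() runs while EOI still flows to the parent. Everything below is illustrative, not an existing driver:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Kconfig for the hypothetical driver:
 *	select IRQ_FASTEOI_HIERARCHY_HANDLERS
 */
static void demo_child_ack(struct irq_data *d)
{
	/* hypothetical: clear the latched edge in the child's registers */
}

static struct irq_chip demo_child_chip = {
	.name			= "demo-child",
	.irq_ack		= demo_child_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

static int demo_child_alloc(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;	/* would come from translating arg */

	irq_domain_set_info(d, virq, hwirq, &demo_child_chip, d->host_data,
			    handle_fasteoi_ack_irq, NULL, NULL);
	/* parent allocation via irq_domain_alloc_irqs_parent() elided */
	return 0;
}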
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
*/ */
#include <linux/irqdomain.h> #include <linux/irqdomain.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/uaccess.h>
#include "internals.h" #include "internals.h"
...@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file) ...@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file)
return single_open(file, irq_debug_show, inode->i_private); return single_open(file, irq_debug_show, inode->i_private);
} }
static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct irq_desc *desc = file_inode(file)->i_private;
char buf[8] = { 0, };
size_t size;
size = min(sizeof(buf) - 1, count);
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
if (!strncmp(buf, "trigger", size)) {
unsigned long flags;
int err;
/* Try the HW interface first */
err = irq_set_irqchip_state(irq_desc_get_irq(desc),
IRQCHIP_STATE_PENDING, true);
if (!err)
return count;
/*
* Otherwise, try to inject via the resend interface,
* which may or may not succeed.
*/
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_settings_is_level(desc)) {
/* Can't do level, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
check_irq_resend(desc);
err = 0;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
return err ? err : count;
}
return count;
}
static const struct file_operations dfs_irq_ops = { static const struct file_operations dfs_irq_ops = {
.open = irq_debug_open, .open = irq_debug_open,
.write = irq_debug_write,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = single_release, .release = single_release,
...@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) ...@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
return; return;
sprintf(name, "%d", irq); sprintf(name, "%d", irq);
desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc, desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
&dfs_irq_ops); &dfs_irq_ops);
} }
......
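With the write handler above in place (and the file mode widened to 0644), a pending interrupt can now be injected from user space for testing, e.g.:

	echo trigger > /sys/kernel/debug/irq/irqs/<nr>

(the path assumes the usual debugfs mount point and the irq/irqs directory these per-irq files live in). The write first tries the hardware route via irq_set_irqchip_state(IRQCHIP_STATE_PENDING) and only then falls back to the software resend machinery, which deliberately refuses level-triggered interrupts.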
...@@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) ...@@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
#define for_each_action_of_desc(desc, act) \ #define for_each_action_of_desc(desc, act) \
for (act = desc->act; act; act = act->next) for (act = desc->action; act; act = act->next)
struct irq_desc * struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
......
/*
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/irq_sim.h>
#include <linux/irq.h>
struct irq_sim_devres {
struct irq_sim *sim;
};
static void irq_sim_irqmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = false;
}
static void irq_sim_irqunmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = true;
}
static struct irq_chip irq_sim_irqchip = {
.name = "irq_sim",
.irq_mask = irq_sim_irqmask,
.irq_unmask = irq_sim_irqunmask,
};
static void irq_sim_handle_irq(struct irq_work *work)
{
struct irq_sim_work_ctx *work_ctx;
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
handle_simple_irq(irq_to_desc(work_ctx->irq));
}
/**
* irq_sim_init - Initialize the interrupt simulator: allocate a range of
* dummy interrupts.
*
* @sim: The interrupt simulator object to initialize.
* @num_irqs: Number of interrupts to allocate
*
* Returns 0 on success and a negative error number on failure.
*/
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
{
int i;
sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
if (!sim->irqs)
return -ENOMEM;
sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (sim->irq_base < 0) {
kfree(sim->irqs);
return sim->irq_base;
}
for (i = 0; i < num_irqs; i++) {
sim->irqs[i].irqnum = sim->irq_base + i;
sim->irqs[i].enabled = false;
irq_set_chip(sim->irq_base + i, &irq_sim_irqchip);
irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]);
irq_set_handler(sim->irq_base + i, &handle_simple_irq);
irq_modify_status(sim->irq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
sim->irq_count = num_irqs;
return 0;
}
EXPORT_SYMBOL_GPL(irq_sim_init);
/**
* irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt
* descriptors and allocated memory.
*
* @sim: The interrupt simulator to tear down.
*/
void irq_sim_fini(struct irq_sim *sim)
{
irq_work_sync(&sim->work_ctx.work);
irq_free_descs(sim->irq_base, sim->irq_count);
kfree(sim->irqs);
}
EXPORT_SYMBOL_GPL(irq_sim_fini);
static void devm_irq_sim_release(struct device *dev, void *res)
{
struct irq_sim_devres *this = res;
irq_sim_fini(this->sim);
}
/**
* devm_irq_sim_init - Initialize the interrupt simulator for a managed device.
*
* @dev: Device to initialize the simulator object for.
* @sim: The interrupt simulator object to initialize.
* @num_irqs: Number of interrupts to allocate
*
* Returns 0 on success and a negative error number on failure.
*/
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
unsigned int num_irqs)
{
struct irq_sim_devres *dr;
int rv;
dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rv = irq_sim_init(sim, num_irqs);
if (rv) {
devres_free(dr);
return rv;
}
dr->sim = sim;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_irq_sim_init);
/**
* irq_sim_fire - Enqueue an interrupt.
*
* @sim: The interrupt simulator object.
* @offset: Offset of the simulated interrupt which should be fired.
*/
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
if (sim->irqs[offset].enabled) {
sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
irq_work_queue(&sim->work_ctx.work);
}
}
EXPORT_SYMBOL_GPL(irq_sim_fire);
/**
* irq_sim_irqnum - Get the allocated number of a dummy interrupt.
*
* @sim: The interrupt simulator object.
* @offset: Offset of the simulated interrupt for which to retrieve
* the number.
*/
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset)
{
return sim->irqs[offset].irqnum;
}
EXPORT_SYMBOL_GPL(irq_sim_irqnum);
...@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain) ...@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
} }
EXPORT_SYMBOL_GPL(irq_set_default_host); EXPORT_SYMBOL_GPL(irq_set_default_host);
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
}
}
static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = irq_data->irq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
}
}
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{ {
struct irq_data *irq_data = irq_get_irq_data(irq); struct irq_data *irq_data = irq_get_irq_data(irq);
...@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) ...@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
domain->mapcount--; domain->mapcount--;
/* Clear reverse map for this hwirq */ /* Clear reverse map for this hwirq */
if (hwirq < domain->revmap_size) { irq_domain_clear_mapping(domain, hwirq);
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
}
} }
int irq_domain_associate(struct irq_domain *domain, unsigned int virq, int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
...@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, ...@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
} }
domain->mapcount++; domain->mapcount++;
if (hwirq < domain->revmap_size) { irq_domain_set_mapping(domain, hwirq, irq_data);
domain->linear_revmap[hwirq] = virq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
}
mutex_unlock(&irq_domain_mutex); mutex_unlock(&irq_domain_mutex);
irq_clear_status_flags(virq, IRQ_NOREQUEST); irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
 
 	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
 		struct irq_domain *domain = data->domain;
-		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount++;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_set_mapping(domain, data->hwirq, data);
 
 		/* If not already assigned, give the domain the chip's name */
 		if (!domain->name && data->chip)
@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
 		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount--;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_clear_mapping(domain, hwirq);
 	}
 }
@@ -1362,6 +1362,7 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
-	domain->ops->free(domain, irq_base, nr_irqs);
+	if (domain->ops->free)
+		domain->ops->free(domain, irq_base, nr_irqs);
 }
@@ -1448,6 +1449,175 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 	return ret;
 }
 
+/* The irq_data was moved, fix the revmap to refer to the new location */
+static void irq_domain_fix_revmap(struct irq_data *d)
+{
+	void **slot;
+
+	if (d->hwirq < d->domain->revmap_size)
+		return; /* Not using radix tree. */
+
+	/* Fix up the revmap. */
+	mutex_lock(&revmap_trees_mutex);
+	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+	if (slot)
+		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+	mutex_unlock(&revmap_trees_mutex);
+}
+
+/**
+ * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
+ * @domain:	Domain to push.
+ * @virq:	Irq to push the domain in to.
+ * @arg:	Passed to the irq_domain_ops alloc() function.
+ *
+ * For an already existing irqdomain hierarchy, as might be obtained
+ * via a call to pci_enable_msix(), add an additional domain to the
+ * head of the processing chain.  Must be called before request_irq()
+ * has been called.
+ */
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
+{
+	struct irq_data *child_irq_data;
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_desc *desc;
+	int rv = 0;
+
+	/*
+	 * Check that no action has been set, which indicates the virq
+	 * is in a state where this function doesn't have to deal with
+	 * races between interrupt handling and maintaining the
+	 * hierarchy.  This will catch gross misuse.  Attempting to
+	 * make the check race free would require holding locks across
+	 * calls to struct irq_domain_ops->alloc(), which could lead
+	 * to deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	if (domain->parent != root_irq_data->domain)
+		return -EINVAL;
+
+	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
+				      irq_data_get_node(root_irq_data));
+	if (!child_irq_data)
+		return -ENOMEM;
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Copy the original irq_data. */
+	*child_irq_data = *root_irq_data;
+
+	/*
+	 * Overwrite the root_irq_data, which is embedded in struct
+	 * irq_desc, with values for this domain.
+	 */
+	root_irq_data->parent_data = child_irq_data;
+	root_irq_data->domain = domain;
+	root_irq_data->mask = 0;
+	root_irq_data->hwirq = 0;
+	root_irq_data->chip = NULL;
+	root_irq_data->chip_data = NULL;
+
+	/* May (probably does) set hwirq, chip, etc. */
+	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+	if (rv) {
+		/* Restore the original irq_data. */
+		*root_irq_data = *child_irq_data;
+		goto error;
+	}
+
+	irq_domain_fix_revmap(child_irq_data);
+	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
+
+error:
+	mutex_unlock(&irq_domain_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(irq_domain_push_irq);
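
To illustrate the intended calling convention, a driver that wants to interpose its own domain on an MSI-allocated virq might do something like the following. This is an editorial sketch, not part of the patch; all "my_*" names, the fwnode and the alloc argument are hypothetical placeholders.

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int my_interpose_domain(struct irq_domain *msi_parent,
			       struct fwnode_handle *my_fwnode,
			       const struct irq_domain_ops *my_ops,
			       int virq, void *alloc_arg)
{
	struct irq_domain *my_domain;
	int ret;

	/*
	 * The new domain must be a hierarchy child of the domain that
	 * currently owns virq; irq_domain_push_irq() checks this.
	 */
	my_domain = irq_domain_create_hierarchy(msi_parent, 0, 0, my_fwnode,
						my_ops, NULL);
	if (!my_domain)
		return -ENOMEM;

	/* Only legal while no handler is installed on virq. */
	ret = irq_domain_push_irq(my_domain, virq, alloc_arg);
	if (ret)
		irq_domain_remove(my_domain);

	return ret;
}

After a successful push, my_ops->alloc() has run for virq, so the new domain's chip sits at the top of the chain when request_irq() is eventually called.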
+/**
+ * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
+ * @domain:	Domain to remove.
+ * @virq:	Irq to remove the domain from.
+ *
+ * Undo the effects of a call to irq_domain_push_irq().  Must be
+ * called either before request_irq() or after free_irq().
+ */
+int irq_domain_pop_irq(struct irq_domain *domain, int virq)
+{
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_data *child_irq_data;
+	struct irq_data *tmp_irq_data;
+	struct irq_desc *desc;
+
+	/*
+	 * Check that no action is set, which indicates the virq is in
+	 * a state where this function doesn't have to deal with races
+	 * between interrupt handling and maintaining the hierarchy.
+	 * This will catch gross misuse.  Attempting to make the check
+	 * race free would require holding locks across calls to
+	 * struct irq_domain_ops->free(), which could lead to
+	 * deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
+
+	/* We can only "pop" if this domain is at the top of the list */
+	if (WARN_ON(root_irq_data != tmp_irq_data))
+		return -EINVAL;
+
+	if (WARN_ON(root_irq_data->domain != domain))
+		return -EINVAL;
+
+	child_irq_data = root_irq_data->parent_data;
+	if (WARN_ON(!child_irq_data))
+		return -EINVAL;
+
+	mutex_lock(&irq_domain_mutex);
+
+	root_irq_data->parent_data = NULL;
+
+	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+	irq_domain_free_irqs_hierarchy(domain, virq, 1);
+
+	/* Restore the original irq_data. */
+	*root_irq_data = *child_irq_data;
+
+	irq_domain_fix_revmap(root_irq_data);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	kfree(child_irq_data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
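
And the corresponding teardown, continuing the editorial sketch above with the same hypothetical names; the pop must again run while no handler is installed.

static void my_remove_domain(struct irq_domain *my_domain, int virq,
			     void *dev_id)
{
	free_irq(virq, dev_id);		/* drop any handler first */

	if (irq_domain_pop_irq(my_domain, virq))
		pr_warn("could not pop domain from virq %d\n", virq);

	irq_domain_remove(my_domain);
}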
 /**
  * irq_domain_free_irqs - Free IRQ number and associated data structures
  * @virq:	base IRQ number
@@ -400,8 +400,18 @@ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 		return -EINVAL;
 
 	data = irq_desc_get_irq_data(desc);
-	chip = irq_data_get_irq_chip(data);
-	if (chip && chip->irq_set_vcpu_affinity)
+	do {
+		chip = irq_data_get_irq_chip(data);
+		if (chip && chip->irq_set_vcpu_affinity)
+			break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+		data = data->parent_data;
+#else
+		data = NULL;
+#endif
+	} while (data);
+
+	if (data)
 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 	irq_put_desc_unlock(desc, flags);
@@ -61,12 +61,12 @@ static int show_irq_affinity(int type, struct seq_file *m)
 	case EFFECTIVE:
 	case EFFECTIVE_LIST:
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-		mask = desc->irq_common_data.effective_affinity;
+		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
 		break;
-#else
-		return -EINVAL;
 #endif
-	};
+	default:
+		return -EINVAL;
+	}
 
 	switch (type) {
 	case AFFINITY_LIST: