Commit 47b59d8e authored by Joerg Roedel


Merge branches 'arm/exynos', 'arm/renesas', 'arm/rockchip', 'arm/omap', 'arm/mediatek', 'arm/tegra', 'arm/qcom', 'arm/smmu', 'ppc/pamu', 'x86/vt-d', 'x86/amd', 's390' and 'core' into next
* QCOM IOMMU v1 Implementation
Qualcomm "B" family devices which are not compatible with arm-smmu have
a similar looking IOMMU but without access to the global register space,
and optionally requiring additional configuration to route context irqs
to non-secure vs secure interrupt line.
** Required properties:
- compatible       : Should be one of:
                        "qcom,msm8916-iommu"
                     Followed by "qcom,msm-iommu-v1".
- clock-names      : Should be a pair of "iface" (required for the IOMMU's
                     register group access) and "bus" (required for the
                     IOMMU's underlying bus access).
- clocks           : Phandles for respective clocks described by clock-names.
- #address-cells   : must be 1.
- #size-cells      : must be 1.
- #iommu-cells     : Must be 1.  Index identifies the context-bank number.
- ranges           : Base address and size of the iommu context banks.
- qcom,iommu-secure-id : secure-id.
- List of sub-nodes, one per translation context bank. Each sub-node
  has the following required properties:

  - compatible     : Should be one of:
        - "qcom,msm-iommu-v1-ns"  : non-secure context bank
        - "qcom,msm-iommu-v1-sec" : secure context bank
  - reg            : Base address and size of context bank within the iommu
  - interrupts     : The context fault irq.
** Optional properties:

- reg              : Base address and size of the SMMU local base; should
                     only be specified if the iommu requires configuration
                     for routing of context bank irqs to secure vs. non-secure
                     lines (i.e. if the iommu contains secure context banks).
** Examples:

	apps_iommu: iommu@1e20000 {
		#address-cells = <1>;
		#size-cells = <1>;
		#iommu-cells = <1>;
		compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
		ranges = <0 0x1e20000 0x40000>;
		reg = <0x1ef0000 0x3000>;
		clocks = <&gcc GCC_SMMU_CFG_CLK>,
			 <&gcc GCC_APSS_TCU_CLK>;
		clock-names = "iface", "bus";
		qcom,iommu-secure-id = <17>;

		// mdp_0:
		iommu-ctx@4000 {
			compatible = "qcom,msm-iommu-v1-ns";
			reg = <0x4000 0x1000>;
			interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
		};

		// venus_ns:
		iommu-ctx@5000 {
			compatible = "qcom,msm-iommu-v1-sec";
			reg = <0x5000 0x1000>;
			interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
		};
	};

	gpu_iommu: iommu@1f08000 {
		#address-cells = <1>;
		#size-cells = <1>;
		#iommu-cells = <1>;
		compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
		ranges = <0 0x1f08000 0x10000>;
		clocks = <&gcc GCC_SMMU_CFG_CLK>,
			 <&gcc GCC_GFX_TCU_CLK>;
		clock-names = "iface", "bus";
		qcom,iommu-secure-id = <18>;

		// gfx3d_user:
		iommu-ctx@1000 {
			compatible = "qcom,msm-iommu-v1-ns";
			reg = <0x1000 0x1000>;
			interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
		};

		// gfx3d_priv:
		iommu-ctx@2000 {
			compatible = "qcom,msm-iommu-v1-ns";
			reg = <0x2000 0x1000>;
			interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
		};
	};

	...

	venus: video-codec@1d00000 {
		...
		iommus = <&apps_iommu 5>;
	};

	mdp: mdp@1a01000 {
		...
		iommus = <&apps_iommu 4>;
	};

	gpu@01c00000 {
		...
		iommus = <&gpu_iommu 1>, <&gpu_iommu 2>;
	};
@@ -15,6 +15,11 @@ Required properties:
                      to associate with its master device. See:
                      Documentation/devicetree/bindings/iommu/iommu.txt
 
+Optional properties:
+- rockchip,disable-mmu-reset : Don't use the mmu reset operation.
+                               Some mmu instances may produce unexpected results
+                               when the reset operation is used.
+
 Example:
 
 	vopl_mmu: iommu@ff940300 {
...
@@ -15,6 +15,9 @@ Required properties:
 		the register.
 - "smi" : It's the clock for transfer data and command.
 
+Required property for mt2701:
+- mediatek,larb-id : the hardware id of this larb.
+
 Example:
 	larb1: larb@16010000 {
 		compatible = "mediatek,mt8173-smi-larb";
@@ -25,3 +28,15 @@ Example:
 			 <&vdecsys CLK_VDEC_LARB_CKEN>;
 		clock-names = "apb", "smi";
 	};
+
+Example for mt2701:
+	larb0: larb@14010000 {
+		compatible = "mediatek,mt2701-smi-larb";
+		reg = <0 0x14010000 0 0x1000>;
+		mediatek,smi = <&smi_common>;
+		mediatek,larb-id = <0>;
+		clocks = <&mmsys CLK_MM_SMI_LARB0>,
+			 <&mmsys CLK_MM_SMI_LARB0>;
+		clock-names = "apb", "smi";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_DISP>;
+	};
@@ -10940,6 +10940,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g
 S:	Supported
 F:	arch/hexagon/
 
+QUALCOMM IOMMU
+M:	Rob Clark <robdclark@gmail.com>
+L:	iommu@lists.linux-foundation.org
+L:	linux-arm-msm@vger.kernel.org
+S:	Maintained
+F:	drivers/iommu/qcom_iommu.c
+
 QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
 M:	Stanimir Varbanov <stanimir.varbanov@linaro.org>
 L:	linux-media@vger.kernel.org
...
@@ -8,6 +8,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
+#include <linux/iommu.h>
 #include <asm-generic/pci.h>
 #include <asm/pci_clp.h>
 #include <asm/pci_debug.h>
@@ -122,6 +123,8 @@ struct zpci_dev {
 	unsigned long	iommu_pages;
 	unsigned int	next_bit;
 
+	struct iommu_device iommu_dev;  /* IOMMU core handle */
+
 	char res_name[16];
 	struct zpci_bar_struct bars[PCI_BAR_COUNT];
@@ -174,6 +177,10 @@ int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
 int clp_get_state(u32 fid, enum zpci_state *state);
 
+/* IOMMU Interface */
+int zpci_init_iommu(struct zpci_dev *zdev);
+void zpci_destroy_iommu(struct zpci_dev *zdev);
+
 #ifdef CONFIG_PCI
 /* Error handling and recovery */
 void zpci_event_error(void *);
...
@@ -776,6 +776,7 @@ void pcibios_remove_bus(struct pci_bus *bus)
 	zpci_exit_slot(zdev);
 	zpci_cleanup_bus_resources(zdev);
+	zpci_destroy_iommu(zdev);
 	zpci_free_domain(zdev);
 
 	spin_lock(&zpci_list_lock);
@@ -848,11 +849,15 @@ int zpci_create_device(struct zpci_dev *zdev)
 	if (rc)
 		goto out;
 
+	rc = zpci_init_iommu(zdev);
+	if (rc)
+		goto out_free;
+
 	mutex_init(&zdev->lock);
 	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
 		rc = zpci_enable_device(zdev);
 		if (rc)
-			goto out_free;
+			goto out_destroy_iommu;
 	}
 	rc = zpci_scan_bus(zdev);
 	if (rc)
@@ -869,6 +874,8 @@ int zpci_create_device(struct zpci_dev *zdev)
 out_disable:
 	if (zdev->state == ZPCI_FN_STATE_ONLINE)
 		zpci_disable_device(zdev);
+out_destroy_iommu:
+	zpci_destroy_iommu(zdev);
 out_free:
 	zpci_free_domain(zdev);
 out:
...
@@ -76,6 +76,8 @@ config IOMMU_DMA
 
 config FSL_PAMU
 	bool "Freescale IOMMU support"
+	depends on PCI
+	depends on PHYS_64BIT
 	depends on PPC_E500MC || (COMPILE_TEST && PPC)
 	select IOMMU_API
 	select GENERIC_ALLOCATOR
@@ -253,6 +255,7 @@ config TEGRA_IOMMU_SMMU
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
 	depends on ARCH_EXYNOS && MMU
+	depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	help
@@ -367,4 +370,14 @@ config MTK_IOMMU_V1
 
 	  if unsure, say N here.
 
+config QCOM_IOMMU
+	# Note: iommu drivers cannot (yet?) be built as modules
+	bool "Qualcomm IOMMU Support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select IOMMU_API
+	select IOMMU_IO_PGTABLE_LPAE
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for IOMMU on certain Qualcomm SoCs.
+
 endif # IOMMU_SUPPORT
@@ -27,3 +27,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
+obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
@@ -87,4 +87,6 @@ static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
 	return !!(iommu->features & f);
 }
 
+extern bool translation_pre_enabled(struct amd_iommu *iommu);
+extern struct iommu_dev_data *get_dev_data(struct device *dev);
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
@@ -250,6 +250,14 @@
 #define GA_GUEST_NR		0x1
 
+/* Bit value definition for dte irq remapping fields */
+#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
+#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
+#define DTE_IRQ_TABLE_LEN_MASK	(0xfULL << 1)
+#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
+#define DTE_IRQ_TABLE_LEN	(8ULL << 1)
+#define DTE_IRQ_REMAP_ENABLE	1ULL
+
 #define PAGE_MODE_NONE		0x00
 #define PAGE_MODE_1_LEVEL	0x01
 #define PAGE_MODE_2_LEVEL	0x02
@@ -265,7 +273,7 @@
 #define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
 #define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
 #define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
-				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
 #define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
 
 #define PM_MAP_4k		0
@@ -314,19 +322,29 @@
 #define PTE_LEVEL_PAGE_SIZE(level)			\
 	(1ULL << (12 + (9 * (level))))
 
-#define IOMMU_PTE_P  (1ULL << 0)
-#define IOMMU_PTE_TV (1ULL << 1)
+/*
+ * Bit value definition for I/O PTE fields
+ */
+#define IOMMU_PTE_PR (1ULL << 0)
 #define IOMMU_PTE_U  (1ULL << 59)
 #define IOMMU_PTE_FC (1ULL << 60)
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
+/*
+ * Bit value definition for DTE fields
+ */
+#define DTE_FLAG_V  (1ULL << 0)
+#define DTE_FLAG_TV (1ULL << 1)
+#define DTE_FLAG_IR (1ULL << 61)
+#define DTE_FLAG_IW (1ULL << 62)
+
 #define DTE_FLAG_IOTLB	(1ULL << 32)
+#define DTE_FLAG_SA	(1ULL << 34)
 #define DTE_FLAG_GV	(1ULL << 55)
 #define DTE_FLAG_MASK	(0x3ffULL << 32)
 #define DTE_GLX_SHIFT	(56)
 #define DTE_GLX_MASK	(3)
+#define DEV_DOMID_MASK	0xffffULL
 
 #define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
 #define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
@@ -343,7 +361,7 @@
 #define GCR3_VALID		0x01ULL
 
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
-#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
 #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
 #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
@@ -435,6 +453,8 @@ struct iommu_domain;
 struct irq_domain;
 struct amd_irte_ops;
 
+#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)
+
 /*
  * This structure contains generic data for  IOMMU protection domains
  * independent of their use.
@@ -569,6 +589,7 @@ struct amd_iommu {
 	struct amd_irte_ops *irte_ops;
 #endif
 
+	u32 flags;
 	volatile u64 __aligned(8) cmd_sem;
 };
 
@@ -599,6 +620,30 @@ struct devid_map {
 	bool cmd_line;
 };
 
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+	struct list_head list;		  /* For domain->dev_list */
+	struct list_head dev_data_list;	  /* For global dev_data_list */
+	struct protection_domain *domain; /* Domain the device is bound to */
+	u16 devid;			  /* PCI Device ID */
+	u16 alias;			  /* Alias Device ID */
+	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
+	bool passthrough;		  /* Device is identity mapped */
+	struct {
+		bool enabled;
+		int qdep;
+	} ats;				  /* ATS state */
+	bool pri_tlp;			  /* PASID TLB required for
+					     PPR completions */
+	u32 errata;			  /* Bitmap for errata to apply */
+	bool use_vapic;			  /* Enable device to use vapic mode */
+	bool defer_attach;
+
+	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
+};
+
 /* Map HPET and IOAPIC ids to the devid used by the IOMMU */
 extern struct list_head ioapic_map;
 extern struct list_head hpet_map;
...
@@ -562,14 +562,30 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
 	unsigned long flags;
 	struct fault *fault;
 	bool finish;
-	u16 tag;
+	u16 tag, devid;
 	int ret;
+	struct iommu_dev_data *dev_data;
+	struct pci_dev *pdev = NULL;
 
 	iommu_fault = data;
 	tag         = iommu_fault->tag & 0x1ff;
 	finish      = (iommu_fault->tag >> 9) & 1;
 
+	devid = iommu_fault->device_id;
+	pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff);
+	if (!pdev)
+		return -ENODEV;
+	dev_data = get_dev_data(&pdev->dev);
+
+	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
 	ret = NOTIFY_DONE;
+	if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
+		&& dev_data->defer_attach) {
+		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
+				       PPR_INVALID, tag);
+		goto out;
+	}
+
 	dev_state = get_device_state(iommu_fault->device_id);
 	if (dev_state == NULL)
 		goto out;
...
/*
* IOMMU API for ARM architected SMMU implementations.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2013 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#ifndef _ARM_SMMU_REGS_H
#define _ARM_SMMU_REGS_H
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_EXIDENABLE (1 << 3)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
#define sCR0_VMID16EN (1 << 31)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3
/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR 0x10
/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
#define ARM_SMMU_GR0_ID2 0x28
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
#define ARM_SMMU_GR0_ID5 0x34
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
#define ARM_SMMU_GR0_sGFSR 0x48
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
#define ID0_ATOSNS (1 << 26)
#define ID0_PTFS_NO_AARCH32 (1 << 25)
#define ID0_PTFS_NO_AARCH32S (1 << 24)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
#define ID0_EXIDS (1 << 8)
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff
#define ID1_PAGESIZE (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT 28
#define ID1_NUMPAGENDXB_MASK 7
#define ID1_NUMS2CB_SHIFT 16
#define ID1_NUMS2CB_MASK 0xff
#define ID1_NUMCB_SHIFT 0
#define ID1_NUMCB_MASK 0xff
#define ID2_OAS_SHIFT 4
#define ID2_OAS_MASK 0xf
#define ID2_IAS_SHIFT 0
#define ID2_IAS_MASK 0xf
#define ID2_UBS_SHIFT 8
#define ID2_UBS_MASK 0xf
#define ID2_PTFS_4K (1 << 12)
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
#define ID2_VMID16 (1 << 15)
#define ID7_MAJOR_SHIFT 4
#define ID7_MAJOR_MASK 0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
#define ARM_SMMU_GR0_sTLBGSYNC 0x70
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
#define SMR_ID_SHIFT 0
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_EXIDVALID (1 << 10)
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
enum arm_smmu_s2cr_type {
S2CR_TYPE_TRANS,
S2CR_TYPE_BYPASS,
S2CR_TYPE_FAULT,
};
#define S2CR_PRIVCFG_SHIFT 24
#define S2CR_PRIVCFG_MASK 0x3
enum arm_smmu_s2cr_privcfg {
S2CR_PRIVCFG_DEFAULT,
S2CR_PRIVCFG_DIPAN,
S2CR_PRIVCFG_UNPRIV,
S2CR_PRIVCFG_PRIV,
};
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_BPSHCFG_SHIFT 8
#define CBAR_S1_BPSHCFG_MASK 3
#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
#define CBA2R_VMID_SHIFT 16
#define CBA2R_VMID_MASK 0xffff
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
#define ARM_SMMU_CB_TLBSYNC 0x7f0
#define ARM_SMMU_CB_TLBSTATUS 0x7f4
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
#define CB_PAR_F (1 << 0)
#define ATSR_ACTIVE (1 << 0)
#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS (1 << 4)
#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
#define FSR_UUT (1 << 8)
#define FSR_ASF (1 << 7)
#define FSR_TLBLKF (1 << 6)
#define FSR_TLBMCF (1 << 5)
#define FSR_EF (1 << 4)
#define FSR_PF (1 << 3)
#define FSR_AFF (1 << 2)
#define FSR_TF (1 << 1)
#define FSR_IGN (FSR_AFF | FSR_ASF | \
FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
#define FSYNR0_WNR (1 << 4)
#endif /* _ARM_SMMU_REGS_H */
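
As an aside, the SMR_* definitions above are typically combined when a driver
programs a Stream Match Register. A minimal sketch of the packing (the helper
name is hypothetical, and real code would also handle the EXIDS/EXIDVALID case):

	/* Hypothetical helper: pack a stream ID/mask pair into an SMR value. */
	static u32 smr_pack(u16 id, u16 mask, bool valid)
	{
		u32 reg = (mask << SMR_MASK_SHIFT) | (id << SMR_ID_SHIFT);

		if (valid)
			reg |= SMR_VALID;
		return reg;
	}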
@@ -2852,9 +2852,15 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
 	arm_smmu_device_disable(smmu);
+
 	return 0;
 }
 
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+	arm_smmu_device_remove(pdev);
+}
+
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v3", },
 	{ },
@@ -2868,6 +2874,7 @@ static struct platform_driver arm_smmu_driver = {
 	},
 	.probe	= arm_smmu_device_probe,
 	.remove	= arm_smmu_device_remove,
+	.shutdown = arm_smmu_device_shutdown,
 };
 module_platform_driver(arm_smmu_driver);
...
@@ -1343,7 +1343,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 
 	if (mask) {
 		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
-		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
 		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
 	} else
 		desc.high = QI_DEV_IOTLB_ADDR(addr);
...
@@ -54,10 +54,6 @@ typedef u32 sysmmu_pte_t;
 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
 
-#ifdef CONFIG_BIG_ENDIAN
-#warning "revisit driver if we can enable big-endian ptes"
-#endif
-
 /*
  * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
  * v5.0 introduced support for 36bit physical address space by shifting
@@ -569,7 +565,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 	spin_unlock_irqrestore(&data->lock, flags);
 }
 
-static struct iommu_ops exynos_iommu_ops;
+static const struct iommu_ops exynos_iommu_ops;
 
 static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 {
@@ -659,6 +655,13 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 		}
 	}
 
+	/*
+	 * use the first registered sysmmu device for performing
+	 * dma mapping operations on iommu page tables (cpu cache flush)
+	 */
+	if (!dma_dev)
+		dma_dev = &pdev->dev;
+
 	pm_runtime_enable(dev);
 
 	return 0;
@@ -1323,7 +1326,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
-static struct iommu_ops exynos_iommu_ops = {
+static const struct iommu_ops exynos_iommu_ops = {
 	.domain_alloc = exynos_iommu_domain_alloc,
 	.domain_free = exynos_iommu_domain_free,
 	.attach_dev = exynos_iommu_attach_device,
@@ -1339,8 +1342,6 @@ static struct iommu_ops exynos_iommu_ops = {
 	.of_xlate = exynos_iommu_of_xlate,
 };
 
-static bool init_done;
-
 static int __init exynos_iommu_init(void)
 {
 	int ret;
@@ -1373,8 +1374,6 @@ static int __init exynos_iommu_init(void)
 		goto err_set_iommu;
 	}
 
-	init_done = true;
-
 	return 0;
 err_set_iommu:
 	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
@@ -1384,27 +1383,6 @@ static int __init exynos_iommu_init(void)
 	kmem_cache_destroy(lv2table_kmem_cache);
 	return ret;
 }
+core_initcall(exynos_iommu_init);
 
-static int __init exynos_iommu_of_setup(struct device_node *np)
-{
-	struct platform_device *pdev;
-
-	if (!init_done)
-		exynos_iommu_init();
-
-	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
-	if (!pdev)
-		return -ENODEV;
-
-	/*
-	 * use the first registered sysmmu device for performing
-	 * dma mapping operations on iommu page tables (cpu cache flush)
-	 */
-	if (!dma_dev)
-		dma_dev = &pdev->dev;
-
-	return 0;
-}
-
-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
-		 exynos_iommu_of_setup);
+IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
@@ -42,6 +42,8 @@ struct pamu_isr_data {
 static struct paace *ppaact;
 static struct paace *spaact;
 
+static bool probed;			/* Has PAMU been probed? */
+
 /*
  * Table for matching compatible strings, for device tree
  * guts node, for QorIQ SOCs.
@@ -530,8 +532,8 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
 		if (node) {
 			prop = of_get_property(node, "cache-stash-id", NULL);
 			if (!prop) {
-				pr_debug("missing cache-stash-id at %s\n",
-					 node->full_name);
+				pr_debug("missing cache-stash-id at %pOF\n",
+					 node);
 				of_node_put(node);
 				return ~(u32)0;
 			}
@@ -557,8 +559,8 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
 		if (stash_dest_hint == cache_level) {
 			prop = of_get_property(node, "cache-stash-id", NULL);
 			if (!prop) {
-				pr_debug("missing cache-stash-id at %s\n",
-					 node->full_name);
+				pr_debug("missing cache-stash-id at %pOF\n",
+					 node);
 				of_node_put(node);
 				return ~(u32)0;
 			}
@@ -568,8 +570,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
 		prop = of_get_property(node, "next-level-cache", NULL);
 		if (!prop) {
-			pr_debug("can't find next-level-cache at %s\n",
-				 node->full_name);
+			pr_debug("can't find next-level-cache at %pOF\n", node);
 			of_node_put(node);
 			return ~(u32)0;	/* can't traverse any further */
 		}
@@ -1033,6 +1034,9 @@ static int fsl_pamu_probe(struct platform_device *pdev)
 	 * NOTE : All PAMUs share the same LIODN tables.
 	 */
 
+	if (WARN_ON(probed))
+		return -EBUSY;
+
 	pamu_regs = of_iomap(dev->of_node, 0);
 	if (!pamu_regs) {
 		dev_err(dev, "ioremap of PAMU node failed\n");
@@ -1063,8 +1067,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
 
 	guts_node = of_find_matching_node(NULL, guts_device_ids);
 	if (!guts_node) {
-		dev_err(dev, "could not find GUTS node %s\n",
-			dev->of_node->full_name);
+		dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);
 		ret = -ENODEV;
 		goto error;
 	}
@@ -1172,6 +1175,8 @@ static int fsl_pamu_probe(struct platform_device *pdev)
 
 	setup_liodns();
 
+	probed = true;
+
 	return 0;
 
 error_genpool:
@@ -1246,8 +1251,7 @@ static __init int fsl_pamu_init(void)
 
 	pdev = platform_device_alloc("fsl-of-pamu", 0);
 	if (!pdev) {
-		pr_err("could not allocate device %s\n",
-		       np->full_name);
+		pr_err("could not allocate device %pOF\n", np);
 		ret = -ENOMEM;
 		goto error_device_alloc;
 	}
@@ -1259,8 +1263,7 @@ static __init int fsl_pamu_init(void)
 
 	ret = platform_device_add(pdev);
 	if (ret) {
-		pr_err("could not add device %s (err=%i)\n",
-		       np->full_name, ret);
+		pr_err("could not add device %pOF (err=%i)\n", np, ret);
 		goto error_device_add;
 	}
...
@@ -33,6 +33,8 @@ static struct kmem_cache *fsl_pamu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static DEFINE_SPINLOCK(device_domain_lock);
 
+struct iommu_device pamu_iommu;	/* IOMMU core code handle */
+
 static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct fsl_dma_domain, iommu_domain);
@@ -619,8 +621,8 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 	for (i = 0; i < num; i++) {
 		/* Ensure that LIODN value is valid */
 		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
-			pr_debug("Invalid liodn %d, attach device failed for %s\n",
-				 liodn[i], dev->of_node->full_name);
+			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
+				 liodn[i], dev->of_node);
 			ret = -EINVAL;
 			break;
 		}
@@ -684,8 +686,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
 		liodn_cnt = len / sizeof(u32);
 		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
 	} else {
-		pr_debug("missing fsl,liodn property at %s\n",
-			 dev->of_node->full_name);
+		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 		ret = -EINVAL;
 	}
@@ -720,8 +721,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
 	if (prop)
 		detach_device(dev, dma_domain);
 	else
-		pr_debug("missing fsl,liodn property at %s\n",
-			 dev->of_node->full_name);
+		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 }
 
 static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -983,11 +983,14 @@ static int fsl_pamu_add_device(struct device *dev)
 	iommu_group_put(group);
 
+	iommu_device_link(&pamu_iommu, dev);
+
 	return 0;
 }
 
 static void fsl_pamu_remove_device(struct device *dev)
 {
+	iommu_device_unlink(&pamu_iommu, dev);
 	iommu_group_remove_device(dev);
 }
 
@@ -1073,6 +1076,19 @@ int __init pamu_domain_init(void)
 	if (ret)
 		return ret;
 
+	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
+	if (ret)
+		return ret;
+
+	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
+
+	ret = iommu_device_register(&pamu_iommu);
+	if (ret) {
+		iommu_device_sysfs_remove(&pamu_iommu);
+		pr_err("Can't register iommu device\n");
+		return ret;
+	}
+
 	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
 	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
...
@@ -24,6 +24,7 @@
 #include <linux/pci-ats.h>
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
+#include <asm/page.h>
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 
@@ -555,6 +556,14 @@ static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
 	return (requested & ~vma->vm_flags) != 0;
 }
 
+static bool is_canonical_address(u64 addr)
+{
+	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
+	long saddr = (long) addr;
+
+	return (((saddr << shift) >> shift) == saddr);
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
@@ -612,6 +621,11 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		/* If the mm is already defunct, don't handle faults. */
 		if (!mmget_not_zero(svm->mm))
 			goto bad_req;
+
+		/* If address is not canonical, return invalid response */
+		if (!is_canonical_address(address))
+			goto bad_req;
+
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
...
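
The sign-extension trick in is_canonical_address() works because an x86-64
address is canonical exactly when bits 63..__VIRTUAL_MASK_SHIFT are all copies
of bit __VIRTUAL_MASK_SHIFT; shifting the value left and arithmetically back
right regenerates that pattern, so the result matches the input only for
canonical addresses. A standalone illustration, assuming the 48-bit
virtual-address split of 4-level paging (i.e. __VIRTUAL_MASK_SHIFT == 47):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VIRTUAL_MASK_SHIFT 47	/* assumed: x86-64, 4-level paging */

	static bool is_canonical(uint64_t addr)
	{
		int shift = 64 - (VIRTUAL_MASK_SHIFT + 1);

		return ((int64_t)(addr << shift) >> shift) == (int64_t)addr;
	}

	int main(void)
	{
		printf("%d\n", is_canonical(0x00007fffffffffffULL)); /* 1: top of user space */
		printf("%d\n", is_canonical(0x0000800000000000ULL)); /* 0: inside the hole */
		printf("%d\n", is_canonical(0xffff800000000000ULL)); /* 1: kernel space */
		return 0;
	}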
@@ -527,6 +527,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	}
 
+	iommu_flush_tlb_all(domain);
+
 out:
 	iommu_put_resv_regions(dev, &mappings);
 
@@ -1005,11 +1007,10 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 	if (group)
 		return group;
 
-	group = ERR_PTR(-EINVAL);
-
-	if (ops && ops->device_group)
-		group = ops->device_group(dev);
+	if (!ops)
+		return ERR_PTR(-EINVAL);
 
+	group = ops->device_group(dev);
 	if (WARN_ON_ONCE(group == NULL))
 		return ERR_PTR(-EINVAL);
 
@@ -1283,6 +1284,10 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev)
 {
 	int ret;
+
+	if ((domain->ops->is_attach_deferred != NULL) &&
+	    domain->ops->is_attach_deferred(domain, dev))
+		return 0;
 
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
@@ -1298,12 +1303,8 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 	int ret;
 
 	group = iommu_group_get(dev);
-	/* FIXME: Remove this when groups a mandatory for iommu drivers */
-	if (group == NULL)
-		return __iommu_attach_device(domain, dev);
-
 	/*
-	 * We have a group - lock it to make sure the device-count doesn't
+	 * Lock the group to make sure the device-count doesn't
 	 * change while we are attaching
 	 */
 	mutex_lock(&group->mutex);
@@ -1324,6 +1325,10 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
+	if ((domain->ops->is_attach_deferred != NULL) &&
+	    domain->ops->is_attach_deferred(domain, dev))
+		return;
+
 	if (unlikely(domain->ops->detach_dev == NULL))
 		return;
 
@@ -1336,9 +1341,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 	struct iommu_group *group;
 
 	group = iommu_group_get(dev);
-	/* FIXME: Remove this when groups a mandatory for iommu drivers */
-	if (group == NULL)
-		return __iommu_detach_device(domain, dev);
 
 	mutex_lock(&group->mutex);
 	if (iommu_group_device_count(group) != 1) {
@@ -1360,8 +1362,7 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
 	struct iommu_group *group;
 
 	group = iommu_group_get(dev);
-	/* FIXME: Remove this when groups a mandatory for iommu drivers */
-	if (group == NULL)
+	if (!group)
 		return NULL;
 
 	domain = group->domain;
@@ -1556,13 +1557,16 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t __iommu_unmap(struct iommu_domain *domain,
+			    unsigned long iova, size_t size,
+			    bool sync)
 {
+	const struct iommu_ops *ops = domain->ops;
 	size_t unmapped_page, unmapped = 0;
-	unsigned int min_pagesz;
 	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
 
-	if (unlikely(domain->ops->unmap == NULL ||
+	if (unlikely(ops->unmap == NULL ||
 		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
@@ -1592,10 +1596,13 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	while (unmapped < size) {
 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+		unmapped_page = ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
 
+		if (sync && ops->iotlb_range_add)
+			ops->iotlb_range_add(domain, iova, pgsize);
+
 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
 			 iova, unmapped_page);
 
@@ -1603,11 +1610,27 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		unmapped += unmapped_page;
 	}
 
+	if (sync && ops->iotlb_sync)
+		ops->iotlb_sync(domain);
+
 	trace_unmap(orig_iova, size, unmapped);
 	return unmapped;
 }
+
+size_t iommu_unmap(struct iommu_domain *domain,
+		   unsigned long iova, size_t size)
+{
+	return __iommu_unmap(domain, iova, size, true);
+}
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t iommu_unmap_fast(struct iommu_domain *domain,
+			unsigned long iova, size_t size)
+{
+	return __iommu_unmap(domain, iova, size, false);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap_fast);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			    struct scatterlist *sg, unsigned int nents, int prot)
 {
...
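
The iommu_unmap()/iommu_unmap_fast() split above exists so that callers who
unmap many ranges can defer TLB maintenance and pay for a single invalidation.
A hedged sketch of the intended calling pattern (iommu_tlb_range_add() and
iommu_tlb_sync() are the inline wrappers around ops->iotlb_range_add and
ops->iotlb_sync introduced by the same series; the batch layout is invented
for illustration):

	/* Sketch: unmap a batch of ranges with one TLB flush at the end. */
	static void unmap_batch(struct iommu_domain *domain,
				unsigned long *iovas, size_t *sizes, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			iommu_unmap_fast(domain, iovas[i], sizes[i]);
			iommu_tlb_range_add(domain, iovas[i], sizes[i]);
		}
		iommu_tlb_sync(domain);	/* one invalidation for the whole batch */
	}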
@@ -32,6 +32,8 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 				     unsigned long limit_pfn);
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
+static void fq_destroy_all_entries(struct iova_domain *iovad);
+static void fq_flush_timeout(unsigned long data);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -50,10 +52,61 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit + 1;
+	iovad->flush_cb = NULL;
+	iovad->fq = NULL;
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
+static void free_iova_flush_queue(struct iova_domain *iovad)
+{
+	if (!iovad->fq)
+		return;
+
+	if (timer_pending(&iovad->fq_timer))
+		del_timer(&iovad->fq_timer);
+
+	fq_destroy_all_entries(iovad);
+
+	free_percpu(iovad->fq);
+
+	iovad->fq         = NULL;
+	iovad->flush_cb   = NULL;
+	iovad->entry_dtor = NULL;
+}
+
+int init_iova_flush_queue(struct iova_domain *iovad,
+			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
+{
+	int cpu;
+
+	atomic64_set(&iovad->fq_flush_start_cnt,  0);
+	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
+	iovad->fq = alloc_percpu(struct iova_fq);
+	if (!iovad->fq)
+		return -ENOMEM;
+
+	iovad->flush_cb   = flush_cb;
+	iovad->entry_dtor = entry_dtor;
+
+	for_each_possible_cpu(cpu) {
+		struct iova_fq *fq;
+
+		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq->head = 0;
+		fq->tail = 0;
+
+		spin_lock_init(&fq->lock);
+	}
+
+	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+	atomic_set(&iovad->fq_timer_on, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(init_iova_flush_queue);
+
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 {
@@ -423,6 +476,135 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
+#define fq_ring_for_each(i, fq) \
+	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+
+static inline bool fq_full(struct iova_fq *fq)
+{
+	assert_spin_locked(&fq->lock);
+	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+}
+
+static inline unsigned fq_ring_add(struct iova_fq *fq)
+{
+	unsigned idx = fq->tail;
+
+	assert_spin_locked(&fq->lock);
+
+	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+
+	return idx;
+}
+
+static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
+{
+	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
+	unsigned idx;
+
+	assert_spin_locked(&fq->lock);
+
+	fq_ring_for_each(idx, fq) {
+
+		if (fq->entries[idx].counter >= counter)
+			break;
+
+		if (iovad->entry_dtor)
+			iovad->entry_dtor(fq->entries[idx].data);
+
+		free_iova_fast(iovad,
+			       fq->entries[idx].iova_pfn,
+			       fq->entries[idx].pages);
+
+		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+	}
+}
+
+static void iova_domain_flush(struct iova_domain *iovad)
+{
+	atomic64_inc(&iovad->fq_flush_start_cnt);
+	iovad->flush_cb(iovad);
+	atomic64_inc(&iovad->fq_flush_finish_cnt);
+}
+
+static void fq_destroy_all_entries(struct iova_domain *iovad)
+{
+	int cpu;
+
+	/*
+	 * This code runs when the iova_domain is being destroyed, so don't
+	 * bother to free iovas, just call the entry_dtor on all remaining
+	 * entries.
+	 */
+	if (!iovad->entry_dtor)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
+		int idx;
+
+		fq_ring_for_each(idx, fq)
+			iovad->entry_dtor(fq->entries[idx].data);
+	}
+}
+
+static void fq_flush_timeout(unsigned long data)
+{
+	struct iova_domain *iovad = (struct iova_domain *)data;
+	int cpu;
+
+	atomic_set(&iovad->fq_timer_on, 0);
+	iova_domain_flush(iovad);
+
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+		struct iova_fq *fq;
+
+		fq = per_cpu_ptr(iovad->fq, cpu);
+		spin_lock_irqsave(&fq->lock, flags);
+		fq_ring_free(iovad, fq);
+		spin_unlock_irqrestore(&fq->lock, flags);
+	}
+}
+
+void queue_iova(struct iova_domain *iovad,
+		unsigned long pfn, unsigned long pages,
+		unsigned long data)
+{
+	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+	unsigned long flags;
+	unsigned idx;
+
+	spin_lock_irqsave(&fq->lock, flags);
+
+	/*
+	 * First remove all entries from the flush queue that have already been
+	 * flushed out on another CPU. This makes the fq_full() check below less
+	 * likely to be true.
+	 */
+	fq_ring_free(iovad, fq);
+
+	if (fq_full(fq)) {
+		iova_domain_flush(iovad);
+		fq_ring_free(iovad, fq);
+	}
+
+	idx = fq_ring_add(fq);
+
+	fq->entries[idx].iova_pfn = pfn;
+	fq->entries[idx].pages    = pages;
+	fq->entries[idx].data     = data;
+	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
+
+	spin_unlock_irqrestore(&fq->lock, flags);
+
+	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+		mod_timer(&iovad->fq_timer,
+			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+
+	put_cpu_ptr(iovad->fq);
+}
+EXPORT_SYMBOL_GPL(queue_iova);
+
 /**
  * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
@@ -433,6 +615,7 @@ void put_iova_domain(struct iova_domain *iovad)
 	struct rb_node *node;
 	unsigned long flags;
 
+	free_iova_flush_queue(iovad);
 	free_iova_rcaches(iovad);
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	node = rb_first(&iovad->rbroot);
...
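
For context, a user of the new flush-queue machinery hands
init_iova_flush_queue() a callback that invalidates the IOTLB for the whole
domain, then frees ranges through queue_iova() instead of free_iova_fast();
the queued IOVA is only recycled once that callback has run (or the timeout
fires). A rough sketch with invented callback names:

	/* Sketch: deferred-flush setup for an iova_domain. The callbacks
	 * are hypothetical; a real user flushes its hardware IOTLB here. */
	static void my_flush_cb(struct iova_domain *iovad)
	{
		/* invalidate the IOTLB for the whole domain */
	}

	static void my_entry_dtor(unsigned long data)
	{
		/* release bookkeeping stashed in 'data', if any */
	}

	static int my_domain_init(struct iova_domain *iovad)
	{
		return init_iova_flush_queue(iovad, my_flush_cb, my_entry_dtor);
	}

	/* On unmap: queue the range; it returns to the allocator only
	 * after my_flush_cb() has been called. */
	static void my_unmap_done(struct iova_domain *iovad,
				  unsigned long pfn, unsigned long pages)
	{
		queue_iova(iovad, pfn, pages, 0);
	}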
@@ -393,6 +393,7 @@ static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
 static int msm_iommu_add_device(struct device *dev)
 {
 	struct msm_iommu_dev *iommu;
+	struct iommu_group *group;
 	unsigned long flags;
 	int ret = 0;
 
@@ -406,7 +407,16 @@ static int msm_iommu_add_device(struct device *dev)
 
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 
-	return ret;
+	if (ret)
+		return ret;
+
+	group = iommu_group_get_for_dev(dev);
+	if (IS_ERR(group))
+		return PTR_ERR(group);
+
+	iommu_group_put(group);
+
+	return 0;
 }
 
 static void msm_iommu_remove_device(struct device *dev)
@@ -421,6 +431,8 @@ static void msm_iommu_remove_device(struct device *dev)
 		iommu_device_unlink(&iommu->iommu, dev);
 
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+	iommu_group_remove_device(dev);
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -700,6 +712,7 @@ static struct iommu_ops msm_iommu_ops = {
 	.iova_to_phys = msm_iommu_iova_to_phys,
 	.add_device = msm_iommu_add_device,
 	.remove_device = msm_iommu_remove_device,
+	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
 };
...
@@ -34,6 +34,12 @@ struct mtk_iommu_suspend_reg {
 	u32				int_main_control;
 };
 
+enum mtk_iommu_plat {
+	M4U_MT2701,
+	M4U_MT2712,
+	M4U_MT8173,
+};
+
 struct mtk_iommu_domain;
 
 struct mtk_iommu_data {
@@ -50,6 +56,9 @@ struct mtk_iommu_data {
 	bool                            tlb_flush_active;
 
 	struct iommu_device		iommu;
+	enum mtk_iommu_plat		m4u_plat;
+
+	struct list_head		list;
 };
 
 static inline int compare_of(struct device *dev, void *data)
...
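
The new m4u_plat field lets runtime code branch per SoC generation; it is
presumably filled from OF match data at probe time. A hedged sketch of that
pattern (the table name and compatible strings are assumptions for
illustration):

	static const struct of_device_id mtk_iommu_of_ids[] = {
		{ .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712 },
		{ .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173 },
		{}
	};

	/* in probe: */
	data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);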
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include <linux/of_pci.h> #include <linux/of_pci.h>
#include <linux/slab.h> #include <linux/slab.h>
#define NO_IOMMU 1
static const struct of_device_id __iommu_of_table_sentinel static const struct of_device_id __iommu_of_table_sentinel
__used __section(__iommu_of_table_end); __used __section(__iommu_of_table_end);
...@@ -109,8 +111,8 @@ static bool of_iommu_driver_present(struct device_node *np) ...@@ -109,8 +111,8 @@ static bool of_iommu_driver_present(struct device_node *np)
return of_match_node(&__iommu_of_table, np); return of_match_node(&__iommu_of_table, np);
} }
static const struct iommu_ops static int of_iommu_xlate(struct device *dev,
*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) struct of_phandle_args *iommu_spec)
{ {
const struct iommu_ops *ops; const struct iommu_ops *ops;
struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
...@@ -120,95 +122,53 @@ static const struct iommu_ops ...@@ -120,95 +122,53 @@ static const struct iommu_ops
if ((ops && !ops->of_xlate) || if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np) || !of_device_is_available(iommu_spec->np) ||
(!ops && !of_iommu_driver_present(iommu_spec->np))) (!ops && !of_iommu_driver_present(iommu_spec->np)))
return NULL; return NO_IOMMU;
err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
if (err) if (err)
return ERR_PTR(err); return err;
/* /*
* The otherwise-empty fwspec handily serves to indicate the specific * The otherwise-empty fwspec handily serves to indicate the specific
* IOMMU device we're waiting for, which will be useful if we ever get * IOMMU device we're waiting for, which will be useful if we ever get
* a proper probe-ordering dependency mechanism in future. * a proper probe-ordering dependency mechanism in future.
*/ */
if (!ops) if (!ops)
return ERR_PTR(-EPROBE_DEFER); return -EPROBE_DEFER;
err = ops->of_xlate(dev, iommu_spec);
if (err)
return ERR_PTR(err);
return ops; return ops->of_xlate(dev, iommu_spec);
} }
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) struct of_pci_iommu_alias_info {
{ struct device *dev;
struct of_phandle_args *iommu_spec = data; struct device_node *np;
};
iommu_spec->args[0] = alias;
return iommu_spec->np == pdev->bus->dev.of_node;
}
static const struct iommu_ops static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np)
{ {
const struct iommu_ops *ops; struct of_pci_iommu_alias_info *info = data;
struct of_phandle_args iommu_spec; struct of_phandle_args iommu_spec = { .args_count = 1 };
int err; int err;
/* err = of_pci_map_rid(info->np, alias, "iommu-map",
* Start by tracing the RID alias down the PCI topology as
* far as the host bridge whose OF node we have...
* (we're not even attempting to handle multi-alias devices yet)
*/
iommu_spec.args_count = 1;
iommu_spec.np = bridge_np;
pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);
/*
* ...then find out what that becomes once it escapes the PCI
* bus into the system beyond, and which IOMMU it ends up at.
*/
iommu_spec.np = NULL;
err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
"iommu-map-mask", &iommu_spec.np, "iommu-map-mask", &iommu_spec.np,
iommu_spec.args); iommu_spec.args);
if (err) if (err)
return err == -ENODEV ? NULL : ERR_PTR(err); return err == -ENODEV ? NO_IOMMU : err;
ops = of_iommu_xlate(&pdev->dev, &iommu_spec);
err = of_iommu_xlate(info->dev, &iommu_spec);
of_node_put(iommu_spec.np); of_node_put(iommu_spec.np);
return ops; if (err)
} return err;
static const struct iommu_ops
*of_platform_iommu_init(struct device *dev, struct device_node *np)
{
struct of_phandle_args iommu_spec;
const struct iommu_ops *ops = NULL;
int idx = 0;
/*
* We don't currently walk up the tree looking for a parent IOMMU.
* See the `Notes:' section of
* Documentation/devicetree/bindings/iommu/iommu.txt
*/
while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
idx, &iommu_spec)) {
ops = of_iommu_xlate(dev, &iommu_spec);
of_node_put(iommu_spec.np);
idx++;
if (IS_ERR_OR_NULL(ops))
break;
}
return ops; return info->np == pdev->bus->dev.of_node;
} }
const struct iommu_ops *of_iommu_configure(struct device *dev, const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np) struct device_node *master_np)
{ {
const struct iommu_ops *ops; const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct iommu_fwspec *fwspec = dev->iommu_fwspec;
int err = NO_IOMMU;
if (!master_np) if (!master_np)
return NULL; return NULL;
...@@ -221,25 +181,54 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, ...@@ -221,25 +181,54 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
iommu_fwspec_free(dev); iommu_fwspec_free(dev);
} }
if (dev_is_pci(dev)) /*
ops = of_pci_iommu_init(to_pci_dev(dev), master_np); * We don't currently walk up the tree looking for a parent IOMMU.
else * See the `Notes:' section of
ops = of_platform_iommu_init(dev, master_np); * Documentation/devicetree/bindings/iommu/iommu.txt
*/
if (dev_is_pci(dev)) {
struct of_pci_iommu_alias_info info = {
.dev = dev,
.np = master_np,
};
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
} else {
struct of_phandle_args iommu_spec;
int idx = 0;
while (!of_parse_phandle_with_args(master_np, "iommus",
"#iommu-cells",
idx, &iommu_spec)) {
err = of_iommu_xlate(dev, &iommu_spec);
of_node_put(iommu_spec.np);
idx++;
if (err)
break;
}
}
/*
* Two success conditions can be represented by non-negative err here:
* >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
* 0 : we found an IOMMU, and dev->fwspec is initialised appropriately
* <0 : any actual error
*/
if (!err)
ops = dev->iommu_fwspec->ops;
/* /*
* If we have reason to believe the IOMMU driver missed the initial * If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order. * add_device callback for dev, replay it to get things in order.
*/ */
if (!IS_ERR_OR_NULL(ops) && ops->add_device && if (ops && ops->add_device && dev->bus && !dev->iommu_group)
dev->bus && !dev->iommu_group) { err = ops->add_device(dev);
int err = ops->add_device(dev);
if (err)
ops = ERR_PTR(err);
}
/* Ignore all other errors apart from EPROBE_DEFER */ /* Ignore all other errors apart from EPROBE_DEFER */
if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { if (err == -EPROBE_DEFER) {
dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); ops = ERR_PTR(err);
} else if (err < 0) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL; ops = NULL;
} }
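A caller of of_iommu_configure() therefore sees one of three outcomes: valid ops, NULL (no usable IOMMU), or ERR_PTR(-EPROBE_DEFER). A sketch of how a consumer such as of_dma_configure() might react; the two arch_* helpers are hypothetical stand-ins, not real kernel API:

	const struct iommu_ops *ops = of_iommu_configure(dev, master_np);

	if (IS_ERR(ops))
		return PTR_ERR(ops);		/* only -EPROBE_DEFER is propagated this way */
	if (ops)
		arch_setup_iommu_dma(dev, ops);	/* hypothetical: use IOMMU-backed DMA ops */
	else
		arch_setup_direct_dma(dev);	/* hypothetical: direct/SWIOTLB fallback */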
...@@ -255,8 +244,7 @@ static int __init of_iommu_init(void) ...@@ -255,8 +244,7 @@ static int __init of_iommu_init(void)
const of_iommu_init_fn init_fn = match->data; const of_iommu_init_fn init_fn = match->data;
if (init_fn && init_fn(np)) if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n", pr_err("Failed to initialise IOMMU %pOF\n", np);
of_node_full_name(np));
} }
return 0; return 0;
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/dma-mapping.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -29,8 +30,6 @@ ...@@ -29,8 +30,6 @@
#include <linux/regmap.h> #include <linux/regmap.h>
#include <linux/mfd/syscon.h> #include <linux/mfd/syscon.h>
#include <asm/cacheflush.h>
#include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h" #include "omap-iopgtable.h"
...@@ -454,36 +453,35 @@ static void flush_iotlb_all(struct omap_iommu *obj) ...@@ -454,36 +453,35 @@ static void flush_iotlb_all(struct omap_iommu *obj)
/* /*
* H/W pagetable operations * H/W pagetable operations
*/ */
static void flush_iopgd_range(u32 *first, u32 *last) static void flush_iopte_range(struct device *dev, dma_addr_t dma,
unsigned long offset, int num_entries)
{ {
/* FIXME: L2 cache should be taken care of if it exists */ size_t size = num_entries * sizeof(u32);
do {
asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
: : "r" (first));
first += L1_CACHE_BYTES / sizeof(*first);
} while (first <= last);
}
static void flush_iopte_range(u32 *first, u32 *last) dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
{
/* FIXME: L2 cache should be taken care of if it exists */
do {
asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
: : "r" (first));
first += L1_CACHE_BYTES / sizeof(*first);
} while (first <= last);
} }
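The new flush_iopte_range() above replaces the open-coded MCR cache maintenance with the streaming DMA API: the page tables are mapped DMA_TO_DEVICE and every CPU update is published with a ranged sync. A minimal sketch of that lifecycle, with illustrative names:

	/* Map the table once, e.g. at attach time... */
	dma_addr_t tbl_dma = dma_map_single(dev, tbl, tbl_size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, tbl_dma))
		return -ENOMEM;

	/* ...publish each CPU write to the IOMMU... */
	tbl[idx] = entry;
	dma_sync_single_range_for_device(dev, tbl_dma, idx * sizeof(u32),
					 sizeof(u32), DMA_TO_DEVICE);

	/* ...and unmap on teardown, e.g. at detach time. */
	dma_unmap_single(dev, tbl_dma, tbl_size, DMA_TO_DEVICE);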
static void iopte_free(u32 *iopte) static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{ {
dma_addr_t pt_dma;
/* Note: freed iopte's must be clean ready for re-use */ /* Note: freed iopte's must be clean ready for re-use */
if (iopte) if (iopte) {
if (dma_valid) {
pt_dma = virt_to_phys(iopte);
dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
DMA_TO_DEVICE);
}
kmem_cache_free(iopte_cachep, iopte); kmem_cache_free(iopte_cachep, iopte);
}
} }
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
dma_addr_t *pt_dma, u32 da)
{ {
u32 *iopte; u32 *iopte;
unsigned long offset = iopgd_index(da) * sizeof(da);
/* a table already exists */ /* a table already exists */
if (*iopgd) if (*iopgd)
...@@ -500,18 +498,38 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) ...@@ -500,18 +498,38 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
if (!iopte) if (!iopte)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(obj->dev, *pt_dma)) {
dev_err(obj->dev, "DMA map error for L2 table\n");
iopte_free(obj, iopte, false);
return ERR_PTR(-ENOMEM);
}
/*
* we rely on dma address and the physical address to be
* the same for mapping the L2 table
*/
if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
dev_err(obj->dev, "DMA translation error for L2 table\n");
dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
DMA_TO_DEVICE);
iopte_free(obj, iopte, false);
return ERR_PTR(-ENOMEM);
}
*iopgd = virt_to_phys(iopte) | IOPGD_TABLE; *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
flush_iopgd_range(iopgd, iopgd);
flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
} else { } else {
/* We raced, free the redundant table */ /* We raced, free the redundant table */
iopte_free(iopte); iopte_free(obj, iopte, false);
} }
pte_ready: pte_ready:
iopte = iopte_offset(iopgd, da); iopte = iopte_offset(iopgd, da);
*pt_dma = virt_to_phys(iopte);
dev_vdbg(obj->dev, dev_vdbg(obj->dev,
"%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
__func__, da, iopgd, *iopgd, iopte, *iopte); __func__, da, iopgd, *iopgd, iopte, *iopte);
...@@ -522,6 +540,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) ...@@ -522,6 +540,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
unsigned long offset = iopgd_index(da) * sizeof(da);
if ((da | pa) & ~IOSECTION_MASK) { if ((da | pa) & ~IOSECTION_MASK) {
dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n", dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
...@@ -530,13 +549,14 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) ...@@ -530,13 +549,14 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
} }
*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
flush_iopgd_range(iopgd, iopgd); flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
return 0; return 0;
} }
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
unsigned long offset = iopgd_index(da) * sizeof(da);
int i; int i;
if ((da | pa) & ~IOSUPER_MASK) { if ((da | pa) & ~IOSUPER_MASK) {
...@@ -547,20 +567,22 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) ...@@ -547,20 +567,22 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++) for (i = 0; i < 16; i++)
*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
flush_iopgd_range(iopgd, iopgd + 15); flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
return 0; return 0;
} }
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
u32 *iopte = iopte_alloc(obj, iopgd, da); dma_addr_t pt_dma;
u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
unsigned long offset = iopte_index(da) * sizeof(da);
if (IS_ERR(iopte)) if (IS_ERR(iopte))
return PTR_ERR(iopte); return PTR_ERR(iopte);
*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
flush_iopte_range(iopte, iopte); flush_iopte_range(obj->dev, pt_dma, offset, 1);
dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
__func__, da, pa, iopte, *iopte); __func__, da, pa, iopte, *iopte);
...@@ -571,7 +593,9 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) ...@@ -571,7 +593,9 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
u32 *iopte = iopte_alloc(obj, iopgd, da); dma_addr_t pt_dma;
u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
unsigned long offset = iopte_index(da) * sizeof(da);
int i; int i;
if ((da | pa) & ~IOLARGE_MASK) { if ((da | pa) & ~IOLARGE_MASK) {
...@@ -585,7 +609,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) ...@@ -585,7 +609,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++) for (i = 0; i < 16; i++)
*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
flush_iopte_range(iopte, iopte + 15); flush_iopte_range(obj->dev, pt_dma, offset, 16);
return 0; return 0;
} }
...@@ -674,6 +698,9 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) ...@@ -674,6 +698,9 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
size_t bytes; size_t bytes;
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
int nent = 1; int nent = 1;
dma_addr_t pt_dma;
unsigned long pd_offset = iopgd_index(da) * sizeof(da);
unsigned long pt_offset = iopte_index(da) * sizeof(da);
if (!*iopgd) if (!*iopgd)
return 0; return 0;
...@@ -690,7 +717,8 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) ...@@ -690,7 +717,8 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
} }
bytes *= nent; bytes *= nent;
memset(iopte, 0, nent * sizeof(*iopte)); memset(iopte, 0, nent * sizeof(*iopte));
flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); pt_dma = virt_to_phys(iopte);
flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
/* /*
* do table walk to check if this table is necessary or not * do table walk to check if this table is necessary or not
...@@ -700,7 +728,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) ...@@ -700,7 +728,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
if (iopte[i]) if (iopte[i])
goto out; goto out;
iopte_free(iopte); iopte_free(obj, iopte, true);
nent = 1; /* for the next L1 entry */ nent = 1; /* for the next L1 entry */
} else { } else {
bytes = IOPGD_SIZE; bytes = IOPGD_SIZE;
...@@ -712,7 +740,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) ...@@ -712,7 +740,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
bytes *= nent; bytes *= nent;
} }
memset(iopgd, 0, nent * sizeof(*iopgd)); memset(iopgd, 0, nent * sizeof(*iopgd));
flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out: out:
return bytes; return bytes;
} }
...@@ -738,6 +766,7 @@ static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) ...@@ -738,6 +766,7 @@ static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
static void iopgtable_clear_entry_all(struct omap_iommu *obj) static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{ {
unsigned long offset;
int i; int i;
spin_lock(&obj->page_table_lock); spin_lock(&obj->page_table_lock);
...@@ -748,15 +777,16 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj) ...@@ -748,15 +777,16 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj)
da = i << IOPGD_SHIFT; da = i << IOPGD_SHIFT;
iopgd = iopgd_offset(obj, da); iopgd = iopgd_offset(obj, da);
offset = iopgd_index(da) * sizeof(da);
if (!*iopgd) if (!*iopgd)
continue; continue;
if (iopgd_is_table(*iopgd)) if (iopgd_is_table(*iopgd))
iopte_free(iopte_offset(iopgd, 0)); iopte_free(obj, iopte_offset(iopgd, 0), true);
*iopgd = 0; *iopgd = 0;
flush_iopgd_range(iopgd, iopgd); flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
} }
flush_iotlb_all(obj); flush_iotlb_all(obj);
...@@ -786,7 +816,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) ...@@ -786,7 +816,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!report_iommu_fault(domain, obj->dev, da, 0)) if (!report_iommu_fault(domain, obj->dev, da, 0))
return IRQ_HANDLED; return IRQ_HANDLED;
iommu_disable(obj); iommu_write_reg(obj, 0, MMU_IRQENABLE);
iopgd = iopgd_offset(obj, da); iopgd = iopgd_offset(obj, da);
...@@ -815,10 +845,18 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) ...@@ -815,10 +845,18 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
spin_lock(&obj->iommu_lock); spin_lock(&obj->iommu_lock);
obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(obj->dev, obj->pd_dma)) {
dev_err(obj->dev, "DMA map error for L1 table\n");
err = -ENOMEM;
goto out_err;
}
obj->iopgd = iopgd; obj->iopgd = iopgd;
err = iommu_enable(obj); err = iommu_enable(obj);
if (err) if (err)
goto err_enable; goto out_err;
flush_iotlb_all(obj); flush_iotlb_all(obj);
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
...@@ -827,7 +865,7 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) ...@@ -827,7 +865,7 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
return 0; return 0;
err_enable: out_err:
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
return err; return err;
...@@ -844,7 +882,10 @@ static void omap_iommu_detach(struct omap_iommu *obj) ...@@ -844,7 +882,10 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock); spin_lock(&obj->iommu_lock);
dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
iommu_disable(obj); iommu_disable(obj);
obj->pd_dma = 0;
obj->iopgd = NULL; obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
...@@ -1008,11 +1049,6 @@ static struct platform_driver omap_iommu_driver = { ...@@ -1008,11 +1049,6 @@ static struct platform_driver omap_iommu_driver = {
}, },
}; };
static void iopte_cachep_ctor(void *iopte)
{
clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{ {
memset(e, 0, sizeof(*e)); memset(e, 0, sizeof(*e));
...@@ -1159,7 +1195,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) ...@@ -1159,7 +1195,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE))) if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
goto fail_align; goto fail_align;
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock); spin_lock_init(&omap_domain->lock);
omap_domain->domain.geometry.aperture_start = 0; omap_domain->domain.geometry.aperture_start = 0;
...@@ -1347,7 +1382,7 @@ static int __init omap_iommu_init(void) ...@@ -1347,7 +1382,7 @@ static int __init omap_iommu_init(void)
of_node_put(np); of_node_put(np);
p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
iopte_cachep_ctor); NULL);
if (!p) if (!p)
return -ENOMEM; return -ENOMEM;
iopte_cachep = p; iopte_cachep = p;
......
...@@ -61,6 +61,7 @@ struct omap_iommu { ...@@ -61,6 +61,7 @@ struct omap_iommu {
*/ */
u32 *iopgd; u32 *iopgd;
spinlock_t page_table_lock; /* protect iopgd */ spinlock_t page_table_lock; /* protect iopgd */
dma_addr_t pd_dma;
int nr_tlb_entries; int nr_tlb_entries;
......
...@@ -90,7 +90,9 @@ struct rk_iommu { ...@@ -90,7 +90,9 @@ struct rk_iommu {
struct device *dev; struct device *dev;
void __iomem **bases; void __iomem **bases;
int num_mmu; int num_mmu;
int irq; int *irq;
int num_irq;
bool reset_disabled;
struct iommu_device iommu; struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */ struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */ struct iommu_domain *domain; /* domain to which iommu is attached */
...@@ -414,6 +416,9 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu) ...@@ -414,6 +416,9 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
int ret, i; int ret, i;
u32 dte_addr; u32 dte_addr;
if (iommu->reset_disabled)
return 0;
/* /*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY * and verifying that the upper 5 nybbles are read back.
* and verifying that upper 5 nybbles are read back. * and verifying that upper 5 nybbles are read back.
...@@ -825,10 +830,12 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, ...@@ -825,10 +830,12 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
iommu->domain = domain; iommu->domain = domain;
ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq, for (i = 0; i < iommu->num_irq; i++) {
IRQF_SHARED, dev_name(dev), iommu); ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
if (ret) IRQF_SHARED, dev_name(dev), iommu);
return ret; if (ret)
return ret;
}
for (i = 0; i < iommu->num_mmu; i++) { for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
...@@ -878,7 +885,8 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, ...@@ -878,7 +885,8 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
} }
rk_iommu_disable_stall(iommu); rk_iommu_disable_stall(iommu);
devm_free_irq(iommu->dev, iommu->irq, iommu); for (i = 0; i < iommu->num_irq; i++)
devm_free_irq(iommu->dev, iommu->irq[i], iommu);
iommu->domain = NULL; iommu->domain = NULL;
...@@ -1008,20 +1016,20 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group, ...@@ -1008,20 +1016,20 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group,
ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
&args); &args);
if (ret) { if (ret) {
dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
np->full_name, ret); np, ret);
return ret; return ret;
} }
if (args.args_count != 0) { if (args.args_count != 0) {
dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
args.np->full_name, args.args_count); args.np, args.args_count);
return -EINVAL; return -EINVAL;
} }
pd = of_find_device_by_node(args.np); pd = of_find_device_by_node(args.np);
of_node_put(args.np); of_node_put(args.np);
if (!pd) { if (!pd) {
dev_err(dev, "iommu %s not found\n", args.np->full_name); dev_err(dev, "iommu %pOF not found\n", args.np);
return -EPROBE_DEFER; return -EPROBE_DEFER;
} }
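The message changes in this hunk move from printing np->full_name to the %pOF printk format specifier, which renders the device-tree node's full path straight from the struct device_node pointer. For instance (the path shown is illustrative):

	dev_err(dev, "iommu %pOF not found\n", np);	/* prints e.g. "iommu /soc/iommu@ff8f3f00 not found" */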
...@@ -1157,12 +1165,28 @@ static int rk_iommu_probe(struct platform_device *pdev) ...@@ -1157,12 +1165,28 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0) if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]); return PTR_ERR(iommu->bases[0]);
iommu->irq = platform_get_irq(pdev, 0); iommu->num_irq = platform_irq_count(pdev);
if (iommu->irq < 0) { if (iommu->num_irq < 0)
dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); return iommu->num_irq;
if (iommu->num_irq == 0)
return -ENXIO; return -ENXIO;
iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
GFP_KERNEL);
if (!iommu->irq)
return -ENOMEM;
for (i = 0; i < iommu->num_irq; i++) {
iommu->irq[i] = platform_get_irq(pdev, i);
if (iommu->irq[i] < 0) {
dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
return -ENXIO;
}
} }
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
if (err) if (err)
return err; return err;
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
*/ */
#define S390_IOMMU_PGSIZES (~0xFFFUL) #define S390_IOMMU_PGSIZES (~0xFFFUL)
static const struct iommu_ops s390_iommu_ops;
struct s390_domain { struct s390_domain {
struct iommu_domain domain; struct iommu_domain domain;
struct list_head devices; struct list_head devices;
...@@ -166,11 +168,13 @@ static void s390_iommu_detach_device(struct iommu_domain *domain, ...@@ -166,11 +168,13 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
static int s390_iommu_add_device(struct device *dev) static int s390_iommu_add_device(struct device *dev)
{ {
struct iommu_group *group = iommu_group_get_for_dev(dev); struct iommu_group *group = iommu_group_get_for_dev(dev);
struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
if (IS_ERR(group)) if (IS_ERR(group))
return PTR_ERR(group); return PTR_ERR(group);
iommu_group_put(group); iommu_group_put(group);
iommu_device_link(&zdev->iommu_dev, dev);
return 0; return 0;
} }
...@@ -197,6 +201,7 @@ static void s390_iommu_remove_device(struct device *dev) ...@@ -197,6 +201,7 @@ static void s390_iommu_remove_device(struct device *dev)
s390_iommu_detach_device(domain, dev); s390_iommu_detach_device(domain, dev);
} }
iommu_device_unlink(&zdev->iommu_dev, dev);
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
} }
...@@ -327,7 +332,37 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain, ...@@ -327,7 +332,37 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain,
return size; return size;
} }
static struct iommu_ops s390_iommu_ops = { int zpci_init_iommu(struct zpci_dev *zdev)
{
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
"s390-iommu.%08x", zdev->fid);
if (rc)
goto out_err;
iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);
rc = iommu_device_register(&zdev->iommu_dev);
if (rc)
goto out_sysfs;
return 0;
out_sysfs:
iommu_device_sysfs_remove(&zdev->iommu_dev);
out_err:
return rc;
}
void zpci_destroy_iommu(struct zpci_dev *zdev)
{
iommu_device_unregister(&zdev->iommu_dev);
iommu_device_sysfs_remove(&zdev->iommu_dev);
}
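zpci_init_iommu() and zpci_destroy_iommu() follow the usual iommu_device registration pattern: add the sysfs representation, attach the ops, register with the core, and unwind in reverse order on failure. The same shape, sketched for a hypothetical driver structure that embeds a struct iommu_device:

	static int mydrv_register_iommu(struct mydrv *drv, struct device *parent)
	{
		int rc;

		rc = iommu_device_sysfs_add(&drv->iommu, parent, NULL, "mydrv");
		if (rc)
			return rc;

		iommu_device_set_ops(&drv->iommu, &mydrv_iommu_ops);

		rc = iommu_device_register(&drv->iommu);
		if (rc)
			iommu_device_sysfs_remove(&drv->iommu); /* unwind in reverse */
		return rc;
	}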
static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable, .capable = s390_iommu_capable,
.domain_alloc = s390_domain_alloc, .domain_alloc = s390_domain_alloc,
.domain_free = s390_domain_free, .domain_free = s390_domain_free,
......
...@@ -61,6 +61,8 @@ struct gart_device { ...@@ -61,6 +61,8 @@ struct gart_device {
struct list_head client; struct list_head client;
spinlock_t client_lock; /* for client list */ spinlock_t client_lock; /* for client list */
struct device *dev; struct device *dev;
struct iommu_device iommu; /* IOMMU Core handle */
}; };
struct gart_domain { struct gart_domain {
...@@ -334,12 +336,35 @@ static bool gart_iommu_capable(enum iommu_cap cap) ...@@ -334,12 +336,35 @@ static bool gart_iommu_capable(enum iommu_cap cap)
return false; return false;
} }
static int gart_iommu_add_device(struct device *dev)
{
struct iommu_group *group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
iommu_device_link(&gart_handle->iommu, dev);
return 0;
}
static void gart_iommu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
iommu_device_unlink(&gart_handle->iommu, dev);
}
static const struct iommu_ops gart_iommu_ops = { static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable, .capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc, .domain_alloc = gart_iommu_domain_alloc,
.domain_free = gart_iommu_domain_free, .domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev, .attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev, .detach_dev = gart_iommu_detach_dev,
.add_device = gart_iommu_add_device,
.remove_device = gart_iommu_remove_device,
.device_group = generic_device_group,
.map = gart_iommu_map, .map = gart_iommu_map,
.map_sg = default_iommu_map_sg, .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap, .unmap = gart_iommu_unmap,
...@@ -378,6 +403,7 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -378,6 +403,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
struct resource *res, *res_remap; struct resource *res, *res_remap;
void __iomem *gart_regs; void __iomem *gart_regs;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
int ret;
if (gart_handle) if (gart_handle)
return -EIO; return -EIO;
...@@ -404,6 +430,22 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -404,6 +430,22 @@ static int tegra_gart_probe(struct platform_device *pdev)
return -ENXIO; return -ENXIO;
} }
ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
dev_name(&pdev->dev));
if (ret) {
dev_err(dev, "Failed to register IOMMU in sysfs\n");
return ret;
}
iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
ret = iommu_device_register(&gart->iommu);
if (ret) {
dev_err(dev, "Failed to register IOMMU\n");
iommu_device_sysfs_remove(&gart->iommu);
return ret;
}
gart->dev = &pdev->dev; gart->dev = &pdev->dev;
spin_lock_init(&gart->pte_lock); spin_lock_init(&gart->pte_lock);
spin_lock_init(&gart->client_lock); spin_lock_init(&gart->client_lock);
...@@ -430,6 +472,9 @@ static int tegra_gart_remove(struct platform_device *pdev) ...@@ -430,6 +472,9 @@ static int tegra_gart_remove(struct platform_device *pdev)
{ {
struct gart_device *gart = platform_get_drvdata(pdev); struct gart_device *gart = platform_get_drvdata(pdev);
iommu_device_unregister(&gart->iommu);
iommu_device_sysfs_remove(&gart->iommu);
writel(0, gart->regs + GART_CONFIG); writel(0, gart->regs + GART_CONFIG);
if (gart->savedata) if (gart->savedata)
vfree(gart->savedata); vfree(gart->savedata);
......
...@@ -36,6 +36,8 @@ struct tegra_smmu { ...@@ -36,6 +36,8 @@ struct tegra_smmu {
struct list_head list; struct list_head list;
struct dentry *debugfs; struct dentry *debugfs;
struct iommu_device iommu; /* IOMMU Core code handle */
}; };
struct tegra_smmu_as { struct tegra_smmu_as {
...@@ -704,6 +706,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) ...@@ -704,6 +706,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
static int tegra_smmu_add_device(struct device *dev) static int tegra_smmu_add_device(struct device *dev)
{ {
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
struct iommu_group *group;
struct of_phandle_args args; struct of_phandle_args args;
unsigned int index = 0; unsigned int index = 0;
...@@ -719,18 +722,33 @@ static int tegra_smmu_add_device(struct device *dev) ...@@ -719,18 +722,33 @@ static int tegra_smmu_add_device(struct device *dev)
* first match. * first match.
*/ */
dev->archdata.iommu = smmu; dev->archdata.iommu = smmu;
iommu_device_link(&smmu->iommu, dev);
break; break;
} }
index++; index++;
} }
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
return 0; return 0;
} }
static void tegra_smmu_remove_device(struct device *dev) static void tegra_smmu_remove_device(struct device *dev)
{ {
struct tegra_smmu *smmu = dev->archdata.iommu;
if (smmu)
iommu_device_unlink(&smmu->iommu, dev);
dev->archdata.iommu = NULL; dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
} }
static const struct iommu_ops tegra_smmu_ops = { static const struct iommu_ops tegra_smmu_ops = {
...@@ -741,6 +759,7 @@ static const struct iommu_ops tegra_smmu_ops = { ...@@ -741,6 +759,7 @@ static const struct iommu_ops tegra_smmu_ops = {
.detach_dev = tegra_smmu_detach_dev, .detach_dev = tegra_smmu_detach_dev,
.add_device = tegra_smmu_add_device, .add_device = tegra_smmu_add_device,
.remove_device = tegra_smmu_remove_device, .remove_device = tegra_smmu_remove_device,
.device_group = generic_device_group,
.map = tegra_smmu_map, .map = tegra_smmu_map,
.unmap = tegra_smmu_unmap, .unmap = tegra_smmu_unmap,
.map_sg = default_iommu_map_sg, .map_sg = default_iommu_map_sg,
...@@ -930,9 +949,24 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, ...@@ -930,9 +949,24 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
tegra_smmu_ahb_enable(); tegra_smmu_ahb_enable();
err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
if (err)
return ERR_PTR(err);
iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
err = iommu_device_register(&smmu->iommu);
if (err) {
iommu_device_sysfs_remove(&smmu->iommu);
return ERR_PTR(err);
}
err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
if (err < 0) if (err < 0) {
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
return ERR_PTR(err); return ERR_PTR(err);
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu); tegra_smmu_debugfs_init(smmu);
...@@ -942,6 +976,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, ...@@ -942,6 +976,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
void tegra_smmu_remove(struct tegra_smmu *smmu) void tegra_smmu_remove(struct tegra_smmu *smmu)
{ {
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
if (IS_ENABLED(CONFIG_DEBUG_FS)) if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_exit(smmu); tegra_smmu_debugfs_exit(smmu);
} }
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
...@@ -23,7 +24,10 @@ ...@@ -23,7 +24,10 @@
#include <soc/mediatek/smi.h> #include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mt2701-larb-port.h> #include <dt-bindings/memory/mt2701-larb-port.h>
/* mt8173 */
#define SMI_LARB_MMU_EN 0xf00 #define SMI_LARB_MMU_EN 0xf00
/* mt2701 */
#define REG_SMI_SECUR_CON_BASE 0x5c0 #define REG_SMI_SECUR_CON_BASE 0x5c0
/* every register control 8 port, register offset 0x4 */ /* every register control 8 port, register offset 0x4 */
...@@ -41,7 +45,12 @@ ...@@ -41,7 +45,12 @@
/* mt2701 domain should be set to 3 */ /* mt2701 domain should be set to 3 */
#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1)) #define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
/* mt2712 */
#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
#define F_MMU_EN BIT(0)
struct mtk_smi_larb_gen { struct mtk_smi_larb_gen {
bool need_larbid;
int port_in_larb[MTK_LARB_NR_MAX + 1]; int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *); void (*config_port)(struct device *);
}; };
...@@ -148,6 +157,15 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) ...@@ -148,6 +157,15 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
struct mtk_smi_iommu *smi_iommu = data; struct mtk_smi_iommu *smi_iommu = data;
unsigned int i; unsigned int i;
if (larb->larb_gen->need_larbid) {
larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu;
return 0;
}
/*
* If there is no larbid property, loop to find the corresponding
* iommu information.
*/
for (i = 0; i < smi_iommu->larb_nr; i++) { for (i = 0; i < smi_iommu->larb_nr; i++) {
if (dev == smi_iommu->larb_imu[i].dev) { if (dev == smi_iommu->larb_imu[i].dev) {
/* The 'mmu' may be updated in iommu-attach/detach. */ /* The 'mmu' may be updated in iommu-attach/detach. */
...@@ -158,13 +176,32 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) ...@@ -158,13 +176,32 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV; return -ENODEV;
} }
static void mtk_smi_larb_config_port(struct device *dev) static void mtk_smi_larb_config_port_mt2712(struct device *dev)
{ {
struct mtk_smi_larb *larb = dev_get_drvdata(dev); struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg;
int i;
writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN); /*
* larb 8/9 is the bdpsys larb, whose iommu_en is enabled by default.
* There is no need to set it again.
*/
if (larb->larbid == 8 || larb->larbid == 9)
return;
for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
reg |= F_MMU_EN;
writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
}
} }
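The mt2712 path walks the 32-bit *larb->mmu bitmask with for_each_set_bit(), so only ports whose bit is set get the read-modify-write. An equivalent open-coded loop, for illustration only:

	for (i = 0; i < 32; i++) {
		if (!(*larb->mmu & BIT(i)))
			continue;	/* port i is not routed through the IOMMU */
		reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
		writel(reg | F_MMU_EN, larb->base + SMI_LARB_NONSEC_CON(i));
	}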
static void mtk_smi_larb_config_port_mt8173(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
}
static void mtk_smi_larb_config_port_gen1(struct device *dev) static void mtk_smi_larb_config_port_gen1(struct device *dev)
{ {
...@@ -210,10 +247,11 @@ static const struct component_ops mtk_smi_larb_component_ops = { ...@@ -210,10 +247,11 @@ static const struct component_ops mtk_smi_larb_component_ops = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = { static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
/* mt8173 do not need the port in larb */ /* mt8173 do not need the port in larb */
.config_port = mtk_smi_larb_config_port, .config_port = mtk_smi_larb_config_port_mt8173,
}; };
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
.need_larbid = true,
.port_in_larb = { .port_in_larb = {
LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
LARB2_PORT_OFFSET, LARB3_PORT_OFFSET LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
...@@ -221,6 +259,11 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { ...@@ -221,6 +259,11 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
.config_port = mtk_smi_larb_config_port_gen1, .config_port = mtk_smi_larb_config_port_gen1,
}; };
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
.need_larbid = true,
.config_port = mtk_smi_larb_config_port_mt2712,
};
static const struct of_device_id mtk_smi_larb_of_ids[] = { static const struct of_device_id mtk_smi_larb_of_ids[] = {
{ {
.compatible = "mediatek,mt8173-smi-larb", .compatible = "mediatek,mt8173-smi-larb",
...@@ -230,6 +273,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { ...@@ -230,6 +273,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt2701-smi-larb", .compatible = "mediatek,mt2701-smi-larb",
.data = &mtk_smi_larb_mt2701 .data = &mtk_smi_larb_mt2701
}, },
{
.compatible = "mediatek,mt2712-smi-larb",
.data = &mtk_smi_larb_mt2712
},
{} {}
}; };
...@@ -240,20 +287,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) ...@@ -240,20 +287,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct device_node *smi_node; struct device_node *smi_node;
struct platform_device *smi_pdev; struct platform_device *smi_pdev;
const struct of_device_id *of_id; int err;
if (!dev->pm_domain)
return -EPROBE_DEFER;
of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node);
if (!of_id)
return -EINVAL;
larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL); larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
if (!larb) if (!larb)
return -ENOMEM; return -ENOMEM;
larb->larb_gen = of_id->data; larb->larb_gen = of_device_get_match_data(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
larb->base = devm_ioremap_resource(dev, res); larb->base = devm_ioremap_resource(dev, res);
if (IS_ERR(larb->base)) if (IS_ERR(larb->base))
...@@ -268,6 +308,15 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) ...@@ -268,6 +308,15 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
return PTR_ERR(larb->smi.clk_smi); return PTR_ERR(larb->smi.clk_smi);
larb->smi.dev = dev; larb->smi.dev = dev;
if (larb->larb_gen->need_larbid) {
err = of_property_read_u32(dev->of_node, "mediatek,larb-id",
&larb->larbid);
if (err) {
dev_err(dev, "missing larbid property\n");
return err;
}
}
smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0); smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
if (!smi_node) if (!smi_node)
return -EINVAL; return -EINVAL;
...@@ -275,6 +324,8 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) ...@@ -275,6 +324,8 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
smi_pdev = of_find_device_by_node(smi_node); smi_pdev = of_find_device_by_node(smi_node);
of_node_put(smi_node); of_node_put(smi_node);
if (smi_pdev) { if (smi_pdev) {
if (!platform_get_drvdata(smi_pdev))
return -EPROBE_DEFER;
larb->smi_common_dev = &smi_pdev->dev; larb->smi_common_dev = &smi_pdev->dev;
} else { } else {
dev_err(dev, "Failed to get the smi_common device\n"); dev_err(dev, "Failed to get the smi_common device\n");
...@@ -311,6 +362,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { ...@@ -311,6 +362,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.compatible = "mediatek,mt2701-smi-common", .compatible = "mediatek,mt2701-smi-common",
.data = (void *)MTK_SMI_GEN1 .data = (void *)MTK_SMI_GEN1
}, },
{
.compatible = "mediatek,mt2712-smi-common",
.data = (void *)MTK_SMI_GEN2
},
{} {}
}; };
...@@ -319,11 +374,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev) ...@@ -319,11 +374,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct mtk_smi *common; struct mtk_smi *common;
struct resource *res; struct resource *res;
const struct of_device_id *of_id;
enum mtk_smi_gen smi_gen; enum mtk_smi_gen smi_gen;
int ret;
if (!dev->pm_domain)
return -EPROBE_DEFER;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common) if (!common)
...@@ -338,17 +390,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev) ...@@ -338,17 +390,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->clk_smi)) if (IS_ERR(common->clk_smi))
return PTR_ERR(common->clk_smi); return PTR_ERR(common->clk_smi);
of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node);
if (!of_id)
return -EINVAL;
/* /*
* for mtk smi gen 1, we need to get the ao(always on) base to config * for mtk smi gen 1, we need to get the ao(always on) base to config
* m4u port, and we need to enable the async clock to transform the smi * clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* clock into emi clock domain, but for mtk smi gen2, there's no smi ao * clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* base. * base.
*/ */
smi_gen = (enum mtk_smi_gen)of_id->data; smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev);
if (smi_gen == MTK_SMI_GEN1) { if (smi_gen == MTK_SMI_GEN1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
common->smi_ao_base = devm_ioremap_resource(dev, res); common->smi_ao_base = devm_ioremap_resource(dev, res);
...@@ -359,7 +407,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev) ...@@ -359,7 +407,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->clk_async)) if (IS_ERR(common->clk_async))
return PTR_ERR(common->clk_async); return PTR_ERR(common->clk_async);
clk_prepare_enable(common->clk_async); ret = clk_prepare_enable(common->clk_async);
if (ret)
return ret;
} }
pm_runtime_enable(dev); pm_runtime_enable(dev);
platform_set_drvdata(pdev, common); platform_set_drvdata(pdev, common);
...@@ -403,4 +453,4 @@ static int __init mtk_smi_init(void) ...@@ -403,4 +453,4 @@ static int __init mtk_smi_init(void)
return ret; return ret;
} }
subsys_initcall(mtk_smi_init); module_init(mtk_smi_init);
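Both probe functions above also drop the explicit of_match_node() lookup in favour of of_device_get_match_data(), which resolves the matched entry's .data in a single call. Roughly, for the larb probe:

	/* Before: */
	of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node);
	if (!of_id)
		return -EINVAL;
	larb->larb_gen = of_id->data;

	/* After: */
	larb->larb_gen = of_device_get_match_data(dev);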
...@@ -15,10 +15,6 @@ ...@@ -15,10 +15,6 @@
#define __DTS_IOMMU_PORT_MT8173_H #define __DTS_IOMMU_PORT_MT8173_H
#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) #define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
/* Local arbiter ID */
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x7)
/* PortID within the local arbiter */
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
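MTK_M4U_ID() packs the local arbiter index into bits 5 and up and the port index into bits [4:0]; the MTK_M4U_TO_LARB()/MTK_M4U_TO_PORT() helpers removed from this public header (presumably kept on the driver side) invert that packing. A worked example with illustrative values:

	/*
	 *   MTK_M4U_ID(2, 3)      = (2 << 5) | 3      = 0x43
	 *   MTK_M4U_TO_LARB(0x43) = (0x43 >> 5) & 0x7 = 2
	 *   MTK_M4U_TO_PORT(0x43) = 0x43 & 0x1f       = 3
	 */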
#define M4U_LARB0_ID 0 #define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1 #define M4U_LARB1_ID 1
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#ifdef CONFIG_MTK_SMI #ifdef CONFIG_MTK_SMI
#define MTK_LARB_NR_MAX 8 #define MTK_LARB_NR_MAX 16
#define MTK_SMI_MMU_EN(port) BIT(port) #define MTK_SMI_MMU_EN(port) BIT(port)
......