Commit 1daa56bc authored by Linus Torvalds

Merge tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Conversion of the AMD IOMMU driver to use the dma-iommu code for
   implementing the DMA-API. This gets rid of quite a bit of code in the
   driver itself, but also has some potential for regressions (none are
   known at the moment).

 - Support for the Qualcomm SMMUv2 implementation in the SDM845 SoC.
   This also includes some firmware interface changes, but those are
   acked by the respective maintainers.

 - Preparatory work to support two distinct page-tables per domain in
   the ARM-SMMU driver

 - Power management improvements for the ARM SMMUv2

 - Custom PASID allocator support

 - Multiple PCI DMA alias support for the AMD IOMMU driver

 - Adaptation of the Mediatek driver to the changed IO/TLB flush interface
   of the IOMMU core code.

 - Preparatory patches for the Renesas IOMMU driver to support future
   hardware.

* tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (62 commits)
  iommu/rockchip: Don't provoke WARN for harmless IRQs
  iommu/vt-d: Turn off translations at shutdown
  iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved
  iommu/arm-smmu: Remove duplicate error message
  iommu/arm-smmu-v3: Don't display an error when IRQ lines are missing
  iommu/ipmmu-vmsa: Add utlb_offset_base
  iommu/ipmmu-vmsa: Add helper functions for "uTLB" registers
  iommu/ipmmu-vmsa: Calculate context registers' offset instead of a macro
  iommu/ipmmu-vmsa: Add helper functions for MMU "context" registers
  iommu/ipmmu-vmsa: tidyup register definitions
  iommu/ipmmu-vmsa: Remove all unused register definitions
  iommu/mediatek: Reduce the tlb flush timeout value
  iommu/mediatek: Get rid of the pgtlock
  iommu/mediatek: Move the tlb_sync into tlb_flush
  iommu/mediatek: Delete the leaf in the tlb_flush
  iommu/mediatek: Use gather to achieve the tlb range flush
  iommu/mediatek: Add a new tlb_lock for tlb_flush
  iommu/mediatek: Correct the flush_iotlb_all callback
  iommu/io-pgtable-arm: Rename IOMMU_QCOM_SYS_CACHE and improve doc
  iommu/io-pgtable-arm: Rationalise MAIR handling
  ...
parents a5255bc3 9b3a713f
...@@ -15,6 +15,7 @@ Required Properties: ...@@ -15,6 +15,7 @@ Required Properties:
- "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU. - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
- "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU. - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU. - "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU.
- "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU.
- "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU. - "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU.
- "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU. - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU. - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
#ifndef _ASM_IA64_IOMMU_H #ifndef _ASM_IA64_IOMMU_H
#define _ASM_IA64_IOMMU_H 1 #define _ASM_IA64_IOMMU_H 1
#include <linux/acpi.h>
/* 10 seconds */ /* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
...@@ -9,6 +11,9 @@ extern void no_iommu_init(void); ...@@ -9,6 +11,9 @@ extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu; extern int force_iommu, no_iommu;
extern int iommu_detected; extern int iommu_detected;
static inline int __init
arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; }
#else #else
#define no_iommu (1) #define no_iommu (1)
#define iommu_detected (0) #define iommu_detected (0)
......
...@@ -2,10 +2,28 @@ ...@@ -2,10 +2,28 @@
#ifndef _ASM_X86_IOMMU_H #ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H
#include <linux/acpi.h>
#include <asm/e820/api.h>
extern int force_iommu, no_iommu; extern int force_iommu, no_iommu;
extern int iommu_detected; extern int iommu_detected;
/* 10 seconds */ /* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
static inline int __init
arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
u64 start = rmrr->base_address;
u64 end = rmrr->end_address + 1;
if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
return 0;
pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
start, end - 1);
return -EINVAL;
}
#endif /* _ASM_X86_IOMMU_H */ #endif /* _ASM_X86_IOMMU_H */
...@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) ...@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val); addr, val);
} }
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
{
return -ENODEV;
}
...@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock); ...@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3 #define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) #define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
/** static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
* qcom_scm_call() - Invoke a syscall in the secure world struct arm_smccc_res *res, u32 fn_id,
* @dev: device u64 x5, u32 type)
* @svc_id: service identifier {
* @cmd_id: command identifier u64 cmd;
* @desc: Descriptor structure containing arguments and return values struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
*
* Sends a command to the SCM and waits for the command to finish processing. cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
* This should *only* be called in pre-emptible context. ARM_SMCCC_OWNER_SIP, fn_id);
*/
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, quirk.state.a6 = 0;
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res) do {
arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2], x5,
quirk.state.a6, 0, res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
cmd = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, u32 fn_id,
u64 x5, bool atomic)
{
int retry_count = 0;
if (atomic) {
__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
return;
}
do {
mutex_lock(&qcom_scm_lock);
__qcom_scm_call_do(desc, res, fn_id, x5,
ARM_SMCCC_STD_CALL);
mutex_unlock(&qcom_scm_lock);
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, bool atomic)
{ {
int arglen = desc->arginfo & 0xf; int arglen = desc->arginfo & 0xf;
int retry_count = 0, i; int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id); u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX]; u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0; dma_addr_t args_phys = 0;
void *args_virt = NULL; void *args_virt = NULL;
size_t alloc_len; size_t alloc_len;
struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6}; gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) { if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt) if (!args_virt)
return -ENOMEM; return -ENOMEM;
...@@ -117,45 +157,55 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, ...@@ -117,45 +157,55 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys; x5 = args_phys;
} }
do { qcom_scm_call_do(desc, res, fn_id, x5, atomic);
mutex_lock(&qcom_scm_lock);
cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
qcom_smccc_convention,
ARM_SMCCC_OWNER_SIP, fn_id);
quirk.state.a6 = 0;
do {
arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2], x5,
quirk.state.a6, 0, res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
cmd = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
mutex_unlock(&qcom_scm_lock);
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
if (args_virt) { if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt); kfree(args_virt);
} }
if (res->a0 < 0) if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0); return qcom_scm_remap_error(res->a0);
return 0; return 0;
} }
/**
* qcom_scm_call() - Invoke a syscall in the secure world
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
*/
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
might_sleep();
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
}
/**
* qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This can be called in atomic context.
*/
static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
}
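
A minimal caller sketch, assuming a hypothetical helper (the helper and its
arguments are illustrative, not part of this patch): the sleeping variant
serializes on qcom_scm_lock and may msleep() on QCOM_SCM_V2_EBUSY, so atomic
contexts must use qcom_scm_call_atomic(), which issues a fast call and
allocates any extended-argument buffer with GFP_ATOMIC.

/*
 * Hypothetical example: pick the SCM call variant that matches the
 * calling context. Both variants end up in ___qcom_scm_call(); only
 * the locking, retry and allocation behaviour differ.
 */
static int example_scm_io_write(struct device *dev, phys_addr_t addr,
				unsigned int val, bool in_atomic)
{
	struct qcom_scm_desc desc = {0};
	struct arm_smccc_res res;

	desc.args[0] = addr;
	desc.args[1] = val;
	desc.arginfo = QCOM_SCM_ARGS(2);

	if (in_atomic)
		return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_IO,
					    QCOM_SCM_IO_WRITE, &desc, &res);

	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
			     &desc, &res);
}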
/** /**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus * @entry: Entry point function for the cpus
...@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) ...@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res); &desc, &res);
} }
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
desc.args[1] = en;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
}
...@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) ...@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
} }
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{ {
return __qcom_scm_io_readl(__scm->dev, addr, val); return __qcom_scm_io_readl(__scm->dev, addr, val);
......
...@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, ...@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare); u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3 #define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4 #define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_CONFIG_ERRATA1 0x3
#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size); size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare); u32 size, u32 spare);
extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16 #define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev, extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz, phys_addr_t mem_region, size_t mem_sz,
......
...@@ -3,6 +3,10 @@ ...@@ -3,6 +3,10 @@
config IOMMU_IOVA config IOMMU_IOVA
tristate tristate
# The IOASID library may also be used by non-IOMMU_API users
config IOASID
tristate
# IOMMU_API always gets selected by whoever wants it. # IOMMU_API always gets selected by whoever wants it.
config IOMMU_API config IOMMU_API
bool bool
...@@ -138,6 +142,7 @@ config AMD_IOMMU ...@@ -138,6 +142,7 @@ config AMD_IOMMU
select PCI_PASID select PCI_PASID
select IOMMU_API select IOMMU_API
select IOMMU_IOVA select IOMMU_IOVA
select IOMMU_DMA
depends on X86_64 && PCI && ACPI depends on X86_64 && PCI && ACPI
---help--- ---help---
With this option you can enable support for AMD IOMMU hardware in With this option you can enable support for AMD IOMMU hardware in
......
...@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o ...@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
......
...@@ -468,7 +468,6 @@ struct protection_domain { ...@@ -468,7 +468,6 @@ struct protection_domain {
struct iommu_domain domain; /* generic domain handle used by struct iommu_domain domain; /* generic domain handle used by
iommu core code */ iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/ spinlock_t lock; /* mostly used to lock the page table*/
struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */ u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */ int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */ u64 *pt_root; /* page table root pointer */
...@@ -639,8 +638,8 @@ struct iommu_dev_data { ...@@ -639,8 +638,8 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */ struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */ struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */ struct protection_domain *domain; /* Domain the device is bound to */
struct pci_dev *pdev;
u16 devid; /* PCI Device ID */ u16 devid; /* PCI Device ID */
u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */ bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */ bool passthrough; /* Device is identity mapped */
struct { struct {
......
...@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm ...@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) #define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) #define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
static int arm_mmu500_reset(struct arm_smmu_device *smmu) int arm_mmu500_reset(struct arm_smmu_device *smmu)
{ {
u32 reg, major; u32 reg, major;
int i; int i;
...@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) ...@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
"calxeda,smmu-secure-config-access")) "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl; smmu->impl = &calxeda_impl;
if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
return qcom_smmu_impl_init(smmu);
return smmu; return smmu;
} }
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
struct qcom_smmu {
struct arm_smmu_device smmu;
};
static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
int ret;
arm_mmu500_reset(smmu);
/*
* To address performance degradation in non-real time clients,
* such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
* such as MTP and db845, whose firmwares implement secure monitor
* call handlers to turn on/off the wait-for-safe logic.
*/
ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
if (ret)
dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
return ret;
}
static const struct arm_smmu_impl qcom_smmu_impl = {
.reset = qcom_sdm845_smmu500_reset,
};
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
struct qcom_smmu *qsmmu;
qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu)
return ERR_PTR(-ENOMEM);
qsmmu->smmu = *smmu;
qsmmu->smmu.impl = &qcom_smmu_impl;
devm_kfree(smmu->dev, smmu);
return &qsmmu->smmu;
}
...@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, ...@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->cd.asid = (u16)asid; cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr; cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
return 0; return 0;
out_free_asid: out_free_asid:
...@@ -2448,7 +2448,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -2448,7 +2448,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
...@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev) ...@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Interrupt lines */ /* Interrupt lines */
irq = platform_get_irq_byname(pdev, "combined"); irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0) if (irq > 0)
smmu->combined_irq = irq; smmu->combined_irq = irq;
else { else {
irq = platform_get_irq_byname(pdev, "eventq"); irq = platform_get_irq_byname_optional(pdev, "eventq");
if (irq > 0) if (irq > 0)
smmu->evtq.q.irq = irq; smmu->evtq.q.irq = irq;
irq = platform_get_irq_byname(pdev, "priq"); irq = platform_get_irq_byname_optional(pdev, "priq");
if (irq > 0) if (irq > 0)
smmu->priq.q.irq = irq; smmu->priq.q.irq = irq;
irq = platform_get_irq_byname(pdev, "gerror"); irq = platform_get_irq_byname_optional(pdev, "gerror");
if (irq > 0) if (irq > 0)
smmu->gerr_irq = irq; smmu->gerr_irq = irq;
} }
......
...@@ -79,6 +79,8 @@ ...@@ -79,6 +79,8 @@
#define ID7_MINOR GENMASK(3, 0) #define ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48 #define ARM_SMMU_GR0_sGFSR 0x48
#define sGFSR_USF BIT(1)
#define ARM_SMMU_GR0_sGFSYNR0 0x50 #define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54 #define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58 #define ARM_SMMU_GR0_sGFSYNR2 0x58
...@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage { ...@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_BYPASS, ARM_SMMU_DOMAIN_BYPASS,
}; };
struct arm_smmu_flush_ops {
struct iommu_flush_ops tlb;
void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
bool leaf, void *cookie);
void (*tlb_sync)(void *cookie);
};
struct arm_smmu_domain { struct arm_smmu_domain {
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops; struct io_pgtable_ops *pgtbl_ops;
const struct arm_smmu_flush_ops *flush_ops; const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg; struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage; enum arm_smmu_domain_stage stage;
bool non_strict; bool non_strict;
...@@ -335,6 +330,8 @@ struct arm_smmu_impl { ...@@ -335,6 +330,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu); int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu); int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain); int (*init_context)(struct arm_smmu_domain *smmu_domain);
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
}; };
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
...@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, ...@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */ #endif /* _ARM_SMMU_H */
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/crash_dump.h>
struct iommu_dma_msi_page { struct iommu_dma_msi_page {
struct list_head list; struct list_head list;
...@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, ...@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain); return iova_reserve_iommu_regions(dev, domain);
} }
static int iommu_dma_deferred_attach(struct device *dev,
struct iommu_domain *domain)
{
const struct iommu_ops *ops = domain->ops;
if (!is_kdump_kernel())
return 0;
if (unlikely(ops->is_attach_deferred &&
ops->is_attach_deferred(domain, dev)))
return iommu_attach_device(domain, dev);
return 0;
}
/** /**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags. * page flags.
...@@ -461,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, ...@@ -461,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
} }
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot) size_t size, int prot, dma_addr_t dma_mask)
{ {
struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iommu_dma_cookie *cookie = domain->iova_cookie;
...@@ -469,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, ...@@ -469,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys); size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova; dma_addr_t iova;
if (unlikely(iommu_dma_deferred_attach(dev, domain)))
return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off); size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova) if (!iova)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) { if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size); iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
} }
...@@ -578,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, ...@@ -578,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR; *dma_handle = DMA_MAPPING_ERROR;
if (unlikely(iommu_dma_deferred_attach(dev, domain)))
return NULL;
min_size = alloc_sizes & -alloc_sizes; min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) { if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE; min_size = PAGE_SIZE;
...@@ -610,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, ...@@ -610,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length); arch_dma_prep_coherent(sg_page(sg), sg->length);
} }
if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size) < size)
goto out_free_sg; goto out_free_sg;
...@@ -710,7 +732,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, ...@@ -710,7 +732,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
int prot = dma_info_to_prot(dir, coherent, attrs); int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle; dma_addr_t dma_handle;
dma_handle =__iommu_dma_map(dev, phys, size, prot); dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR) dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(phys, size, dir); arch_sync_dma_for_device(phys, size, dir);
...@@ -820,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -820,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev); unsigned long mask = dma_get_seg_boundary(dev);
int i; int i;
if (unlikely(iommu_dma_deferred_attach(dev, domain)))
return 0;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir); iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
...@@ -870,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -870,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's * We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do. * implementation - it knows better than we do.
*/ */
if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova; goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova); return __finalise_sg(dev, sg, nents, iova);
...@@ -910,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, ...@@ -910,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs) size_t size, enum dma_data_direction dir, unsigned long attrs)
{ {
return __iommu_dma_map(dev, phys, size, return __iommu_dma_map(dev, phys, size,
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
dma_get_mask(dev));
} }
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
...@@ -1016,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, ...@@ -1016,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr) if (!cpu_addr)
return NULL; return NULL;
*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot); *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) { if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr); __iommu_dma_free(dev, size, cpu_addr);
return NULL; return NULL;
......
...@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void) ...@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
} }
#ifdef CONFIG_X86 #ifdef CONFIG_X86
if (!ret) if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init; x86_init.iommu.iommu_init = intel_iommu_init;
x86_platform.iommu_shutdown = intel_iommu_shutdown;
}
#endif #endif
if (dmar_tbl) { if (dmar_tbl) {
......
...@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, ...@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/ */
static int exynos_iommu_map(struct iommu_domain *iommu_domain, static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size, unsigned long l_iova, phys_addr_t paddr, size_t size,
int prot) int prot, gfp_t gfp)
{ {
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry; sysmmu_pte_t *entry;
......
...@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain) ...@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags); spin_unlock_irqrestore(&device_domain_lock, flags);
} }
/*
* find_domain
* Note: we use struct device->archdata.iommu stores the info
*/
static struct dmar_domain *find_domain(struct device *dev) static struct dmar_domain *find_domain(struct device *dev)
{ {
struct device_domain_info *info; struct device_domain_info *info;
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
return NULL;
/* No lock here, assumes no domain exit in normal case */
info = dev->archdata.iommu;
if (likely(info))
return info->domain;
return NULL;
}
static struct dmar_domain *deferred_attach_domain(struct device *dev)
{
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
struct iommu_domain *domain; struct iommu_domain *domain;
...@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev) ...@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev)
intel_iommu_attach_device(domain, dev); intel_iommu_attach_device(domain, dev);
} }
/* No lock here, assumes no domain exit in normal case */ return find_domain(dev);
info = dev->archdata.iommu;
if (likely(info))
return info->domain;
return NULL;
} }
static inline struct device_domain_info * static inline struct device_domain_info *
...@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, ...@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
domain = find_domain(dev); domain = deferred_attach_domain(dev);
if (!domain) if (!domain)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
...@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele ...@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev)) if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = find_domain(dev); domain = deferred_attach_domain(dev);
if (!domain) if (!domain)
return 0; return 0;
...@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, ...@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0; int prot = 0;
int ret; int ret;
domain = find_domain(dev); domain = deferred_attach_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain)) if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
...@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) ...@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{ {
struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru; struct dmar_rmrr_unit *rmrru;
int ret;
rmrr = (struct acpi_dmar_reserved_memory *)header;
ret = arch_rmrr_sanity_check(rmrr);
if (ret)
return ret;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru) if (!rmrru)
goto out; goto out;
rmrru->hdr = header; rmrru->hdr = header;
rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address; rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address; rmrru->end_address = rmrr->end_address;
...@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void) ...@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void)
iommu_disable_translation(iommu); iommu_disable_translation(iommu);
} }
void intel_iommu_shutdown(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
if (no_iommu || dmar_disabled)
return;
down_write(&dmar_global_lock);
/* Disable PMRs explicitly here. */
for_each_iommu(iommu, drhd)
iommu_disable_protect_mem_regions(iommu);
/* Make sure the IOMMUs are switched off */
intel_disable_iommus();
up_write(&dmar_global_lock);
}
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{ {
struct iommu_device *iommu_dev = dev_to_iommu_device(dev); struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
...@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, ...@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain, static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa, unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot) size_t size, int iommu_prot, gfp_t gfp)
{ {
struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr; u64 max_addr;
......
...@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = { ...@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
static struct io_pgtable_cfg *cfg_cookie; static struct io_pgtable_cfg *cfg_cookie __initdata;
static void dummy_tlb_flush_all(void *cookie) static void __init dummy_tlb_flush_all(void *cookie)
{ {
WARN_ON(cookie != cfg_cookie); WARN_ON(cookie != cfg_cookie);
} }
static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, static void __init dummy_tlb_flush(unsigned long iova, size_t size,
void *cookie) size_t granule, void *cookie)
{ {
WARN_ON(cookie != cfg_cookie); WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
} }
static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather, static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie) unsigned long iova, size_t granule,
void *cookie)
{ {
dummy_tlb_flush(iova, granule, granule, cookie); dummy_tlb_flush(iova, granule, granule, cookie);
} }
static const struct iommu_flush_ops dummy_tlb_ops = { static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush, .tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush,
......
...@@ -1665,6 +1665,36 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev) ...@@ -1665,6 +1665,36 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
} }
EXPORT_SYMBOL_GPL(iommu_attach_device); EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
{
if (unlikely(!domain->ops->cache_invalidate))
return -ENODEV;
return domain->ops->cache_invalidate(domain, dev, inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data)
{
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
return domain->ops->sva_bind_gpasid(domain, dev, data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
{
if (unlikely(!domain->ops->sva_unbind_gpasid))
return -ENODEV;
return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
static void __iommu_detach_device(struct iommu_domain *domain, static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
...@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, ...@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize; return pgsize;
} }
int iommu_map(struct iommu_domain *domain, unsigned long iova, int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
const struct iommu_ops *ops = domain->ops; const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova; unsigned long orig_iova = iova;
...@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize); iova, &paddr, pgsize);
ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret) if (ret)
break; break;
...@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret; return ret;
} }
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
might_sleep();
return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map); EXPORT_SYMBOL_GPL(iommu_map);
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
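
A minimal sketch, assuming a hypothetical caller: iommu_map() keeps the
GFP_KERNEL behaviour and now asserts might_sleep(), while iommu_map_atomic()
passes GFP_ATOMIC down to the driver's ->map() callback so mappings can be
created from atomic context, as the dma-iommu paths above now do.

/*
 * Hypothetical helper: choose the mapping variant that matches the
 * calling context. Both end up in __iommu_map(); only the gfp flag
 * handed to the IOMMU driver differs.
 */
static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, bool in_atomic)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (in_atomic)
		return iommu_map_atomic(domain, iova, paddr, size, prot);

	return iommu_map(domain, iova, paddr, size, prot);
}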
static size_t __iommu_unmap(struct iommu_domain *domain, static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size, unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather) struct iommu_iotlb_gather *iotlb_gather)
...@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, ...@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
} }
EXPORT_SYMBOL_GPL(iommu_unmap_fast); EXPORT_SYMBOL_GPL(iommu_unmap_fast);
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot) struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{ {
size_t len = 0, mapped = 0; size_t len = 0, mapped = 0;
phys_addr_t start; phys_addr_t start;
...@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ...@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg); phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) { if (len && s_phys != start + len) {
ret = iommu_map(domain, iova + mapped, start, len, prot); ret = __iommu_map(domain, iova + mapped, start,
len, prot, gfp);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -2031,8 +2078,22 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ...@@ -2031,8 +2078,22 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
return 0; return 0;
} }
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
might_sleep();
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg); EXPORT_SYMBOL_GPL(iommu_map_sg);
size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot) phys_addr_t paddr, u64 size, int prot)
{ {
......
...@@ -504,7 +504,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, ...@@ -504,7 +504,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
} }
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t len, int prot) phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{ {
struct msm_priv *priv = to_msm_priv(domain); struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags; unsigned long flags;
......
...@@ -101,8 +101,6 @@ ...@@ -101,8 +101,6 @@
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f) #define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
struct mtk_iommu_domain { struct mtk_iommu_domain {
spinlock_t pgtlock; /* lock for page table */
struct io_pgtable_cfg cfg; struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop; struct io_pgtable_ops *iop;
...@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie) ...@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
} }
} }
static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
size_t granule, bool leaf, size_t granule, void *cookie)
void *cookie)
{ {
struct mtk_iommu_data *data = cookie; struct mtk_iommu_data *data = cookie;
unsigned long flags;
int ret;
u32 tmp;
for_each_m4u(data) { for_each_m4u(data) {
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL); data->base + REG_MMU_INV_SEL);
...@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, ...@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A); data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE, writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE); data->base + REG_MMU_INVALIDATE);
data->tlb_flush_active = true;
}
}
static void mtk_iommu_tlb_sync(void *cookie)
{
struct mtk_iommu_data *data = cookie;
int ret;
u32 tmp;
for_each_m4u(data) {
/* Avoid timing out if there's nothing to wait for */
if (!data->tlb_flush_active)
return;
/* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 100000); tmp, tmp != 0, 10, 1000);
if (ret) { if (ret) {
dev_warn(data->dev, dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n"); "Partial TLB flush timed out, falling back to full flush\n");
...@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie) ...@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie)
} }
/* Clear the CPE status */ /* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE); writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
data->tlb_flush_active = false; spin_unlock_irqrestore(&data->tlb_lock, flags);
} }
} }
static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
mtk_iommu_tlb_sync(cookie);
}
static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
mtk_iommu_tlb_sync(cookie);
}
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, unsigned long iova, size_t granule,
void *cookie) void *cookie)
{ {
mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); struct mtk_iommu_data *data = cookie;
struct iommu_domain *domain = &data->m4u_dom->domain;
iommu_iotlb_gather_add_page(domain, gather, iova, granule);
} }
static const struct iommu_flush_ops mtk_iommu_flush_ops = { static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_all = mtk_iommu_tlb_flush_all,
.tlb_flush_walk = mtk_iommu_tlb_flush_walk, .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
.tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
}; };
...@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) ...@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{ {
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
spin_lock_init(&dom->pgtlock);
dom->cfg = (struct io_pgtable_cfg) { dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS | .quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_NO_PERMS |
...@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, ...@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
} }
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
unsigned long flags;
int ret;
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */ /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
if (data->enable_4GB) if (data->enable_4GB)
paddr |= BIT_ULL(32); paddr |= BIT_ULL(32);
spin_lock_irqsave(&dom->pgtlock, flags); /* Synchronize with the tlb_lock */
ret = dom->iop->map(dom->iop, iova, paddr, size, prot); return dom->iop->map(dom->iop, iova, paddr, size, prot);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return ret;
} }
static size_t mtk_iommu_unmap(struct iommu_domain *domain, static size_t mtk_iommu_unmap(struct iommu_domain *domain,
...@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, ...@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather) struct iommu_iotlb_gather *gather)
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned long flags;
size_t unmapsz;
spin_lock_irqsave(&dom->pgtlock, flags);
unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return unmapsz; return dom->iop->unmap(dom->iop, iova, size, gather);
} }
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{ {
mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
} }
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather) struct iommu_iotlb_gather *gather)
{ {
mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
size_t length = gather->end - gather->start;
if (gather->start == ULONG_MAX)
return;
mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
data);
} }
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
...@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
unsigned long flags;
phys_addr_t pa; phys_addr_t pa;
spin_lock_irqsave(&dom->pgtlock, flags);
pa = dom->iop->iova_to_phys(dom->iop, iova); pa = dom->iop->iova_to_phys(dom->iop, iova);
spin_unlock_irqrestore(&dom->pgtlock, flags);
if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32); pa &= ~BIT_ULL(32);
...@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) ...@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret) if (ret)
return ret; return ret;
spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist); list_add_tail(&data->list, &m4ulist);
if (!iommu_present(&platform_bus_type)) if (!iommu_present(&platform_bus_type))
......
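
A simplified sketch of the core unmap path that drives the gather-based flush
(the helper name is illustrative; the core APIs are from this kernel series):
pages are only recorded by ->tlb_add_page() during unmap, and the accumulated
range is flushed once in ->iotlb_sync(), which is why mtk_iommu_iotlb_sync()
returns early while gather->start is still ULONG_MAX.

/*
 * Simplified sketch of the core's unmap flow: the driver's
 * tlb_add_page() callback only records pages in the gather, and the
 * single range flush happens in iotlb_sync().
 */
static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
			    size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);	/* start = ULONG_MAX, end = 0 */
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
	iommu_tlb_sync(domain, &gather);	/* -> mtk_iommu_iotlb_sync() */

	return unmapped;
}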
...@@ -57,7 +57,7 @@ struct mtk_iommu_data { ...@@ -57,7 +57,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom; struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group; struct iommu_group *m4u_group;
bool enable_4GB; bool enable_4GB;
bool tlb_flush_active; spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu; struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data; const struct mtk_iommu_plat_data *plat_data;
......
...@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, ...@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
} }
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
......
...@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) ...@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
} }
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot) phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{ {
struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev; struct device *dev = omap_domain->dev;
......
...@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, ...@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* MAIRs (stage-1 only) */ /* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); pgtbl_cfg.arm_lpae_s1_cfg.mair);
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
/* SCTLR */ /* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
...@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ...@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
} }
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
...@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev) ...@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev)
} }
group = iommu_group_get_for_dev(dev); group = iommu_group_get_for_dev(dev);
if (IS_ERR_OR_NULL(group)) if (IS_ERR(group))
return PTR_ERR_OR_ZERO(group); return PTR_ERR(group);
iommu_group_put(group); iommu_group_put(group);
iommu_device_link(&qcom_iommu->iommu, dev); iommu_device_link(&qcom_iommu->iommu, dev);
......
...@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) ...@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err; int i, err;
err = pm_runtime_get_if_in_use(iommu->dev); err = pm_runtime_get_if_in_use(iommu->dev);
if (WARN_ON_ONCE(err <= 0)) if (!err || WARN_ON_ONCE(err < 0))
return ret; return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
...@@ -758,7 +758,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, ...@@ -758,7 +758,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
} }
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct rk_iommu_domain *rk_domain = to_rk_domain(domain); struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
...@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) ...@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev) if (!dma_dev)
return NULL; return NULL;
rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL); rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain) if (!rk_domain)
return NULL; return NULL;
if (type == IOMMU_DOMAIN_DMA && if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain)) iommu_get_dma_cookie(&rk_domain->domain))
return NULL; goto err_free_domain;
/* /*
* rk32xx iommus use a 2 level pagetable. * rk32xx iommus use a 2 level pagetable.
...@@ -1021,6 +1021,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) ...@@ -1021,6 +1021,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
err_put_cookie: err_put_cookie:
if (type == IOMMU_DOMAIN_DMA) if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain); iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
kfree(rk_domain);
return NULL; return NULL;
} }
...@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) ...@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_DMA) if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain); iommu_put_dma_cookie(&rk_domain->domain);
kfree(rk_domain);
} }
static int rk_iommu_add_device(struct device *dev) static int rk_iommu_add_device(struct device *dev)
......
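The Rockchip IRQ-handler change above hinges on the three return values of pm_runtime_get_if_in_use(): a negative errno on a runtime-PM error, 0 when the device is not in use (the interrupt is then harmless and can simply be ignored rather than WARNed about), and 1 when a usage reference was taken. (The domain-allocation hunks, in turn, just stop tying the domain's memory to the DMA device's managed allocations and pair kzalloc() with an explicit kfree().) A minimal, hedged sketch of the IRQ pattern follows; my_iommu_irq() and the device pointer are placeholders, not the driver's own code.

/* Hedged sketch of the return-value handling the change above adopts;
 * my_iommu_irq() is a placeholder, not the Rockchip driver itself.
 */
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t my_iommu_irq(int irq, void *dev_id)
{
        struct device *dev = dev_id;
        int err;

        err = pm_runtime_get_if_in_use(dev);
        if (!err)                       /* 0: device not in use, IRQ is harmless */
                return IRQ_NONE;
        if (WARN_ON_ONCE(err < 0))      /* < 0: runtime PM is in a bad state */
                return IRQ_NONE;

        /* err == 1: a usage reference was taken, the hardware is accessible */
        /* ... read and clear the fault registers here ... */

        pm_runtime_put(dev);
        return IRQ_HANDLED;
}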
...@@ -265,7 +265,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, ...@@ -265,7 +265,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
} }
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct s390_domain *s390_domain = to_s390_domain(domain); struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0; int flags = ZPCI_PTE_VALID, rc = 0;
......
...@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, ...@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
} }
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot) phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{ {
struct gart_device *gart = gart_handle; struct gart_device *gart = gart_handle;
int ret; int ret;
......
...@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) ...@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr; return (addr & smmu->pfn_mask) == addr;
} }
static dma_addr_t smmu_pde_to_dma(u32 pde) static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{ {
return pde << 12; return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
} }
static void smmu_flush_ptc_all(struct tegra_smmu *smmu) static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
...@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, ...@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
static inline void smmu_flush(struct tegra_smmu *smmu) static inline void smmu_flush(struct tegra_smmu *smmu)
{ {
smmu_readl(smmu, SMMU_CONFIG); smmu_readl(smmu, SMMU_PTB_ASID);
} }
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
...@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, ...@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i; unsigned int i;
u32 value; u32 value;
group = tegra_smmu_find_swgroup(smmu, swgroup);
if (group) {
value = smmu_readl(smmu, group->reg);
value &= ~SMMU_ASID_MASK;
value |= SMMU_ASID_VALUE(asid);
value |= SMMU_ASID_ENABLE;
smmu_writel(smmu, value, group->reg);
} else {
pr_warn("%s group from swgroup %u not found\n", __func__,
swgroup);
/* No point moving ahead if group was not found */
return;
}
for (i = 0; i < smmu->soc->num_clients; i++) { for (i = 0; i < smmu->soc->num_clients; i++) {
const struct tegra_mc_client *client = &smmu->soc->clients[i]; const struct tegra_mc_client *client = &smmu->soc->clients[i];
...@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, ...@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= BIT(client->smmu.bit); value |= BIT(client->smmu.bit);
smmu_writel(smmu, value, client->smmu.reg); smmu_writel(smmu, value, client->smmu.reg);
} }
group = tegra_smmu_find_swgroup(smmu, swgroup);
if (group) {
value = smmu_readl(smmu, group->reg);
value &= ~SMMU_ASID_MASK;
value |= SMMU_ASID_VALUE(asid);
value |= SMMU_ASID_ENABLE;
smmu_writel(smmu, value, group->reg);
}
} }
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
...@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, ...@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap) dma_addr_t *dmap)
{ {
unsigned int pd_index = iova_pd_index(iova); unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
struct page *pt_page; struct page *pt_page;
u32 *pd; u32 *pd;
...@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, ...@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL; return NULL;
pd = page_address(as->pd); pd = page_address(as->pd);
*dmap = smmu_pde_to_dma(pd[pd_index]); *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova); return tegra_smmu_pte_offset(pt_page, iova);
} }
...@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, ...@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else { } else {
u32 *pd = page_address(as->pd); u32 *pd = page_address(as->pd);
*dmap = smmu_pde_to_dma(pd[pde]); *dmap = smmu_pde_to_dma(smmu, pd[pde]);
} }
return tegra_smmu_pte_offset(as->pts[pde], iova); return tegra_smmu_pte_offset(as->pts[pde], iova);
...@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) ...@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) { if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu; struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd); u32 *pd = page_address(as->pd);
dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0); tegra_smmu_set_pde(as, iova, 0);
...@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, ...@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
} }
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct tegra_smmu_as *as = to_smmu_as(domain); struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma; dma_addr_t pte_dma;
......
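The smmu_pde_to_dma() change above matters once page-table pages can sit above 4 GiB: pde is a u32, so the old "pde << 12" is evaluated in 32 bits and loses the upper address bits before the result is widened to dma_addr_t, and any non-PFN bits in the PDE would otherwise end up in the address. A stand-alone (user-space) illustration with invented values; the mask and PDE below are assumptions, not Tegra's real layout.

/* Stand-alone illustration of the truncation fixed in smmu_pde_to_dma();
 * pfn_mask and the PDE value are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pfn_mask = 0x00ffffff;  /* assume a 36-bit physical address space */
        uint32_t pde = 0x00123456;       /* PFN of a page-table page above 4 GiB */

        uint64_t truncated = pde << 12;                       /* 32-bit shift: 0x23456000, top bits lost */
        uint64_t correct = (uint64_t)(pde & pfn_mask) << 12;  /* mask, widen, then shift: 0x123456000 */

        printf("truncated=%#llx correct=%#llx\n",
               (unsigned long long)truncated, (unsigned long long)correct);
        return 0;
}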
...@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, ...@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
*/ */
static int __viommu_sync_req(struct viommu_dev *viommu) static int __viommu_sync_req(struct viommu_dev *viommu)
{ {
int ret = 0;
unsigned int len; unsigned int len;
size_t write_len; size_t write_len;
struct viommu_request *req; struct viommu_request *req;
...@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu) ...@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
kfree(req); kfree(req);
} }
return ret; return 0;
} }
static int viommu_sync_req(struct viommu_dev *viommu) static int viommu_sync_req(struct viommu_dev *viommu)
...@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
static int viommu_map(struct iommu_domain *domain, unsigned long iova, static int viommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
int ret; int ret;
u32 flags; u32 flags;
......
...@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) ...@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
static const struct dev_pm_ops smi_larb_pm_ops = { static const struct dev_pm_ops smi_larb_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL) SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
}; };
static struct platform_driver mtk_smi_larb_driver = { static struct platform_driver mtk_smi_larb_driver = {
...@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev) ...@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
static const struct dev_pm_ops smi_common_pm_ops = { static const struct dev_pm_ops smi_common_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL) SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
}; };
static struct platform_driver mtk_smi_common_driver = { static struct platform_driver mtk_smi_common_driver = {
......
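Both SMI hunks above add system-sleep support by reusing the existing runtime-PM callbacks through pm_runtime_force_suspend()/pm_runtime_force_resume(). A generic, hedged sketch of that dev_pm_ops pattern follows; my_runtime_suspend(), my_runtime_resume() and my_driver are placeholders, not the SMI driver itself.

/* Generic pattern sketch: the runtime-PM callbacks also serve late system
 * suspend/resume via the force helpers. Names here are invented.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
        /* ... gate clocks, save hardware state ... */
        return 0;
}

static int my_runtime_resume(struct device *dev)
{
        /* ... ungate clocks, restore hardware state ... */
        return 0;
}

static const struct dev_pm_ops my_pm_ops = {
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
        SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                     pm_runtime_force_resume)
};

static struct platform_driver my_driver = {
        /* .probe and .remove omitted in this sketch */
        .driver = {
                .name = "my-smi-like-device",
                .pm = &my_pm_ops,
        },
};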
...@@ -129,6 +129,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) ...@@ -129,6 +129,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu; extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void); extern int intel_iommu_init(void);
extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
...@@ -137,6 +138,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); ...@@ -137,6 +138,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */ #else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; } static inline int intel_iommu_init(void) { return -ENODEV; }
static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop #define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop #define dmar_parse_one_atsr dmar_res_noop
......
...@@ -102,7 +102,7 @@ struct io_pgtable_cfg { ...@@ -102,7 +102,7 @@ struct io_pgtable_cfg {
struct { struct {
u64 ttbr[2]; u64 ttbr[2];
u64 tcr; u64 tcr;
u64 mair[2]; u64 mair;
} arm_lpae_s1_cfg; } arm_lpae_s1_cfg;
struct { struct {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_IOASID_H
#define __LINUX_IOASID_H
#include <linux/types.h>
#include <linux/errno.h>
#define INVALID_IOASID ((ioasid_t)-1)
typedef unsigned int ioasid_t;
typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
struct ioasid_set {
int dummy;
};
/**
* struct ioasid_allocator_ops - IOASID allocator helper functions and data
*
* @alloc: helper function to allocate IOASID
* @free: helper function to free IOASID
* @list: for tracking ops that share helper functions but not data
* @pdata: data belonging to the allocator, provided when calling alloc()
Note: the line above is replaced; see the corrected wording.
*/
struct ioasid_allocator_ops {
ioasid_alloc_fn_t alloc;
ioasid_free_fn_t free;
struct list_head list;
void *pdata;
};
#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
#if IS_ENABLED(CONFIG_IOASID)
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
void *private);
void ioasid_free(ioasid_t ioasid);
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *));
int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
int ioasid_set_data(ioasid_t ioasid, void *data);
#else /* !CONFIG_IOASID */
static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
ioasid_t max, void *private)
{
return INVALID_IOASID;
}
static inline void ioasid_free(ioasid_t ioasid)
{
}
static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *))
{
return NULL;
}
static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
{
return -ENOTSUPP;
}
static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
{
}
static inline int ioasid_set_data(ioasid_t ioasid, void *data)
{
return -ENOTSUPP;
}
#endif /* CONFIG_IOASID */
#endif /* __LINUX_IOASID_H */
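The new <linux/ioasid.h> interface above lets code plug in a custom IOASID (PASID) allocator, for example so a paravirtualized guest can obtain PASIDs from the host instead of from the default allocator. A hedged sketch of registering and using such an allocator follows; my_alloc(), my_free() and my_set are invented names, and the backend they would consult is left abstract.

/* Hedged sketch against the API above; my_alloc()/my_free() stand in for
 * whatever backend (e.g. a request to the host in a paravirtualized guest)
 * a real custom allocator would consult.
 */
#include <linux/errno.h>
#include <linux/ioasid.h>

static ioasid_t my_alloc(ioasid_t min, ioasid_t max, void *data)
{
        /* ask the backend described by @data for an ID in [min, max] */
        return min;     /* placeholder: a real allocator must track used IDs */
}

static void my_free(ioasid_t ioasid, void *data)
{
        /* hand the ID back to the backend */
}

static struct ioasid_allocator_ops my_allocator = {
        .alloc = my_alloc,
        .free  = my_free,
};

static DECLARE_IOASID_SET(my_set);

static int my_use_custom_allocator(void)
{
        ioasid_t id;

        if (ioasid_register_allocator(&my_allocator))
                return -ENODEV;

        /* allocations now go through my_alloc() */
        id = ioasid_alloc(&my_set, 1, 0xffff, NULL);
        if (id != INVALID_IOASID)
                ioasid_free(id);

        ioasid_unregister_allocator(&my_allocator);
        return 0;
}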
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h> #include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0) #define IOMMU_READ (1 << 0)
...@@ -31,11 +32,11 @@ ...@@ -31,11 +32,11 @@
*/ */
#define IOMMU_PRIV (1 << 5) #define IOMMU_PRIV (1 << 5)
/* /*
* Non-coherent masters on few Qualcomm SoCs can use this page protection flag * Non-coherent masters can use this page protection flag to set cacheable
* to set correct cacheability attributes to use an outer level of cache - * memory attributes for only a transparent outer level of cache, also known as
* last level cache, aka system cache. * the last-level or system cache.
*/ */
#define IOMMU_QCOM_SYS_CACHE (1 << 6) #define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops; struct iommu_ops;
struct iommu_group; struct iommu_group;
...@@ -244,7 +245,10 @@ struct iommu_iotlb_gather { ...@@ -244,7 +245,10 @@ struct iommu_iotlb_gather {
* @sva_unbind: Unbind process address space from device * @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle * @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response * @page_response: handle page request response
* @cache_invalidate: invalidate translation caches
* @pgsize_bitmap: bitmap of all possible supported page sizes * @pgsize_bitmap: bitmap of all possible supported page sizes
* @sva_bind_gpasid: bind guest pasid and mm
* @sva_unbind_gpasid: unbind guest pasid and mm
*/ */
struct iommu_ops { struct iommu_ops {
bool (*capable)(enum iommu_cap); bool (*capable)(enum iommu_cap);
...@@ -256,7 +260,7 @@ struct iommu_ops { ...@@ -256,7 +260,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova, int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot); phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather); size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain); void (*flush_iotlb_all)(struct iommu_domain *domain);
...@@ -306,6 +310,12 @@ struct iommu_ops { ...@@ -306,6 +310,12 @@ struct iommu_ops {
int (*page_response)(struct device *dev, int (*page_response)(struct device *dev,
struct iommu_fault_event *evt, struct iommu_fault_event *evt,
struct iommu_page_response *msg); struct iommu_page_response *msg);
int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
struct iommu_cache_invalidate_info *inv_info);
int (*sva_bind_gpasid)(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data);
int (*sva_unbind_gpasid)(struct device *dev, int pasid);
unsigned long pgsize_bitmap; unsigned long pgsize_bitmap;
}; };
...@@ -417,10 +427,19 @@ extern int iommu_attach_device(struct iommu_domain *domain, ...@@ -417,10 +427,19 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev); struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain, extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev); struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot); phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size); size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain, extern size_t iommu_unmap_fast(struct iommu_domain *domain,
...@@ -428,6 +447,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain, ...@@ -428,6 +447,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather); struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot); struct scatterlist *sg,unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token); iommu_fault_handler_t handler, void *token);
...@@ -662,6 +684,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -662,6 +684,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV; return -ENODEV;
} }
static inline int iommu_map_atomic(struct iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
return -ENODEV;
}
static inline size_t iommu_unmap(struct iommu_domain *domain, static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size) unsigned long iova, size_t size)
{ {
...@@ -682,6 +711,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, ...@@ -682,6 +711,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0; return 0;
} }
static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return 0;
}
static inline void iommu_flush_tlb_all(struct iommu_domain *domain) static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{ {
} }
...@@ -1005,6 +1041,25 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle) ...@@ -1005,6 +1041,25 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
return IOMMU_PASID_INVALID; return IOMMU_PASID_INVALID;
} }
static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
{
return -ENODEV;
}
static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data)
{
return -ENODEV;
}
static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, int pasid)
{
return -ENODEV;
}
#endif /* CONFIG_IOMMU_API */ #endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS #ifdef CONFIG_IOMMU_DEBUGFS
......
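The thread running through most of the driver hunks above is that iommu_ops->map() now receives a gfp_t, with iommu_map() keeping its GFP_KERNEL semantics and the new iommu_map_atomic()/iommu_map_sg_atomic() intended for callers that cannot sleep. A hedged caller-side sketch; the domain, lock and addresses are placeholders.

/* Hedged caller-side sketch; dom, lock, iova and pa are placeholders.
 * iommu_map() may sleep for page-table allocations, iommu_map_atomic()
 * is meant for contexts that must not.
 */
#include <linux/iommu.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>

static int map_in_process_context(struct iommu_domain *dom,
                                  unsigned long iova, phys_addr_t pa)
{
        /* normal path: page-table memory may be allocated with GFP_KERNEL */
        return iommu_map(dom, iova, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
}

static int map_under_spinlock(struct iommu_domain *dom, spinlock_t *lock,
                              unsigned long iova, phys_addr_t pa)
{
        int ret;

        /* atomic path: the driver's ->map() is handed an atomic gfp */
        spin_lock(lock);
        ret = iommu_map_atomic(dom, iova, pa, SZ_4K,
                               IOMMU_READ | IOMMU_WRITE);
        spin_unlock(lock);
        return ret;
}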
...@@ -58,6 +58,7 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id); ...@@ -58,6 +58,7 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
#else #else
...@@ -97,6 +98,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; } ...@@ -97,6 +98,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; } static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; } static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; } static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
#endif #endif
......
...@@ -152,4 +152,173 @@ struct iommu_page_response { ...@@ -152,4 +152,173 @@ struct iommu_page_response {
__u32 code; __u32 code;
}; };
/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
};
/**
* struct iommu_inv_addr_info - Address Selective Invalidation Structure
*
* @flags: indicates the granularity of the address-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID and matching the address
* range.
* - If ARCHID bit is set, @archid is populated and the invalidation relates
* to cache entries tagged with this architecture specific ID and matching
* the address range.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - If neither PASID nor ARCHID is set, global addr invalidation applies.
* - The LEAF flag indicates whether only the leaf PTE caching needs to be
* invalidated and other paging structure caches can be preserved.
* @pasid: process address space ID
* @archid: architecture-specific ID
* @addr: first stage/level input address
* @granule_size: page/block size of the mapping in bytes
* @nb_granules: number of contiguous granules to be invalidated
*/
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
__u32 flags;
__u32 archid;
__u64 pasid;
__u64 addr;
__u64 granule_size;
__u64 nb_granules;
};
/**
* struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
*
* @flags: indicates the granularity of the PASID-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID.
* - If the ARCHID bit is set, @archid is populated and the invalidation relates
* to cache entries tagged with this architecture-specific ID.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - At least one of PASID or ARCHID must be set.
* @pasid: process address space ID
* @archid: architecture-specific ID
*/
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
__u32 flags;
__u32 archid;
__u64 pasid;
};
/**
* struct iommu_cache_invalidate_info - First level/stage invalidation
* information
* @version: API version of this structure
* @cache: bitfield that selects which caches to invalidate
* @granularity: defines the lowest granularity used for the invalidation:
* domain > PASID > addr
* @padding: reserved for future use (should be zero)
* @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
* @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
*
* Not all the combinations of cache/granularity are valid:
*
* +--------------+---------------+---------------+---------------+
* | type / | DEV_IOTLB | IOTLB | PASID |
* | granularity | | | cache |
* +==============+===============+===============+===============+
* | DOMAIN | N/A | Y | Y |
* +--------------+---------------+---------------+---------------+
* | PASID | Y | Y | Y |
* +--------------+---------------+---------------+---------------+
* | ADDR | Y | Y | N/A |
* +--------------+---------------+---------------+---------------+
*
* Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
* @version and @cache.
*
* If multiple cache types are invalidated simultaneously, they all
* must support the used granularity.
*/
struct iommu_cache_invalidate_info {
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
__u32 version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR (3)
__u8 cache;
__u8 granularity;
__u8 padding[2];
union {
struct iommu_inv_pasid_info pasid_info;
struct iommu_inv_addr_info addr_info;
};
};
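A hedged illustration of how a caller (typically a VFIO/VMM path) might fill the structure above for a PASID-tagged, page-selective IOTLB invalidation and pass it to iommu_cache_invalidate(), which this series declares in <linux/iommu.h>; the PASID, address and granule counts below are invented.

/* Hedged sketch; the concrete PASID, address and granule values are made up. */
#include <linux/iommu.h>
#include <linux/sizes.h>

static int invalidate_guest_range(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct iommu_cache_invalidate_info inv_info = {
                .version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
                .cache       = IOMMU_CACHE_INV_TYPE_IOTLB,
                .granularity = IOMMU_INV_GRANU_ADDR,
                .addr_info = {
                        .flags        = IOMMU_INV_ADDR_FLAGS_PASID,
                        .pasid        = 42,           /* made-up guest PASID */
                        .addr         = 0x100000,     /* made-up IOVA */
                        .granule_size = SZ_4K,
                        .nb_granules  = 16,           /* 64 KiB of 4 KiB pages */
                },
        };

        return iommu_cache_invalidate(domain, dev, &inv_info);
}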
/**
* struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
* SVA binding.
*
* @flags: VT-d PASID table entry attributes
* @pat: Page attribute table data to compute effective memory type
* @emt: Extended memory type
*
* Only guest vIOMMU selectable and effective options are passed down to
* the host IOMMU.
*/
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
__u64 flags;
__u32 pat;
__u32 emt;
};
/**
* struct iommu_gpasid_bind_data - Information about device and guest PASID binding
* @version: Version of this data structure
* @format: PASID table entry format
* @flags: Additional information on guest bind request
* @gpgd: Guest page directory base of the guest mm to bind
* @hpasid: Process address space ID used for the guest mm in host IOMMU
* @gpasid: Process address space ID used for the guest mm in guest IOMMU
* @addr_width: Guest virtual address width
* @padding: Reserved for future use (should be zero)
* @vtd: Intel VT-d specific data
*
* Guest-to-host PASID mapping can be identity or non-identity, where the guest
* has its own PASID space. For a non-identity mapping, a guest-to-host PASID
* lookup is needed when the VM programs a guest PASID into an assigned device.
* The VMM may trap such PASID programming and then request the host IOMMU
* driver to convert the guest PASID to a host PASID based on this bind data.
*/
struct iommu_gpasid_bind_data {
#define IOMMU_GPASID_BIND_VERSION_1 1
__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD 1
__u32 format;
#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
__u64 flags;
__u64 gpgd;
__u64 hpasid;
__u64 gpasid;
__u32 addr_width;
__u8 padding[12];
/* Vendor specific data */
union {
struct iommu_gpasid_bind_data_vtd vtd;
};
};
#endif /* _UAPI_IOMMU_H */ #endif /* _UAPI_IOMMU_H */
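A matching hedged sketch of binding a guest PASID with the structure above through iommu_sva_bind_gpasid(), which this series adds to <linux/iommu.h>; every concrete value here is invented for illustration.

/* Hedged sketch; gpgd, the PASIDs and the address width are invented. */
#include <linux/iommu.h>

static int bind_guest_pasid(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_gpasid_bind_data data = {
                .version    = IOMMU_GPASID_BIND_VERSION_1,
                .format     = IOMMU_PASID_FORMAT_INTEL_VTD,
                .flags      = IOMMU_SVA_GPASID_VAL,  /* guest PASID is valid */
                .gpgd       = 0x12340000,            /* guest page-directory base (invented) */
                .hpasid     = 5,                     /* host PASID (invented) */
                .gpasid     = 7,                     /* guest PASID (invented) */
                .addr_width = 48,
        };

        return iommu_sva_bind_gpasid(domain, dev, &data);
}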