Commit 2cd83ba5 authored by Linus Torvalds

Merge tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio

Pull IOMMU updates from Alex Williamson:
 "As Joerg mentioned[1], he's out on paternity leave through the end of
  the year and I'm filling in for him in the interim:

   - Enforce MSI multiple IRQ alignment in AMD IOMMU

   - VT-d PASID error handling fixes

   - Add r8a7795 IPMMU support

   - Manage runtime PM links on exynos at {add,remove}_device callbacks

   - Fix Mediatek driver name to avoid conflict

   - Add terminate support to qcom fault handler

   - 64-bit IOVA optimizations

   - Simplify IOVA domain destruction, better use of rcache, and skip
     anchor nodes on copy

   - Convert to IOMMU TLB sync API in io-pgtable-arm{-v7s}

   - Drop command queue lock when waiting for CMD_SYNC completion on ARM
     SMMU implementations supporting MSI to cacheable memory

   - ipmmu-vmsa cleanup inspired by missed IOTLB sync callbacks

   - Fix sleeping lock with preemption disabled for RT

   - Dual MMU support for TI DRA7xx DSPs

   - Optional rcache-flush flag on IOVA allocation, avoiding flush
     overhead when the caller can fall back to other options (a usage
     sketch follows the commit header below)

  [1] https://lkml.org/lkml/2017/10/22/72"

* tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio: (54 commits)
  iommu/iova: Use raw_cpu_ptr() instead of get_cpu_ptr() for ->fq
  iommu/mediatek: Fix driver name
  iommu/ipmmu-vmsa: Hook up r8a7795 DT matching code
  iommu/ipmmu-vmsa: Allow two bit SL0
  iommu/ipmmu-vmsa: Make IMBUSCTR setup optional
  iommu/ipmmu-vmsa: Write IMCTR twice
  iommu/ipmmu-vmsa: IPMMU device is 40-bit bus master
  iommu/ipmmu-vmsa: Make use of IOMMU_OF_DECLARE()
  iommu/ipmmu-vmsa: Enable multi context support
  iommu/ipmmu-vmsa: Add optional root device feature
  iommu/ipmmu-vmsa: Introduce features, break out alias
  iommu/ipmmu-vmsa: Unify ipmmu_ops
  iommu/ipmmu-vmsa: Clean up struct ipmmu_vmsa_iommu_priv
  iommu/ipmmu-vmsa: Simplify group allocation
  iommu/ipmmu-vmsa: Unify domain alloc/free
  iommu/ipmmu-vmsa: Fix return value check in ipmmu_find_group_dma()
  iommu/vt-d: Clear pasid table entry when memory unbound
  iommu/vt-d: Clear Page Request Overflow fault bit
  iommu/vt-d: Missing checks for pasid tables if allocation fails
  iommu/amd: Limit the IOVA page range to the specified addresses
  ...
parents 670ffccb 56f19441
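
Much of the mechanical churn in the diffs below comes from two IOVA API changes called out above: init_iova_domain() loses its end/32-bit PFN argument, and alloc_iova_fast() gains a flush_rcache flag so that the per-CPU rcaches are only flushed on a caller's final attempt. A minimal caller-side sketch of the new signatures, following the AMD/VT-d/dma-iommu pattern in this diff; the domain and function names here are illustrative, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/iova.h>

/* Hypothetical driver-private IOVA domain, for illustration only. */
static struct iova_domain example_iovad;

static void example_iovad_init(void)
{
        /*
         * No end/32-bit PFN argument any more: the domain spans the full
         * space and internally keeps separate cached nodes for 32-bit
         * and 64-bit allocations.
         */
        init_iova_domain(&example_iovad, PAGE_SIZE, 1 /* start_pfn */);
}

static unsigned long example_alloc_pfn(unsigned long pages, u64 dma_mask)
{
        unsigned long pfn = 0;

        /* First try below 4 GiB without flushing the per-CPU rcaches... */
        if (dma_mask > DMA_BIT_MASK(32))
                pfn = alloc_iova_fast(&example_iovad, pages,
                                      DMA_BIT_MASK(32) >> PAGE_SHIFT, false);

        /* ...and only pay for an rcache flush on the final, full-mask try. */
        if (!pfn)
                pfn = alloc_iova_fast(&example_iovad, pages,
                                      dma_mask >> PAGE_SHIFT, true);

        return pfn;
}
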
......@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
carveout_start >> order,
carveout_end >> order);
carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
......
......@@ -198,8 +198,7 @@ static int host1x_probe(struct platform_device *pdev)
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order,
geometry->aperture_start >> order,
geometry->aperture_end >> order);
geometry->aperture_start >> order);
host->iova_end = geometry->aperture_end;
}
......
......@@ -63,7 +63,6 @@
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
......@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
if (dma_mask > DMA_BIT_MASK(32))
pfn = alloc_iova_fast(&dma_dom->iovad, pages,
IOVA_PFN(DMA_BIT_MASK(32)));
IOVA_PFN(DMA_BIT_MASK(32)), false);
if (!pfn)
pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
pfn = alloc_iova_fast(&dma_dom->iovad, pages,
IOVA_PFN(dma_mask), true);
return (pfn << PAGE_SHIFT);
}
......@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
if (!dma_dom->domain.pt_root)
goto free_dma_dom;
init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
IOVA_START_PFN, DMA_32BIT_PFN);
init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
goto free_dma_dom;
......@@ -2383,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
size_t size,
int dir)
{
dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
......@@ -2696,8 +2693,7 @@ static int init_reserved_iova_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *val;
init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
IOVA_START_PFN, DMA_32BIT_PFN);
init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
&reserved_rbtree_key);
......@@ -3155,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
unsigned long start, end;
start = IOVA_PFN(region->start);
end = IOVA_PFN(region->start + region->length);
end = IOVA_PFN(region->start + region->length - 1);
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
......@@ -3663,11 +3659,11 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
return table;
}
static int alloc_irq_index(u16 devid, int count)
static int alloc_irq_index(u16 devid, int count, bool align)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
int index, c;
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
......@@ -3677,16 +3673,21 @@ static int alloc_irq_index(u16 devid, int count)
if (!table)
return -ENODEV;
if (align)
alignment = roundup_pow_of_two(count);
spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
for (c = 0, index = table->min_index;
index < MAX_IRQS_PER_TABLE;
++index) {
if (!iommu->irte_ops->is_allocated(table, index))
for (index = ALIGN(table->min_index, alignment), c = 0;
index < MAX_IRQS_PER_TABLE;) {
if (!iommu->irte_ops->is_allocated(table, index)) {
c += 1;
else
} else {
c = 0;
index = ALIGN(index + 1, alignment);
continue;
}
if (c == count) {
for (; c != 0; --c)
......@@ -3695,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count)
index -= count - 1;
goto out;
}
index++;
}
index = -ENOSPC;
......@@ -4099,7 +4102,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
else
ret = -ENOMEM;
} else {
index = alloc_irq_index(devid, nr_irqs);
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
index = alloc_irq_index(devid, nr_irqs, align);
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
......
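
The rewritten scan in alloc_irq_index() above is the substance of the "Enforce MSI multiple IRQ alignment" item: when allocating several IRTEs for multi-MSI, the first index must be aligned to the next power of two greater than or equal to the count. A stand-alone sketch of the same search, with the per-IOMMU irte_ops->is_allocated() check replaced by a plain boolean array purely for illustration:

#include <stdbool.h>

#define MAX_IRQS_PER_TABLE      256     /* IRTE table size in the AMD driver */
#define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))

/* Same intent as the kernel's roundup_pow_of_two(), open-coded here. */
static unsigned int roundup_pow2(unsigned int x)
{
        unsigned int p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

/*
 * Find 'count' consecutive free slots at or after 'min_index'. If 'align'
 * is set (the MSI case), the first slot of the run must be aligned to
 * roundup_pow2(count). Returns the start index, or -1 if none is found.
 */
static int find_irq_index(const bool *allocated, unsigned int min_index,
                          unsigned int count, bool align)
{
        unsigned int alignment = align ? roundup_pow2(count) : 1;
        unsigned int index, c = 0;

        for (index = ALIGN_UP(min_index, alignment);
             index < MAX_IRQS_PER_TABLE;) {
                if (!allocated[index]) {
                        c++;
                } else {
                        /* Run broken: restart at the next aligned slot. */
                        c = 0;
                        index = ALIGN_UP(index + 1, alignment);
                        continue;
                }
                if (c == count)
                        return (int)(index - (count - 1));
                index++;
        }
        return -1;
}
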
......@@ -59,6 +59,7 @@
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
......@@ -119,14 +120,6 @@ enum arm_smmu_implementation {
CAVIUM_SMMUV2,
};
/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5
#endif
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
......@@ -250,6 +243,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
......@@ -735,7 +729,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
const struct iommu_gather_ops *tlb_ops;
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
......@@ -813,7 +806,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
tlb_ops = &arm_smmu_s1_tlb_ops;
smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
......@@ -833,9 +826,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
oas = min(oas, 40UL);
}
if (smmu->version == ARM_SMMU_V2)
tlb_ops = &arm_smmu_s2_tlb_ops_v2;
smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
else
tlb_ops = &arm_smmu_s2_tlb_ops_v1;
smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
......@@ -863,7 +856,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.tlb = tlb_ops,
.tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
......@@ -1259,6 +1252,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (smmu_domain->tlb_ops)
smmu_domain->tlb_ops->tlb_sync(smmu_domain);
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
......@@ -1562,6 +1563,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
......@@ -1606,7 +1609,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
* Allow unmatched Stream IDs to allocate bypass
* TLB entries for reduced latency.
*/
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
......
......@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* ...then finally give it a kicking to make sure it fits */
base_pfn = max_t(unsigned long, base_pfn,
domain->geometry.aperture_start >> order);
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
/*
* PCI devices may have larger DMA masks, but still prefer allocating
* within a 32-bit mask to avoid DAC addressing. Such limitations don't
* apply to the typical platform device, so for those we may as well
* leave the cache limit at the top of their range to save an rb_last()
* traversal on every allocation.
*/
if (dev && dev_is_pci(dev))
end_pfn &= DMA_BIT_MASK(32) >> order;
/* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
......@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
/*
* If we have devices with different DMA masks, move the free
* area cache limit down for the benefit of the smaller one.
*/
iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
return 0;
}
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
init_iova_domain(iovad, 1UL << order, base_pfn);
if (!dev)
return 0;
......@@ -386,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
if (!iova)
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
true);
return (dma_addr_t)iova << shift;
}
......
......@@ -801,13 +801,16 @@ int __init dmar_dev_scope_init(void)
dmar_free_pci_notify_info(info);
}
}
bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
return dmar_dev_scope_status;
}
void dmar_register_bus_notifier(void)
{
bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
int __init dmar_table_init(void)
{
......@@ -1676,7 +1679,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
iommu->reg + DMAR_FSTS_REG);
unlock_exit:
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
......
......@@ -263,6 +263,7 @@ struct exynos_iommu_domain {
struct sysmmu_drvdata {
struct device *sysmmu; /* SYSMMU controller device */
struct device *master; /* master device (owner) */
struct device_link *link; /* runtime PM link to master */
void __iomem *sfrbase; /* our registers */
struct clk *clk; /* SYSMMU's clock */
struct clk *aclk; /* SYSMMU's aclk clock */
......@@ -1250,6 +1251,8 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
static int exynos_iommu_add_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
struct iommu_group *group;
if (!has_sysmmu(dev))
......@@ -1260,6 +1263,15 @@ static int exynos_iommu_add_device(struct device *dev)
if (IS_ERR(group))
return PTR_ERR(group);
list_for_each_entry(data, &owner->controllers, owner_node) {
/*
* SYSMMU will be runtime activated via device link
* (dependency) to its master device, so there are no
* direct calls to pm_runtime_get/put in this driver.
*/
data->link = device_link_add(dev, data->sysmmu,
DL_FLAG_PM_RUNTIME);
}
iommu_group_put(group);
return 0;
......@@ -1268,6 +1280,7 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
return;
......@@ -1283,6 +1296,9 @@ static void exynos_iommu_remove_device(struct device *dev)
}
}
iommu_group_remove_device(dev);
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
}
static int exynos_iommu_of_xlate(struct device *dev,
......@@ -1316,13 +1332,6 @@ static int exynos_iommu_of_xlate(struct device *dev,
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
/*
* SYSMMU will be runtime activated via device link (dependency) to its
* master device, so there are no direct calls to pm_runtime_get/put
* in this driver.
*/
device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
return 0;
}
......
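
The exynos change above only moves where the runtime-PM device link is created (from the of_xlate callback to add_device) and adds the matching teardown in remove_device; the mechanism itself is the generic driver-core device link. A minimal sketch of that pattern with illustrative wrapper names:

#include <linux/device.h>

/*
 * Link the SYSMMU (supplier) to its master (consumer) so the driver core
 * runtime-resumes the SYSMMU before the master and lets it suspend after;
 * the IOMMU driver then needs no pm_runtime_get()/put() calls of its own.
 */
static struct device_link *example_link_sysmmu(struct device *master,
                                               struct device *sysmmu)
{
        return device_link_add(master, sysmmu, DL_FLAG_PM_RUNTIME);
}

static void example_unlink_sysmmu(struct device_link *link)
{
        if (link)
                device_link_del(link);
}
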
......@@ -82,8 +82,6 @@
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE (9)
......@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
......@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
unsigned long sagaw;
int err;
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
err = init_iova_flush_queue(&domain->iovad,
iommu_flush_iova, iova_entry_free);
......@@ -2058,7 +2054,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
if (did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
......@@ -3473,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
* from higher range
*/
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(DMA_BIT_MASK(32)));
IOVA_PFN(DMA_BIT_MASK(32)), false);
if (iova_pfn)
return iova_pfn;
}
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
......@@ -4752,6 +4749,16 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
up_write(&dmar_global_lock);
/*
* The bus notifier takes the dmar_global_lock, so lockdep will
* complain later when we register it under the lock.
*/
dmar_register_bus_notifier();
down_write(&dmar_global_lock);
if (no_iommu || dmar_disabled) {
/*
* We exit the function here to ensure IOMMU's remapping and
......@@ -4897,8 +4904,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
......
......@@ -292,7 +292,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
int pasid_max;
int ret;
if (WARN_ON(!iommu))
if (WARN_ON(!iommu || !iommu->pasid_table))
return -EINVAL;
if (dev_is_pci(dev)) {
......@@ -458,6 +458,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
svm->iommu->pasid_table[svm->pasid].val = 0;
wmb();
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
......
......@@ -660,16 +660,11 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
size_t unmapped;
if (WARN_ON(upper_32_bits(iova)))
return 0;
unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
return unmapped;
return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
......
......@@ -609,7 +609,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
......@@ -617,11 +616,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
return unmapped;
return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
......
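
With the two hunks above, the ARM io-pgtable back ends stop issuing a TLB sync inside every unmap; the sync is deferred to the driver's ->iotlb_sync callback (see the arm-smmu, mediatek and qcom hooks elsewhere in this diff). A minimal caller-side sketch of the deferred pattern using the generic iommu_unmap_fast()/iommu_tlb_sync() interface; the range-walking helper and its fixed page-size argument are illustrative:

#include <linux/iommu.h>

/*
 * Unmap a range page by page without a per-call TLB sync, then issue a
 * single sync at the end. With the change above, drivers built on
 * io-pgtable-arm(-v7s) no longer sync inside ->unmap, so the invalidation
 * cost is paid once per batch rather than once per page.
 */
static size_t example_unmap_range(struct iommu_domain *domain,
                                  unsigned long iova, size_t size,
                                  size_t pgsize)
{
        size_t unmapped = 0;

        while (unmapped < size) {
                if (!iommu_unmap_fast(domain, iova + unmapped, pgsize))
                        break;
                unmapped += pgsize;
        }

        /* Ends up in the driver's ->iotlb_sync, e.g. arm_smmu_iotlb_sync(). */
        iommu_tlb_sync(domain);

        return unmapped;
}
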
......@@ -392,6 +392,11 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
return unmapsz;
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
......@@ -491,6 +496,8 @@ static struct iommu_ops mtk_iommu_ops = {
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.map_sg = default_iommu_map_sg,
.flush_iotlb_all = mtk_iommu_iotlb_sync,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
......
......@@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
.remove = mtk_iommu_remove,
.driver = {
.name = "mtk-iommu",
.name = "mtk-iommu-v1",
.of_match_table = mtk_iommu_of_ids,
.pm = &mtk_iommu_pm_ops,
}
......
......@@ -28,18 +28,27 @@ struct iotlb_entry {
u32 endian, elsz, mixed;
};
/**
* struct omap_iommu_device - omap iommu device data
* @pgtable: page table used by an omap iommu attached to a domain
* @iommu_dev: pointer to store an omap iommu instance attached to a domain
*/
struct omap_iommu_device {
u32 *pgtable;
struct omap_iommu *iommu_dev;
};
/**
* struct omap_iommu_domain - omap iommu domain
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
* @num_iommus: number of iommus in this domain
* @iommus: omap iommu device data for all iommus in this domain
* @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
* @domain: generic domain handle used by iommu core code
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
u32 num_iommus;
struct omap_iommu_device *iommus;
struct device *dev;
spinlock_t lock;
struct iommu_domain domain;
......@@ -97,17 +106,6 @@ struct iotlb_lock {
short vict;
};
/**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device
*/
static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
return arch_data->iommu_dev;
}
/*
* MMU Register offsets
*/
......
......@@ -66,6 +66,7 @@ struct qcom_iommu_ctx {
void __iomem *base;
bool secure_init;
u8 asid; /* asid and ctx bank # are 1:1 */
struct iommu_domain *domain;
};
struct qcom_iommu_domain {
......@@ -194,12 +195,15 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
dev_err_ratelimited(ctx->dev,
"Unhandled context fault: fsr=0x%x, "
"iova=0x%016llx, fsynr=0x%x, cb=%d\n",
fsr, iova, fsynr, ctx->asid);
}
iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
return IRQ_HANDLED;
}
......@@ -274,12 +278,14 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
SCTLR_M | SCTLR_S1_ASIDPNE;
SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
if (IS_ENABLED(CONFIG_BIG_ENDIAN))
reg |= SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
ctx->domain = domain;
}
mutex_unlock(&qcom_domain->init_mutex);
......@@ -395,6 +401,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
......@@ -443,6 +451,19 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
struct io_pgtable, ops);
if (!qcom_domain->pgtbl_ops)
return;
pm_runtime_get_sync(qcom_domain->iommu->dev);
qcom_iommu_tlb_sync(pgtable->cookie);
pm_runtime_put_sync(qcom_domain->iommu->dev);
}
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
......@@ -570,6 +591,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.map_sg = default_iommu_map_sg,
.flush_iotlb_all = qcom_iommu_iotlb_sync,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.add_device = qcom_iommu_add_device,
.remove_device = qcom_iommu_remove_device,
......
......@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
struct scif_endpt_rma_info *rma = &ep->rma_info;
mutex_init(&rma->rma_lock);
init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
SCIF_DMA_64BIT_PFN);
init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
spin_lock_init(&rma->tc_lock);
mutex_init(&rma->mmn_lock);
INIT_LIST_HEAD(&rma->reg_list);
......
......@@ -112,6 +112,7 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
......
......@@ -212,6 +212,7 @@
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
#define DMA_FSTS_PRO (1 << 7)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
......
......@@ -70,10 +70,12 @@ struct iova_fq {
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
struct rb_node *cached_node; /* Save last alloced node */
struct rb_node *cached32_node; /* Save last 32-bit alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
......@@ -148,12 +150,12 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn);
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit);
unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
......@@ -210,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn)
unsigned long limit_pfn,
bool flush_rcache)
{
return 0;
}
......@@ -229,8 +232,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn,
unsigned long pfn_32bit)
unsigned long start_pfn)
{
}
......