Commit 91838e2d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'iommu-updates-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time the updates contain:

   - Tracepoints for certain IOMMU-API functions to make their use
     easier to debug
   - A tracepoint for IOMMU page faults to make it easier to get them in
     user space
   - Updates and fixes for the new ARM SMMU driver after the first
     hardware showed up
   - Various other fixes and cleanups in other IOMMU drivers"

* tag 'iommu-updates-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (26 commits)
  iommu/shmobile: Enable the driver on all ARM platforms
  iommu/tegra-smmu: Staticize tegra_smmu_pm_ops
  iommu/tegra-gart: Staticize tegra_gart_pm_ops
  iommu/vt-d: Use list_for_each_entry_safe() for dmar_domain->devices traversal
  iommu/vt-d: Use for_each_drhd_unit() instead of list_for_each_entry()
  iommu/vt-d: Fixed interaction of VFIO_IOMMU_MAP_DMA with IOMMU address limits
  iommu/arm-smmu: Clear global and context bank fault status registers
  iommu/arm-smmu: Print context fault information
  iommu/arm-smmu: Check for num_context_irqs > 0 to avoid divide by zero exception
  iommu/arm-smmu: Refine check for proper size of mapped region
  iommu/arm-smmu: Switch to subsys_initcall for driver registration
  iommu/arm-smmu: use relaxed accessors where possible
  iommu/arm-smmu: replace devm_request_and_ioremap by devm_ioremap_resource
  iommu: Remove stack trace from broken irq remapping warning
  iommu: Change iommu driver to call io_page_fault trace event
  iommu: Add iommu_error class event to iommu trace
  iommu/tegra: gart: cleanup devm_* functions usage
  iommu/tegra: Print phys_addr_t using %pa
  iommu: No need to pass '0x' when '%pa' is used
  iommu: Change iommu driver to call unmap trace event
  ...
parents f0804804 bb51eeee
...@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB ...@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB
config SHMOBILE_IOMMU config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI" bool "IOMMU for Renesas IPMMU/IPMMUI"
default n default n
depends on (ARM && ARCH_SHMOBILE) depends on ARM || COMPILE_TEST
select IOMMU_API select IOMMU_API
select ARM_DMA_USE_IOMMU select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU select SHMOBILE_IPMMU
......
obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
......
...@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) ...@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
resume = RESUME_RETRY; resume = RESUME_RETRY;
} else { } else {
dev_err_ratelimited(smmu->dev,
"Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
iova, fsynr, root_cfg->cbndx);
ret = IRQ_NONE; ret = IRQ_NONE;
resume = RESUME_TERMINATE; resume = RESUME_TERMINATE;
} }
...@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) ...@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
reg |= SCTLR_E; reg |= SCTLR_E;
#endif #endif
writel(reg, cb_base + ARM_SMMU_CB_SCTLR); writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
} }
static int arm_smmu_init_domain_context(struct iommu_domain *domain, static int arm_smmu_init_domain_context(struct iommu_domain *domain,
...@@ -1562,9 +1565,13 @@ static struct iommu_ops arm_smmu_ops = { ...@@ -1562,9 +1565,13 @@ static struct iommu_ops arm_smmu_ops = {
static void arm_smmu_device_reset(struct arm_smmu_device *smmu) static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{ {
void __iomem *gr0_base = ARM_SMMU_GR0(smmu); void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR; void __iomem *cb_base;
int i = 0; int i = 0;
u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); u32 reg;
/* Clear Global FSR */
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
/* Mark all SMRn as invalid and all S2CRn as bypass */ /* Mark all SMRn as invalid and all S2CRn as bypass */
for (i = 0; i < smmu->num_mapping_groups; ++i) { for (i = 0; i < smmu->num_mapping_groups; ++i) {
...@@ -1572,33 +1579,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) ...@@ -1572,33 +1579,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
} }
/* Make sure all context banks are disabled */ /* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) for (i = 0; i < smmu->num_context_banks; ++i) {
writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i)); cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
}
/* Invalidate the TLB, just in case */ /* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
/* Enable fault reporting */ /* Enable fault reporting */
scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
/* Disable TLB broadcasting. */ /* Disable TLB broadcasting. */
scr0 |= (sCR0_VMIDPNE | sCR0_PTM); reg |= (sCR0_VMIDPNE | sCR0_PTM);
/* Enable client access, but bypass when no mapping is found */ /* Enable client access, but bypass when no mapping is found */
scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG); reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
/* Disable forced broadcasting */ /* Disable forced broadcasting */
scr0 &= ~sCR0_FB; reg &= ~sCR0_FB;
/* Don't upgrade barriers */ /* Don't upgrade barriers */
scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
/* Push the button */ /* Push the button */
arm_smmu_tlb_sync(smmu); arm_smmu_tlb_sync(smmu);
writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0); writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
} }
static int arm_smmu_id_size_to_bits(int size) static int arm_smmu_id_size_to_bits(int size)
...@@ -1703,13 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) ...@@ -1703,13 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
/* Check that we ioremapped enough */ /* Check for size mismatch of SMMU address space from mapped region */
size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
size *= (smmu->pagesize << 1); size *= (smmu->pagesize << 1);
if (smmu->size < size) if (smmu->size != size)
dev_warn(smmu->dev, dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
"device is 0x%lx bytes but only mapped 0x%lx!\n", "from mapped region size (0x%lx)!\n", size, smmu->size);
size, smmu->size);
smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
ID1_NUMS2CB_MASK; ID1_NUMS2CB_MASK;
...@@ -1784,15 +1795,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) ...@@ -1784,15 +1795,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->dev = dev; smmu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) { smmu->base = devm_ioremap_resource(dev, res);
dev_err(dev, "missing base address/size\n"); if (IS_ERR(smmu->base))
return -ENODEV; return PTR_ERR(smmu->base);
}
smmu->size = resource_size(res); smmu->size = resource_size(res);
smmu->base = devm_request_and_ioremap(dev, res);
if (!smmu->base)
return -EADDRNOTAVAIL;
if (of_property_read_u32(dev->of_node, "#global-interrupts", if (of_property_read_u32(dev->of_node, "#global-interrupts",
&smmu->num_global_irqs)) { &smmu->num_global_irqs)) {
...@@ -1807,12 +1813,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) ...@@ -1807,12 +1813,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->num_context_irqs++; smmu->num_context_irqs++;
} }
if (num_irqs < smmu->num_global_irqs) { if (!smmu->num_context_irqs) {
dev_warn(dev, "found %d interrupts but expected at least %d\n", dev_err(dev, "found %d interrupts but expected at least %d\n",
num_irqs, smmu->num_global_irqs); num_irqs, smmu->num_global_irqs + 1);
smmu->num_global_irqs = num_irqs; return -ENODEV;
} }
smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
GFP_KERNEL); GFP_KERNEL);
...@@ -1936,7 +1941,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) ...@@ -1936,7 +1941,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
free_irq(smmu->irqs[i], smmu); free_irq(smmu->irqs[i], smmu);
/* Turn the thing off */ /* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0); writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
return 0; return 0;
} }
...@@ -1984,7 +1989,7 @@ static void __exit arm_smmu_exit(void) ...@@ -1984,7 +1989,7 @@ static void __exit arm_smmu_exit(void)
return platform_driver_unregister(&arm_smmu_driver); return platform_driver_unregister(&arm_smmu_driver);
} }
module_init(arm_smmu_init); subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit); module_exit(arm_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
......
...@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) ...@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
dev = pci_physfn(dev); dev = pci_physfn(dev);
list_for_each_entry(dmaru, &dmar_drhd_units, list) { for_each_drhd_unit(dmaru) {
drhd = container_of(dmaru->hdr, drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit, struct acpi_dmar_hardware_unit,
header); header);
......
...@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, ...@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
int offset; int offset;
BUG_ON(!domain->pgd); BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
if (addr_width < BITS_PER_LONG && pfn >> addr_width)
/* Address beyond IOMMU's addressing capabilities. */
return NULL;
parent = domain->pgd; parent = domain->pgd;
while (level > 0) { while (level > 0) {
...@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, ...@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
static void domain_remove_one_dev_info(struct dmar_domain *domain, static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev) struct pci_dev *pdev)
{ {
struct device_domain_info *info; struct device_domain_info *info, *tmp;
struct intel_iommu *iommu; struct intel_iommu *iommu;
unsigned long flags; unsigned long flags;
int found = 0; int found = 0;
struct list_head *entry, *tmp;
iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
pdev->devfn); pdev->devfn);
...@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, ...@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
return; return;
spin_lock_irqsave(&device_domain_lock, flags); spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) { list_for_each_entry_safe(info, tmp, &domain->devices, link) {
info = list_entry(entry, struct device_domain_info, link);
if (info->segment == pci_domain_nr(pdev->bus) && if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number && info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) { info->devfn == pdev->devfn) {
......
...@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void) ...@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
if (disable_irq_remap) if (disable_irq_remap)
return 0; return 0;
if (irq_remap_broken) { if (irq_remap_broken) {
WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, printk(KERN_WARNING
"This system BIOS has enabled interrupt remapping\n" "This system BIOS has enabled interrupt remapping\n"
"on a chipset that contains an erratum making that\n" "on a chipset that contains an erratum making that\n"
"feature unstable. To maintain system stability\n" "feature unstable. To maintain system stability\n"
"interrupt remapping is being disabled. Please\n" "interrupt remapping is being disabled. Please\n"
"contact your BIOS vendor for an update\n"); "contact your BIOS vendor for an update\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
disable_irq_remap = 1; disable_irq_remap = 1;
return 0; return 0;
} }
......
/*
 * iommu trace points
 *
 * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
 *
 */
#include <linux/string.h>
#include <linux/types.h>
/*
 * Defining CREATE_TRACE_POINTS before pulling in the trace header makes
 * this (and only this) translation unit emit the actual tracepoint
 * definitions declared in <trace/events/iommu.h>, instead of just their
 * declarations.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/iommu.h>
/* iommu_group_event: device added to / removed from an IOMMU group */
EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
/* iommu_device_event: device attached to / detached from a domain */
EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
/* iommu_map_unmap: iommu_map()/iommu_unmap() activity */
EXPORT_TRACEPOINT_SYMBOL_GPL(map);
EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
/* iommu_error: IOMMU page faults reported via report_iommu_fault() */
EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/err.h> #include <linux/err.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset; static struct kset *iommu_group_kset;
static struct ida iommu_group_ida; static struct ida iommu_group_ida;
...@@ -363,6 +364,8 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) ...@@ -363,6 +364,8 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
/* Notify any listeners about change to group. */ /* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier, blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
trace_add_device_to_group(group->id, dev);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(iommu_group_add_device); EXPORT_SYMBOL_GPL(iommu_group_add_device);
...@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev) ...@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
sysfs_remove_link(group->devices_kobj, device->name); sysfs_remove_link(group->devices_kobj, device->name);
sysfs_remove_link(&dev->kobj, "iommu_group"); sysfs_remove_link(&dev->kobj, "iommu_group");
trace_remove_device_from_group(group->id, dev);
kfree(device->name); kfree(device->name);
kfree(device); kfree(device);
dev->iommu_group = NULL; dev->iommu_group = NULL;
...@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free); ...@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev) int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{ {
int ret;
if (unlikely(domain->ops->attach_dev == NULL)) if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV; return -ENODEV;
return domain->ops->attach_dev(domain, dev); ret = domain->ops->attach_dev(domain, dev);
if (!ret)
trace_attach_device_to_domain(dev);
return ret;
} }
EXPORT_SYMBOL_GPL(iommu_attach_device); EXPORT_SYMBOL_GPL(iommu_attach_device);
...@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) ...@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
return; return;
domain->ops->detach_dev(domain, dev); domain->ops->detach_dev(domain, dev);
trace_detach_device_from_domain(dev);
} }
EXPORT_SYMBOL_GPL(iommu_detach_device); EXPORT_SYMBOL_GPL(iommu_detach_device);
...@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
* size of the smallest page supported by the hardware * size of the smallest page supported by the hardware
*/ */
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n", pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, min_pagesz); iova, &paddr, size, min_pagesz);
return -EINVAL; return -EINVAL;
} }
pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size); pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) { while (size) {
size_t pgsize = iommu_pgsize(domain, iova | paddr, size); size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n", pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize); iova, &paddr, pgsize);
ret = domain->ops->map(domain, iova, paddr, pgsize, prot); ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
...@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
/* unroll mapping in case something went wrong */ /* unroll mapping in case something went wrong */
if (ret) if (ret)
iommu_unmap(domain, orig_iova, orig_size - size); iommu_unmap(domain, orig_iova, orig_size - size);
else
trace_map(iova, paddr, size);
return ret; return ret;
} }
...@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) ...@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page; unmapped += unmapped_page;
} }
trace_unmap(iova, 0, size);
return unmapped; return unmapped;
} }
EXPORT_SYMBOL_GPL(iommu_unmap); EXPORT_SYMBOL_GPL(iommu_unmap);
......
...@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_lock_irqsave(&gart->pte_lock, flags); spin_lock_irqsave(&gart->pte_lock, flags);
pfn = __phys_to_pfn(pa); pfn = __phys_to_pfn(pa);
if (!pfn_valid(pfn)) { if (!pfn_valid(pfn)) {
dev_err(gart->dev, "Invalid page: %08x\n", pa); dev_err(gart->dev, "Invalid page: %pa\n", &pa);
spin_unlock_irqrestore(&gart->pte_lock, flags); spin_unlock_irqrestore(&gart->pte_lock, flags);
return -EINVAL; return -EINVAL;
} }
...@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
pa = (pte & GART_PAGE_MASK); pa = (pte & GART_PAGE_MASK);
if (!pfn_valid(__phys_to_pfn(pa))) { if (!pfn_valid(__phys_to_pfn(pa))) {
dev_err(gart->dev, "No entry for %08llx:%08x\n", dev_err(gart->dev, "No entry for %08llx:%pa\n",
(unsigned long long)iova, pa); (unsigned long long)iova, &pa);
gart_dump_table(gart); gart_dump_table(gart);
return -EINVAL; return -EINVAL;
} }
...@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev)
struct gart_device *gart; struct gart_device *gart;
struct resource *res, *res_remap; struct resource *res, *res_remap;
void __iomem *gart_regs; void __iomem *gart_regs;
int err;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
if (gart_handle) if (gart_handle)
...@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_regs = devm_ioremap(dev, res->start, resource_size(res)); gart_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!gart_regs) { if (!gart_regs) {
dev_err(dev, "failed to remap GART registers\n"); dev_err(dev, "failed to remap GART registers\n");
err = -ENXIO; return -ENXIO;
goto fail;
} }
gart->dev = &pdev->dev; gart->dev = &pdev->dev;
...@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart->savedata = vmalloc(sizeof(u32) * gart->page_count); gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
if (!gart->savedata) { if (!gart->savedata) {
dev_err(dev, "failed to allocate context save area\n"); dev_err(dev, "failed to allocate context save area\n");
err = -ENOMEM; return -ENOMEM;
goto fail;
} }
platform_set_drvdata(pdev, gart); platform_set_drvdata(pdev, gart);
...@@ -401,32 +398,20 @@ static int tegra_gart_probe(struct platform_device *pdev) ...@@ -401,32 +398,20 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_handle = gart; gart_handle = gart;
bus_set_iommu(&platform_bus_type, &gart_iommu_ops); bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0; return 0;
fail:
if (gart_regs)
devm_iounmap(dev, gart_regs);
if (gart && gart->savedata)
vfree(gart->savedata);
devm_kfree(dev, gart);
return err;
} }
static int tegra_gart_remove(struct platform_device *pdev) static int tegra_gart_remove(struct platform_device *pdev)
{ {
struct gart_device *gart = platform_get_drvdata(pdev); struct gart_device *gart = platform_get_drvdata(pdev);
struct device *dev = gart->dev;
writel(0, gart->regs + GART_CONFIG); writel(0, gart->regs + GART_CONFIG);
if (gart->savedata) if (gart->savedata)
vfree(gart->savedata); vfree(gart->savedata);
if (gart->regs)
devm_iounmap(dev, gart->regs);
devm_kfree(dev, gart);
gart_handle = NULL; gart_handle = NULL;
return 0; return 0;
} }
const struct dev_pm_ops tegra_gart_pm_ops = { static const struct dev_pm_ops tegra_gart_pm_ops = {
.suspend = tegra_gart_suspend, .suspend = tegra_gart_suspend,
.resume = tegra_gart_resume, .resume = tegra_gart_resume,
}; };
......
...@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
unsigned long pfn = __phys_to_pfn(pa); unsigned long pfn = __phys_to_pfn(pa);
unsigned long flags; unsigned long flags;
dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa); dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
if (!pfn_valid(pfn)) if (!pfn_valid(pfn))
return -ENOMEM; return -ENOMEM;
...@@ -1254,7 +1254,7 @@ static int tegra_smmu_remove(struct platform_device *pdev) ...@@ -1254,7 +1254,7 @@ static int tegra_smmu_remove(struct platform_device *pdev)
return 0; return 0;
} }
const struct dev_pm_ops tegra_smmu_pm_ops = { static const struct dev_pm_ops tegra_smmu_pm_ops = {
.suspend = tegra_smmu_suspend, .suspend = tegra_smmu_suspend,
.resume = tegra_smmu_resume, .resume = tegra_smmu_resume,
}; };
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/types.h> #include <linux/types.h>
#include <trace/events/iommu.h>
#define IOMMU_READ (1) #define IOMMU_READ (1)
#define IOMMU_WRITE (2) #define IOMMU_WRITE (2)
...@@ -227,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain, ...@@ -227,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
ret = domain->handler(domain, dev, iova, flags, ret = domain->handler(domain, dev, iova, flags,
domain->handler_token); domain->handler_token);
trace_io_page_fault(dev, iova, flags);
return ret; return ret;
} }
......
/*
* iommu trace points
*
* Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iommu
#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IOMMU_H
#include <linux/tracepoint.h>
#include <linux/pci.h>
struct device;
/*
 * iommu_group_event - template for IOMMU group membership events.
 *
 * Records the IOMMU group ID and the name of the device joining or
 * leaving that group.
 */
DECLARE_EVENT_CLASS(iommu_group_event,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev),

	/* Payload: integer group id plus the device name as a string. */
	TP_STRUCT__entry(
		__field(int, gid)
		__string(device, dev_name(dev))
	),

	TP_fast_assign(
		__entry->gid = group_id;
		__assign_str(device, dev_name(dev));
	),

	TP_printk("IOMMU: groupID=%d device=%s",
		__entry->gid, __get_str(device)
	)
);

/* Emitted from iommu_group_add_device() when a device joins a group. */
DEFINE_EVENT(iommu_group_event, add_device_to_group,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev)
);

/* Emitted from iommu_group_remove_device() when a device leaves a group. */
DEFINE_EVENT(iommu_group_event, remove_device_from_group,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev)
);
/*
 * iommu_device_event - template for domain attach/detach events.
 *
 * Records only the device name; the domain itself is not captured.
 */
DECLARE_EVENT_CLASS(iommu_device_event,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
	),

	TP_fast_assign(
		__assign_str(device, dev_name(dev));
	),

	TP_printk("IOMMU: device=%s", __get_str(device)
	)
);

/*
 * Emitted from iommu_attach_device() only after the driver's
 * attach_dev() callback has succeeded.
 */
DEFINE_EVENT(iommu_device_event, attach_device_to_domain,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev)
);

/* Emitted from iommu_detach_device() after the driver's detach_dev(). */
DEFINE_EVENT(iommu_device_event, detach_device_from_domain,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev)
);
/*
 * iommu_map_unmap - template for iommu_map()/iommu_unmap() events.
 *
 * Records the IO virtual address, the physical address, and the size of
 * the mapped/unmapped region.
 *
 * Fix: the size field was declared as a plain int even though the
 * tracepoint prototype takes a size_t.  On 64-bit kernels that silently
 * truncated the recorded size, and regions of 2GB or more wrapped
 * negative; the "0x%x" printk specifier also mismatched the argument
 * width.  Store the size as size_t and print it with %zu.
 */
DECLARE_EVENT_CLASS(iommu_map_unmap,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size),

	TP_STRUCT__entry(
		__field(u64, iova)
		__field(u64, paddr)
		/* size_t, not int: must hold the full untruncated size */
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
	),

	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
		__entry->iova, __entry->paddr, __entry->size
	)
);

/* Emitted from iommu_map() after the whole range mapped successfully. */
DEFINE_EVENT(iommu_map_unmap, map,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size)
);

/*
 * Emitted from iommu_unmap(); the caller passes 0 for paddr, so the
 * custom print format omits it.
 */
DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size),

	TP_printk("IOMMU: iova=0x%016llx size=%zu",
		__entry->iova, __entry->size
	)
);
/*
 * iommu_error - template for IOMMU error events.
 *
 * Records the faulting device (name and driver), the IO virtual address
 * of the fault, and the fault flags.
 */
DECLARE_EVENT_CLASS(iommu_error,

	TP_PROTO(struct device *dev, unsigned long iova, int flags),

	TP_ARGS(dev, iova, flags),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(u64, iova)
		__field(int, flags)
	),

	TP_fast_assign(
		__assign_str(device, dev_name(dev));
		__assign_str(driver, dev_driver_string(dev));
		__entry->iova = iova;
		__entry->flags = flags;
	),

	TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
		__get_str(driver), __get_str(device),
		__entry->iova, __entry->flags
	)
);

/* Emitted from report_iommu_fault() for every IOMMU page fault. */
DEFINE_EVENT(iommu_error, io_page_fault,

	TP_PROTO(struct device *dev, unsigned long iova, int flags),

	TP_ARGS(dev, iova, flags)
);
#endif /* _TRACE_IOMMU_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment