Commit 8d140667 authored by Linus Torvalds

Merge tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time with:

   - A new IOMMU-API call: iommu_map_sg() to map multiple non-contiguous
     pages into an IO address space with only one API call.  This allows
     certain optimizations in the IOMMU driver (a usage sketch follows
     right after this message).

   - DMAR device hotplug in the Intel VT-d driver.  It is now possible
     to hotplug the IOMMU itself.

   - A new IOMMU driver for the Rockchip ARM platform.

   - Couple of cleanups and improvements in the OMAP IOMMU driver.

   - Nesting support for the ARM-SMMU driver.

   - Various other small cleanups and improvements.

  Please note that this time some branches were also pulled into other
  trees, like the DRI and the Tegra tree.  The VT-d branch was also
  pulled into tip/x86/apic.

  Some patches for the AMD IOMMUv2 driver are not in the IOMMU tree but
  were merged by Andrew (or finally ended up in the DRI tree)"
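
The iommu_map_sg() call mentioned in the first bullet takes an already-built
scatterlist and maps every entry into the IO address space in a single call,
so the driver can batch page-table updates and TLB maintenance instead of
doing one iommu_map() per page. A minimal caller-side sketch, assuming the
domain and scatterlist already exist; the helper name example_map_buffer()
is hypothetical and not part of this merge:

	#include <linux/iommu.h>
	#include <linux/scatterlist.h>

	/*
	 * Minimal usage sketch: "domain" is an iommu domain the caller owns,
	 * "sg"/"nents" describe an already-built scatterlist of total_len bytes.
	 */
	static int example_map_buffer(struct iommu_domain *domain,
				      unsigned long iova, struct scatterlist *sg,
				      unsigned int nents, size_t total_len)
	{
		size_t mapped;

		/* One call maps all (possibly non-contiguous) entries. */
		mapped = iommu_map_sg(domain, iova, sg, nents,
				      IOMMU_READ | IOMMU_WRITE);
		if (mapped < total_len) {
			/* Tear down any partial mapping before bailing out. */
			iommu_unmap(domain, iova, mapped);
			return -ENOMEM;
		}

		return 0;
	}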

* tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (42 commits)
  iommu: Decouple iommu_map_sg from CPU page size
  iommu/vt-d: Fix an off-by-one bug in __domain_mapping()
  pci, ACPI, iommu: Enhance pci_root to support DMAR device hotplug
  iommu/vt-d: Enhance intel-iommu driver to support DMAR unit hotplug
  iommu/vt-d: Enhance error recovery in function intel_enable_irq_remapping()
  iommu/vt-d: Enhance intel_irq_remapping driver to support DMAR unit hotplug
  iommu/vt-d: Search for ACPI _DSM method for DMAR hotplug
  iommu/vt-d: Implement DMAR unit hotplug framework
  iommu/vt-d: Dynamically allocate and free seq_id for DMAR units
  iommu/vt-d: Introduce helper function dmar_walk_resources()
  iommu/arm-smmu: add support for DOMAIN_ATTR_NESTING attribute
  iommu/arm-smmu: Play nice on non-ARM/SMMU systems
  iommu/amd: remove compiler warning due to IOMMU_CAP_NOEXEC
  iommu/arm-smmu: add IOMMU_CAP_NOEXEC to the ARM SMMU driver
  iommu: add capability IOMMU_CAP_NOEXEC
  iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC
  iommu/amd: Fix accounting of device_state
  x86/vt-d: Fix incorrect bit operations in setting values
  iommu/rockchip: Allow to compile with COMPILE_TEST
  iommu/ipmmu-vmsa: Return proper error if devm_request_irq fails
  ...
parents 87c779ba 76771c93
Rockchip IOMMU
==============

A Rockchip DRM iommu translates io virtual addresses to physical addresses for
its master device. Each slave device is bound to a single master device, and
shares its clocks, power domain and irq.

Required properties:
- compatible      : Should be "rockchip,iommu"
- reg             : Address space for the configuration registers
- interrupts      : Interrupt specifier for the IOMMU instance
- interrupt-names : Interrupt name for the IOMMU instance
- #iommu-cells    : Should be <0>. This indicates the iommu is a
                    "single-master" device, and needs no additional information
                    to associate with its master device. See:
                    Documentation/devicetree/bindings/iommu/iommu.txt

Example:
	vopl_mmu: iommu@ff940300 {
		compatible = "rockchip,iommu";
		reg = <0xff940300 0x100>;
		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "vopl_mmu";
		#iommu-cells = <0>;
	};
@@ -33,6 +33,7 @@
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/pci-aspm.h>
+#include <linux/dmar.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	struct acpi_pci_root *root;
 	acpi_handle handle = device->handle;
 	int no_aspm = 0, clear_aspm = 0;
+	bool hotadd = system_state != SYSTEM_BOOTING;
 
 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
 	device->driver_data = root;
 
+	if (hotadd && dmar_device_add(handle)) {
+		result = -ENXIO;
+		goto end;
+	}
+
 	pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
 	       root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 		       root->segment, (unsigned int)root->secondary.start);
 		device->driver_data = NULL;
 		result = -ENODEV;
-		goto end;
+		goto remove_dmar;
 	}
 
 	if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);
 
-	if (system_state != SYSTEM_BOOTING) {
+	if (hotadd) {
 		pcibios_resource_survey_bus(root->bus);
 		pci_assign_unassigned_root_bus_resources(root->bus);
 	}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	pci_unlock_rescan_remove();
 	return 1;
 
+remove_dmar:
+	if (hotadd)
+		dmar_device_remove(handle);
 end:
 	kfree(root);
 	return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 
 	pci_remove_root_bus(root->bus);
 
+	dmar_device_remove(device->handle);
+
 	pci_unlock_rescan_remove();
 
 	kfree(root);
......
@@ -144,14 +144,27 @@ config OMAP_IOMMU
 	select IOMMU_API
 
 config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
+	bool "Export OMAP IOMMU internals in DebugFS"
 	depends on OMAP_IOMMU && DEBUG_FS
-	help
+	---help---
 	  Select this to see extensive information about
 	  the internal state of OMAP IOMMU in debugfs.
 
 	  Say N unless you know you need this.
 
+config ROCKCHIP_IOMMU
+	bool "Rockchip IOMMU Support"
+	depends on ARM
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for IOMMUs found on Rockchip rk32xx SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+	  Say Y here if you are using a Rockchip SoC that includes an IOMMU
+	  device.
+
 config TEGRA_IOMMU_GART
 	bool "Tegra GART IOMMU Support"
 	depends on ARCH_TEGRA_2x_SOC
......
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
-obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
......
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return (irq_remapping_enabled == 1);
+	case IOMMU_CAP_NOEXEC:
+		return false;
 	}
 
 	return false;
......
@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
 
 static void put_pasid_state(struct pasid_state *pasid_state)
 {
-	if (atomic_dec_and_test(&pasid_state->count)) {
-		put_device_state(pasid_state->device_state);
+	if (atomic_dec_and_test(&pasid_state->count))
 		wake_up(&pasid_state->wq);
-	}
 }
 
 static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
 
 	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
 
-	if (atomic_dec_and_test(&pasid_state->count))
-		put_device_state(pasid_state->device_state);
-	else
+	if (!atomic_dec_and_test(&pasid_state->count))
 		schedule();
 
 	finish_wait(&pasid_state->wq, &wait);
......
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
 #define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
 #define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
 
+enum arm_smmu_domain_stage {
+	ARM_SMMU_DOMAIN_S1 = 0,
+	ARM_SMMU_DOMAIN_S2,
+	ARM_SMMU_DOMAIN_NESTED,
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct arm_smmu_cfg		cfg;
+	enum arm_smmu_domain_stage	stage;
 	spinlock_t			lock;
 };
 
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
-	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
-		/*
-		 * We will likely want to change this if/when KVM gets
-		 * involved.
-		 */
+	/*
+	 * Mapping the requested stage onto what we support is surprisingly
+	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
+	 * following table:
+	 *
+	 * Requested        Supported        Actual
+	 *     S1               N              S1
+	 *     S1             S1+S2            S1
+	 *     S1               S2             S2
+	 *     S1               S1             S1
+	 *     N                N              N
+	 *     N              S1+S2            S2
+	 *     N                S2             S2
+	 *     N                S1             S1
+	 *
+	 * Note that you can't actually request stage-2 mappings.
+	 */
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+	switch (smmu_domain->stage) {
+	case ARM_SMMU_DOMAIN_S1:
 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
-	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
-		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
-		start = smmu->num_s2_context_banks;
-	} else {
+		break;
+	case ARM_SMMU_DOMAIN_NESTED:
+		/*
+		 * We will likely want to change this if/when KVM gets
+		 * involved.
+		 */
+	case ARM_SMMU_DOMAIN_S2:
 		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out_unlock;
 	}
 
 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 					unsigned long pfn, int prot, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 		pteval |= ARM_SMMU_PTE_MEMATTR_NC;
 	}
 
+	if (prot & IOMMU_NOEXEC)
+		pteval |= ARM_SMMU_PTE_XN;
+
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (prot & IOMMU_EXEC)
-		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return true; /* MSIs are just memory writes */
+	case IOMMU_CAP_NOEXEC:
+		return true;
 	default:
 		return false;
 	}
@@ -1644,6 +1681,40 @@ static void arm_smmu_remove_device(struct device *dev)
 	iommu_group_remove_device(dev);
 }
 
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+				    enum iommu_attr attr, void *data)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	switch (attr) {
+	case DOMAIN_ATTR_NESTING:
+		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+				    enum iommu_attr attr, void *data)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	switch (attr) {
+	case DOMAIN_ATTR_NESTING:
+		if (smmu_domain->smmu)
+			return -EPERM;
+
+		if (*(int *)data)
+			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+		else
+			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
 static const struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_init		= arm_smmu_domain_init,
@@ -1656,6 +1727,8 @@ static const struct iommu_ops arm_smmu_ops = {
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
+	.domain_get_attr	= arm_smmu_domain_get_attr,
+	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.pgsize_bitmap		= (SECTION_SIZE |
 				   ARM_SMMU_PTE_CONT_SIZE |
 				   PAGE_SIZE),
@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
 
 static int __init arm_smmu_init(void)
 {
+	struct device_node *np;
 	int ret;
 
+	/*
+	 * Play nice with systems that don't have an ARM SMMU by checking that
+	 * an ARM SMMU exists in the system before proceeding with the driver
+	 * and IOMMU bus operation registration.
+	 */
+	np = of_find_matching_node(NULL, arm_smmu_of_match);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+
 	ret = platform_driver_register(&arm_smmu_driver);
 	if (ret)
 		return ret;
......
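The arm_smmu_domain_get_attr()/arm_smmu_domain_set_attr() hooks above wire
DOMAIN_ATTR_NESTING into the generic attribute interface, so a caller such as
VFIO can request a nested (stage-1 over stage-2) domain before binding it to
hardware. A minimal caller-side sketch, assuming "dev" is a platform device
behind the SMMU; the helper name example_attach_nested() is illustrative and
not part of this merge:

	#include <linux/device.h>
	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	static int example_attach_nested(struct device *dev)
	{
		struct iommu_domain *domain;
		int nesting = 1;
		int ret;

		domain = iommu_domain_alloc(&platform_bus_type);
		if (!domain)
			return -ENOMEM;

		/*
		 * Nesting must be requested before the first attach; the SMMU
		 * driver returns -EPERM once the domain is live on hardware.
		 */
		ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
		if (!ret)
			ret = iommu_attach_device(domain, dev);
		if (ret)
			iommu_domain_free(domain);

		return ret;
	}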
...@@ -44,6 +44,14 @@ ...@@ -44,6 +44,14 @@
#include "irq_remapping.h" #include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
void *arg[ACPI_DMAR_TYPE_RESERVED];
bool ignore_unhandled;
bool print_entry;
};
/* /*
* Assumptions: * Assumptions:
* 1) The hotplug framework guarentees that DMAR unit will be hot-added * 1) The hotplug framework guarentees that DMAR unit will be hot-added
...@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units); ...@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl; struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size; static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1; static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd); static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu); static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{ {
/* /*
* add INCLUDE_ALL at the tail, so scan the list will find it at * add INCLUDE_ALL at the tail, so scan the list will find it at
...@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = { ...@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN, .priority = INT_MIN,
}; };
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
struct dmar_drhd_unit *dmaru;
list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
if (dmaru->segment == drhd->segment &&
dmaru->reg_base_addr == drhd->address)
return dmaru;
return NULL;
}
/** /**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit * structure which uniquely represent one DMA remapping hardware unit
* present in the platform * present in the platform
*/ */
static int __init static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{ {
struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru; struct dmar_drhd_unit *dmaru;
int ret = 0; int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *)header; drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); dmaru = dmar_find_dmaru(drhd);
if (dmaru)
goto out;
dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru) if (!dmaru)
return -ENOMEM; return -ENOMEM;
dmaru->hdr = header; /*
* If header is allocated from slab by ACPI _DSM method, we need to
* copy the content because the memory buffer will be freed on return.
*/
dmaru->hdr = (void *)(dmaru + 1);
memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address; dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment; dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
...@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) ...@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret; return ret;
} }
dmar_register_drhd_unit(dmaru); dmar_register_drhd_unit(dmaru);
out:
if (arg)
(*(int *)arg)++;
return 0; return 0;
} }
...@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) ...@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru); kfree(dmaru);
} }
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
void *arg)
{ {
struct acpi_dmar_andd *andd = (void *)header; struct acpi_dmar_andd *andd = (void *)header;
...@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) ...@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
} }
#ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_ACPI_NUMA
static int __init static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{ {
struct acpi_dmar_rhsa *rhsa; struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd; struct dmar_drhd_unit *drhd;
...@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header) ...@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0; return 0;
} }
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif #endif
static void __init static void __init
...@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void) ...@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0); return (ACPI_SUCCESS(status) ? 1 : 0);
} }
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
size_t len, struct dmar_res_callback *cb)
{
int ret = 0;
struct acpi_dmar_header *iter, *next;
struct acpi_dmar_header *end = ((void *)start) + len;
for (iter = start; iter < end && ret == 0; iter = next) {
next = (void *)iter + iter->length;
if (iter->length == 0) {
/* Avoid looping forever on bad ACPI tables */
pr_debug(FW_BUG "Invalid 0-length structure\n");
break;
} else if (next > end) {
/* Avoid passing table end */
pr_warn(FW_BUG "record passes table end\n");
ret = -EINVAL;
break;
}
if (cb->print_entry)
dmar_table_print_dmar_entry(iter);
if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
/* continue for forward compatibility */
pr_debug("Unknown DMAR structure type %d\n",
iter->type);
} else if (cb->cb[iter->type]) {
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
} else if (!cb->ignore_unhandled) {
pr_warn("No handler for DMAR structure type %d\n",
iter->type);
ret = -EINVAL;
}
}
return ret;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
struct dmar_res_callback *cb)
{
return dmar_walk_remapping_entries((void *)(dmar + 1),
dmar->header.length - sizeof(*dmar), cb);
}
/** /**
* parse_dmar_table - parses the DMA reporting table * parse_dmar_table - parses the DMA reporting table
*/ */
...@@ -510,9 +593,18 @@ static int __init ...@@ -510,9 +593,18 @@ static int __init
parse_dmar_table(void) parse_dmar_table(void)
{ {
struct acpi_table_dmar *dmar; struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
int ret = 0; int ret = 0;
int drhd_count = 0; int drhd_count = 0;
struct dmar_res_callback cb = {
.print_entry = true,
.ignore_unhandled = true,
.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
};
/* /*
* Do it again, earlier dmar_tbl mapping could be mapped with * Do it again, earlier dmar_tbl mapping could be mapped with
...@@ -536,51 +628,10 @@ parse_dmar_table(void) ...@@ -536,51 +628,10 @@ parse_dmar_table(void)
} }
pr_info("Host address width %d\n", dmar->width + 1); pr_info("Host address width %d\n", dmar->width + 1);
ret = dmar_walk_dmar_table(dmar, &cb);
entry_header = (struct acpi_dmar_header *)(dmar + 1); if (ret == 0 && drhd_count == 0)
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
pr_warn("Invalid 0-length structure\n");
ret = -EINVAL;
break;
}
dmar_table_print_dmar_entry(entry_header);
switch (entry_header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd_count++;
ret = dmar_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
ret = dmar_parse_one_rmrr(entry_header);
break;
case ACPI_DMAR_TYPE_ROOT_ATS:
ret = dmar_parse_one_atsr(entry_header);
break;
case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
ret = dmar_parse_one_rhsa(entry_header);
#endif
break;
case ACPI_DMAR_TYPE_NAMESPACE:
ret = dmar_parse_one_andd(entry_header);
break;
default:
pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
}
if (ret)
break;
entry_header = ((void *)entry_header + entry_header->length);
}
if (drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
return ret; return ret;
} }
...@@ -778,65 +829,57 @@ static void warn_invalid_dmar(u64 addr, const char *message) ...@@ -778,65 +829,57 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION)); dmi_get_system_info(DMI_PRODUCT_VERSION));
} }
static int __init check_zero_address(void) static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{ {
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_hardware_unit *drhd;
dmar = (struct acpi_table_dmar *)dmar_tbl;
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
pr_warn("Invalid 0-length structure\n");
return 0;
}
if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
void __iomem *addr; void __iomem *addr;
u64 cap, ecap; u64 cap, ecap;
drhd = (void *)entry_header; drhd = (void *)entry;
if (!drhd->address) { if (!drhd->address) {
warn_invalid_dmar(0, ""); warn_invalid_dmar(0, "");
goto failed; return -EINVAL;
} }
if (arg)
addr = ioremap(drhd->address, VTD_PAGE_SIZE);
else
addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
if (!addr ) { if (!addr) {
printk("IOMMU: can't validate: %llx\n", drhd->address); pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
goto failed; return -EINVAL;
} }
cap = dmar_readq(addr + DMAR_CAP_REG); cap = dmar_readq(addr + DMAR_CAP_REG);
ecap = dmar_readq(addr + DMAR_ECAP_REG); ecap = dmar_readq(addr + DMAR_ECAP_REG);
if (arg)
iounmap(addr);
else
early_iounmap(addr, VTD_PAGE_SIZE); early_iounmap(addr, VTD_PAGE_SIZE);
if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
warn_invalid_dmar(drhd->address,
" returns all ones");
goto failed;
}
}
entry_header = ((void *)entry_header + entry_header->length); if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
warn_invalid_dmar(drhd->address, " returns all ones");
return -EINVAL;
} }
return 1;
failed:
return 0; return 0;
} }
int __init detect_intel_iommu(void) int __init detect_intel_iommu(void)
{ {
int ret; int ret;
struct dmar_res_callback validate_drhd_cb = {
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
.ignore_unhandled = true,
};
down_write(&dmar_global_lock); down_write(&dmar_global_lock);
ret = dmar_table_detect(); ret = dmar_table_detect();
if (ret) if (ret)
ret = check_zero_address(); ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
{ &validate_drhd_cb);
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1; iommu_detected = 1;
/* Make sure ACS will be enabled */ /* Make sure ACS will be enabled */
...@@ -847,7 +890,7 @@ int __init detect_intel_iommu(void) ...@@ -847,7 +890,7 @@ int __init detect_intel_iommu(void)
if (ret) if (ret)
x86_init.iommu.iommu_init = intel_iommu_init; x86_init.iommu.iommu_init = intel_iommu_init;
#endif #endif
}
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL; dmar_tbl = NULL;
up_write(&dmar_global_lock); up_write(&dmar_global_lock);
...@@ -931,11 +974,32 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) ...@@ -931,11 +974,32 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
return err; return err;
} }
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
DMAR_UNITS_SUPPORTED);
if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
iommu->seq_id = -1;
} else {
set_bit(iommu->seq_id, dmar_seq_ids);
sprintf(iommu->name, "dmar%d", iommu->seq_id);
}
return iommu->seq_id;
}
static void dmar_free_seq_id(struct intel_iommu *iommu)
{
if (iommu->seq_id >= 0) {
clear_bit(iommu->seq_id, dmar_seq_ids);
iommu->seq_id = -1;
}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd) static int alloc_iommu(struct dmar_drhd_unit *drhd)
{ {
struct intel_iommu *iommu; struct intel_iommu *iommu;
u32 ver, sts; u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0; int agaw = 0;
int msagaw = 0; int msagaw = 0;
int err; int err;
...@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) ...@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu) if (!iommu)
return -ENOMEM; return -ENOMEM;
iommu->seq_id = iommu_allocated++; if (dmar_alloc_seq_id(iommu) < 0) {
sprintf (iommu->name, "dmar%d", iommu->seq_id); pr_err("IOMMU: failed to allocate seq_id\n");
err = -ENOSPC;
goto error;
}
err = map_iommu(iommu, drhd->reg_base_addr); err = map_iommu(iommu, drhd->reg_base_addr);
if (err) { if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name); pr_err("IOMMU: failed to map %s\n", iommu->name);
goto error; goto error_free_seq_id;
} }
err = -EINVAL; err = -EINVAL;
...@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) ...@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0; return 0;
err_unmap: err_unmap:
unmap_iommu(iommu); unmap_iommu(iommu);
error: error_free_seq_id:
dmar_free_seq_id(iommu);
error:
kfree(iommu); kfree(iommu);
return err; return err;
} }
...@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu) ...@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg) if (iommu->reg)
unmap_iommu(iommu); unmap_iommu(iommu);
dmar_free_seq_id(iommu);
kfree(iommu); kfree(iommu);
} }
...@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void) ...@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1; return dmar->flags & 0x1;
} }
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
return irq_remapping_enabled || intel_iommu_enabled;
}
static int __init dmar_free_unused_resources(void) static int __init dmar_free_unused_resources(void)
{ {
struct dmar_drhd_unit *dmaru, *dmaru_n; struct dmar_drhd_unit *dmaru, *dmaru_n;
/* DMAR units are in use */ if (dmar_in_use())
if (irq_remapping_enabled || intel_iommu_enabled)
return 0; return 0;
if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
...@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void) ...@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
late_initcall(dmar_free_unused_resources); late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu); IOMMU_INIT_POST(detect_intel_iommu);
/*
* DMAR Hotplug Support
* For more details, please refer to Intel(R) Virtualization Technology
* for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8
* "Remapping Hardware Unit Hot Plug".
*/
static u8 dmar_hp_uuid[] = {
/* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
/* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};
/*
* Currently there's only one revision and BIOS will not check the revision id,
* so use 0 for safety.
*/
#define DMAR_DSM_REV_ID 0
#define DMAR_DSM_FUNC_DRHD 1
#define DMAR_DSM_FUNC_ATSR 2
#define DMAR_DSM_FUNC_RHSA 3
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
dmar_res_handler_t handler, void *arg)
{
int ret = -ENODEV;
union acpi_object *obj;
struct acpi_dmar_header *start;
struct dmar_res_callback callback;
static int res_type[] = {
[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
};
if (!dmar_detect_dsm(handle, func))
return 0;
obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
func, NULL, ACPI_TYPE_BUFFER);
if (!obj)
return -ENODEV;
memset(&callback, 0, sizeof(callback));
callback.cb[res_type[func]] = handler;
callback.arg[res_type[func]] = arg;
start = (struct acpi_dmar_header *)obj->buffer.pointer;
ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
ACPI_FREE(obj);
return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
int ret;
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (!dmaru)
return -ENODEV;
ret = dmar_ir_hotplug(dmaru, true);
if (ret == 0)
ret = dmar_iommu_hotplug(dmaru, true);
return ret;
}
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
int i, ret;
struct device *dev;
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (!dmaru)
return 0;
/*
* All PCI devices managed by this unit should have been destroyed.
*/
if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
for_each_active_dev_scope(dmaru->devices,
dmaru->devices_cnt, i, dev)
return -EBUSY;
ret = dmar_ir_hotplug(dmaru, false);
if (ret == 0)
ret = dmar_iommu_hotplug(dmaru, false);
return ret;
}
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (dmaru) {
list_del_rcu(&dmaru->list);
synchronize_rcu();
dmar_free_drhd(dmaru);
}
return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
int ret;
int drhd_count = 0;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_validate_one_drhd, (void *)1);
if (ret)
goto out;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_parse_one_drhd, (void *)&drhd_count);
if (ret == 0 && drhd_count == 0) {
pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
goto out;
} else if (ret) {
goto release_drhd;
}
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
&dmar_parse_one_rhsa, NULL);
if (ret)
goto release_drhd;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_parse_one_atsr, NULL);
if (ret)
goto release_atsr;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_add_drhd, NULL);
if (!ret)
return 0;
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_remove_drhd, NULL);
release_atsr:
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_release_one_atsr, NULL);
release_drhd:
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_release_drhd, NULL);
out:
return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
int ret;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_check_one_atsr, NULL);
if (ret)
return ret;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_remove_drhd, NULL);
if (ret == 0) {
WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_release_one_atsr, NULL));
WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_release_drhd, NULL));
} else {
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_add_drhd, NULL);
}
return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
void *context, void **retval)
{
acpi_handle *phdl = retval;
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
*phdl = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
int ret;
acpi_handle tmp = NULL;
acpi_status status;
if (!dmar_in_use())
return 0;
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
ACPI_UINT32_MAX,
dmar_get_dsm_handle,
NULL, NULL, &tmp);
if (ACPI_FAILURE(status)) {
pr_warn("Failed to locate _DSM method.\n");
return -ENXIO;
}
}
if (tmp == NULL)
return 0;
down_write(&dmar_global_lock);
if (insert)
ret = dmar_hotplug_insert(tmp);
else
ret = dmar_hotplug_remove(tmp);
up_write(&dmar_global_lock);
return ret;
}
int dmar_device_add(acpi_handle handle)
{
return dmar_device_hotplug(handle, true);
}
int dmar_device_remove(acpi_handle handle)
{
return dmar_device_hotplug(handle, false);
}
...@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root) ...@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
} }
static inline void set_root_value(struct root_entry *root, unsigned long value) static inline void set_root_value(struct root_entry *root, unsigned long value)
{ {
root->val &= ~VTD_PAGE_MASK;
root->val |= value & VTD_PAGE_MASK; root->val |= value & VTD_PAGE_MASK;
} }
...@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context, ...@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
static inline void context_set_address_root(struct context_entry *context, static inline void context_set_address_root(struct context_entry *context,
unsigned long value) unsigned long value)
{ {
context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK; context->lo |= value & VTD_PAGE_MASK;
} }
...@@ -328,17 +330,10 @@ static int hw_pass_through = 1; ...@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
/* si_domain contains mulitple devices */ /* si_domain contains mulitple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED 64
#endif
struct dmar_domain { struct dmar_domain {
int id; /* domain id */ int id; /* domain id */
int nid; /* node id */ int nid; /* node id */
DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED); DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/ /* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */ struct list_head devices; /* all devices' list */
...@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) ...@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags; unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node); root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) if (!root) {
pr_err("IOMMU: allocating root entry for %s failed\n",
iommu->name);
return -ENOMEM; return -ENOMEM;
}
__iommu_flush_cache(iommu, root, ROOT_SIZE); __iommu_flush_cache(iommu, root, ROOT_SIZE);
...@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) ...@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0; return 0;
} }
static void free_dmar_iommu(struct intel_iommu *iommu) static void disable_dmar_iommu(struct intel_iommu *iommu)
{ {
struct dmar_domain *domain; struct dmar_domain *domain;
int i; int i;
...@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu) ...@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (iommu->gcmd & DMA_GCMD_TE) if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu); iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
kfree(iommu->domains); kfree(iommu->domains);
kfree(iommu->domain_ids); kfree(iommu->domain_ids);
iommu->domains = NULL; iommu->domains = NULL;
iommu->domain_ids = NULL; iommu->domain_ids = NULL;
}
g_iommus[iommu->seq_id] = NULL; g_iommus[iommu->seq_id] = NULL;
...@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, ...@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{ {
struct dma_pte *first_pte = NULL, *pte = NULL; struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval); phys_addr_t uninitialized_var(pteval);
unsigned long sg_res; unsigned long sg_res = 0;
unsigned int largepage_lvl = 0; unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0; unsigned long lvl_pages = 0;
...@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, ...@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
if (sg) if (!sg) {
sg_res = 0; sg_res = nr_pages;
else {
sg_res = nr_pages + 1;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
} }
...@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw) ...@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0; return 0;
} }
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
/*
* Start from the sane iommu hardware state.
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
* we got the things already rolling from a sane state.
*/
if (!iommu->qi) {
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based Invalidate
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
pr_info("IOMMU: %s using Register based invalidation\n",
iommu->name);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
}
}
static int __init init_dmars(void) static int __init init_dmars(void)
{ {
struct dmar_drhd_unit *drhd; struct dmar_drhd_unit *drhd;
...@@ -2728,14 +2764,18 @@ static int __init init_dmars(void) ...@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read * threaded kernel __init code path all other access are read
* only * only
*/ */
if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) { if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++; g_num_of_iommus++;
continue; continue;
} }
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n", printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
IOMMU_UNITS_SUPPORTED); DMAR_UNITS_SUPPORTED);
} }
/* Preallocate enough resources for IOMMU hot-addition */
if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
g_num_of_iommus = DMAR_UNITS_SUPPORTED;
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL); GFP_KERNEL);
if (!g_iommus) { if (!g_iommus) {
...@@ -2764,58 +2804,14 @@ static int __init init_dmars(void) ...@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later. * among all IOMMU's. Need to Split it later.
*/ */
ret = iommu_alloc_root_entry(iommu); ret = iommu_alloc_root_entry(iommu);
if (ret) { if (ret)
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto free_iommu; goto free_iommu;
}
if (!ecap_pass_through(iommu->ecap)) if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0; hw_pass_through = 0;
} }
/* for_each_active_iommu(iommu, drhd)
* Start from the sane iommu hardware state. intel_iommu_init_qi(iommu);
*/
for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
* we got the things already rolling from a sane state.
*/
if (iommu->qi)
continue;
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
* Invalidate
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
"invalidation\n",
iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
"invalidation\n",
iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
}
}
if (iommu_pass_through) if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL; iommu_identity_mapping |= IDENTMAP_ALL;
...@@ -2901,8 +2897,10 @@ static int __init init_dmars(void) ...@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
return 0; return 0;
free_iommu: free_iommu:
for_each_active_iommu(iommu, drhd) for_each_active_iommu(iommu, drhd) {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu); free_dmar_iommu(iommu);
}
kfree(deferred_flush); kfree(deferred_flush);
free_g_iommus: free_g_iommus:
kfree(g_iommus); kfree(g_iommus);
...@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {} ...@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{ {
struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru; struct dmar_rmrr_unit *rmrru;
...@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) ...@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0; return 0;
} }
int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *tmp;
list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
tmp = (struct acpi_dmar_atsr *)atsru->hdr;
if (atsr->segment != tmp->segment)
continue;
if (atsr->header.length != tmp->header.length)
continue;
if (memcmp(atsr, tmp, atsr->header.length) == 0)
return atsru;
}
return NULL;
}
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{ {
struct acpi_dmar_atsr *atsr; struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru; struct dmar_atsr_unit *atsru;
if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
return 0;
atsr = container_of(hdr, struct acpi_dmar_atsr, header); atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); atsru = dmar_find_atsr(atsr);
if (atsru)
return 0;
atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru) if (!atsru)
return -ENOMEM; return -ENOMEM;
atsru->hdr = hdr; /*
* If memory is allocated from slab by ACPI _DSM method, we need to
* copy the memory content because the memory buffer will be freed
* on return.
*/
atsru->hdr = (void *)(atsru + 1);
memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1; atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) { if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
...@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) ...@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru); kfree(atsru);
} }
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (atsru) {
list_del_rcu(&atsru->list);
synchronize_rcu();
intel_iommu_free_atsr(atsru);
}
return 0;
}
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
int i;
struct device *dev;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (!atsru)
return 0;
if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
i, dev)
return -EBUSY;
return 0;
}
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (g_iommus[iommu->seq_id])
return 0;
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
iommu->name);
return -ENXIO;
}
if (!ecap_sc_support(iommu->ecap) &&
domain_update_iommu_snooping(iommu)) {
pr_warn("IOMMU: %s doesn't support snooping.\n",
iommu->name);
return -ENXIO;
}
sp = domain_update_iommu_superpage(iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("IOMMU: %s doesn't support large page.\n",
iommu->name);
return -ENXIO;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
if (dmaru->ignored) {
/*
* we always have to disable PMRs or DMA may fail on this device
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
return 0;
}
intel_iommu_init_qi(iommu);
iommu_flush_write_buffer(iommu);
ret = dmar_set_interrupt(iommu);
if (ret)
goto disable_iommu;
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
if (si_domain) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret < 0 || si_domain->id != ret)
goto disable_iommu;
domain_attach_iommu(si_domain, iommu);
}
iommu_disable_protect_mem_regions(iommu);
return 0;
disable_iommu:
disable_dmar_iommu(iommu);
out:
free_dmar_iommu(iommu);
return ret;
}
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
int ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (!intel_iommu_enabled)
return 0;
if (iommu == NULL)
return -EINVAL;
if (insert) {
ret = intel_iommu_add(dmaru);
} else {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
return ret;
}
static void intel_iommu_free_dmars(void) static void intel_iommu_free_dmars(void)
{ {
struct dmar_rmrr_unit *rmrru, *rmrr_n; struct dmar_rmrr_unit *rmrru, *rmrr_n;
......
...@@ -36,7 +36,6 @@ struct hpet_scope { ...@@ -36,7 +36,6 @@ struct hpet_scope {
static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS]; static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
/* /*
* Lock ordering: * Lock ordering:
...@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) ...@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
int i; int i;
for (i = 0; i < MAX_HPET_TBS; i++) for (i = 0; i < MAX_HPET_TBS; i++)
if (ir_hpet[i].id == hpet_id) if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
return ir_hpet[i].iommu; return ir_hpet[i].iommu;
return NULL; return NULL;
} }
...@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic) ...@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
int i; int i;
for (i = 0; i < MAX_IO_APICS; i++) for (i = 0; i < MAX_IO_APICS; i++)
if (ir_ioapic[i].id == apic) if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu; return ir_ioapic[i].iommu;
return NULL; return NULL;
} }
...@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic) ...@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
down_read(&dmar_global_lock); down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) { for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic) { if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break; break;
} }
...@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id) ...@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
down_read(&dmar_global_lock); down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) { for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == id) { if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break; break;
} }
...@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) ...@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{ {
struct ir_table *ir_table; struct ir_table *ir_table;
struct page *pages; struct page *pages;
unsigned long *bitmap; unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), if (iommu->ir_table)
GFP_ATOMIC); return 0;
if (!iommu->ir_table) ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
if (!ir_table)
return -ENOMEM; return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
...@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) ...@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) { if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n", pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER); iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table); goto out_free_table;
return -ENOMEM;
} }
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC); sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) { if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER); goto out_free_pages;
kfree(ir_table);
return -ENOMEM;
} }
ir_table->base = page_address(pages); ir_table->base = page_address(pages);
ir_table->bitmap = bitmap; ir_table->bitmap = bitmap;
iommu->ir_table = ir_table;
iommu_set_irq_remapping(iommu, mode);
return 0; return 0;
out_free_pages:
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
kfree(ir_table);
return -ENOMEM;
}
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
if (iommu && iommu->ir_table) {
free_pages((unsigned long)iommu->ir_table->base,
INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
}
} }
/* /*
...@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void) ...@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap)) if (!ecap_ir_support(iommu->ecap))
continue; continue;
if (intel_setup_irq_remapping(iommu, eim)) if (intel_setup_irq_remapping(iommu))
goto error; goto error;
iommu_set_irq_remapping(iommu, eim);
setup = 1; setup = 1;
} }
...@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void) ...@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error: error:
/* for_each_iommu(iommu, drhd)
* handle error condition gracefully here! if (ecap_ir_support(iommu->ecap)) {
*/ iommu_disable_irq_remapping(iommu);
intel_teardown_irq_remapping(iommu);
}
if (x2apic_present) if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ static int __init intel_enable_irq_remapping(void)
return -1; return -1;
} }
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu) struct intel_iommu *iommu,
struct acpi_dmar_hardware_unit *drhd)
{ {
struct acpi_dmar_pci_path *path; struct acpi_dmar_pci_path *path;
u8 bus; u8 bus;
int count; int count, free = -1;
bus = scope->bus; bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1); path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS); PCI_SECONDARY_BUS);
path++; path++;
} }
ir_hpet[ir_hpet_num].bus = bus;
ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); for (count = 0; count < MAX_HPET_TBS; count++) {
ir_hpet[ir_hpet_num].iommu = iommu; if (ir_hpet[count].iommu == iommu &&
ir_hpet[ir_hpet_num].id = scope->enumeration_id; ir_hpet[count].id == scope->enumeration_id)
ir_hpet_num++; return 0;
else if (ir_hpet[count].iommu == NULL && free == -1)
free = count;
}
if (free == -1) {
pr_warn("Exceeded Max HPET blocks\n");
return -ENOSPC;
}
ir_hpet[free].iommu = iommu;
ir_hpet[free].id = scope->enumeration_id;
ir_hpet[free].bus = bus;
ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
pr_info("HPET id %d under DRHD base 0x%Lx\n",
scope->enumeration_id, drhd->address);
return 0;
} }
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu) struct intel_iommu *iommu,
struct acpi_dmar_hardware_unit *drhd)
{ {
struct acpi_dmar_pci_path *path; struct acpi_dmar_pci_path *path;
u8 bus; u8 bus;
int count; int count, free = -1;
bus = scope->bus; bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1); path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++; path++;
} }
ir_ioapic[ir_ioapic_num].bus = bus; for (count = 0; count < MAX_IO_APICS; count++) {
ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); if (ir_ioapic[count].iommu == iommu &&
ir_ioapic[ir_ioapic_num].iommu = iommu; ir_ioapic[count].id == scope->enumeration_id)
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; return 0;
ir_ioapic_num++; else if (ir_ioapic[count].iommu == NULL && free == -1)
free = count;
}
if (free == -1) {
pr_warn("Exceeded Max IO APICS\n");
return -ENOSPC;
}
ir_ioapic[free].bus = bus;
ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[free].iommu = iommu;
ir_ioapic[free].id = scope->enumeration_id;
pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
scope->enumeration_id, drhd->address, iommu->seq_id);
return 0;
} }
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu) struct intel_iommu *iommu)
{ {
int ret = 0;
struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope; struct acpi_dmar_device_scope *scope;
void *start, *end; void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header; drhd = (struct acpi_dmar_hardware_unit *)header;
start = (void *)(drhd + 1); start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length; end = ((void *)drhd) + header->length;
while (start < end) { while (start < end && ret == 0) {
scope = start; scope = start;
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
if (ir_ioapic_num == MAX_IO_APICS) { ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
printk(KERN_WARNING "Exceeded Max IO APICS\n"); else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
return -1; ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
start += scope->length;
} }
printk(KERN_INFO "IOAPIC id %d under DRHD base " return ret;
" 0x%Lx IOMMU %d\n", scope->enumeration_id, }
drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
if (ir_hpet_num == MAX_HPET_TBS) {
printk(KERN_WARNING "Exceeded Max HPET blocks\n");
return -1;
}
printk(KERN_INFO "HPET id %d under DRHD base" static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
" 0x%Lx\n", scope->enumeration_id, {
drhd->address); int i;
ir_parse_one_hpet_scope(scope, iommu); for (i = 0; i < MAX_HPET_TBS; i++)
} if (ir_hpet[i].iommu == iommu)
start += scope->length; ir_hpet[i].iommu = NULL;
}
return 0; for (i = 0; i < MAX_IO_APICS; i++)
if (ir_ioapic[i].iommu == iommu)
ir_ioapic[i].iommu = NULL;
} }
/* /*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq, .msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi, .alloc_hpet_msi = intel_alloc_hpet_msi,
}; };
/*
* Support of Interrupt Remapping Unit Hotplug
*/
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
int ret;
int eim = x2apic_enabled();
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
return -ENODEV;
}
if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
iommu->reg_phys);
return -ENODEV;
}
/* TODO: check all IOAPICs are covered by IOMMU */
/* Setup Interrupt-remapping now. */
ret = intel_setup_irq_remapping(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to allocate resource\n",
iommu->reg_phys);
ir_remove_ioapic_hpet_scope(iommu);
return ret;
}
if (!iommu->qi) {
/* Clear previous faults. */
dmar_fault(-1, iommu);
iommu_disable_irq_remapping(iommu);
dmar_disable_qi(iommu);
}
/* Enable queued invalidation */
ret = dmar_enable_qi(iommu);
if (!ret) {
iommu_set_irq_remapping(iommu, eim);
} else {
pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
iommu->reg_phys, iommu->ecap, ret);
intel_teardown_irq_remapping(iommu);
ir_remove_ioapic_hpet_scope(iommu);
}
return ret;
}
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
int ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (!irq_remapping_enabled)
return 0;
if (iommu == NULL)
return -EINVAL;
if (!ecap_ir_support(iommu->ecap))
return 0;
if (insert) {
if (!iommu->ir_table)
ret = dmar_ir_add(dmaru, iommu);
} else {
if (iommu->ir_table) {
if (!bitmap_empty(iommu->ir_table->bitmap,
INTR_REMAP_TABLE_ENTRIES)) {
ret = -EBUSY;
} else {
iommu_disable_irq_remapping(iommu);
intel_teardown_irq_remapping(iommu);
ir_remove_ioapic_hpet_scope(iommu);
}
}
}
return ret;
}
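/*
 * Illustrative sketch of how the DMAR hotplug core is expected to drive
 * dmar_ir_hotplug() when an IOMMU unit is added or removed. The wrapper
 * name and surrounding logic are assumptions for the example only, not
 * taken from this series.
 */
static int example_drhd_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret;

	/*
	 * Insertion parses the unit's IOAPIC/HPET scopes, allocates the IR
	 * table and enables queued invalidation; removal is refused with
	 * -EBUSY while remapping entries are still in use.
	 */
	ret = dmar_ir_hotplug(dmaru, insert);
	if (ret)
		pr_warn("DRHD %s failed: %d\n", insert ? "insert" : "removal", ret);

	return ret;
}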
@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
{ {
struct scatterlist *s; struct scatterlist *s;
size_t mapped = 0; size_t mapped = 0;
unsigned int i; unsigned int i, min_pagesz;
int ret; int ret;
if (unlikely(domain->ops->pgsize_bitmap == 0UL))
return 0;
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) { for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)); phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
/* We are mapping on page boundarys, so offset must be 0 */ /*
if (s->offset) * We are mapping on IOMMU page boundaries, so offset within
* the page must be 0. However, the IOMMU may support pages
* smaller than PAGE_SIZE, so s->offset may still represent
* an offset of that boundary within the CPU page.
*/
if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err; goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot); ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
......
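/*
 * Illustrative example of the relaxed alignment rule above: an sg offset
 * only has to be aligned to the IOMMU's smallest supported page size, not
 * to the CPU's PAGE_SIZE. The helper below is a sketch, not part of the
 * IOMMU API.
 */
static bool example_sg_offset_ok(struct iommu_domain *domain, unsigned int offset)
{
	unsigned int min_pagesz;

	if (!domain->ops->pgsize_bitmap)
		return false;

	/* e.g. pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M  =>  min_pagesz = 4 KiB */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/* On a 64 KiB-page CPU, an offset of 8 KiB now passes this check. */
	return IS_ALIGNED(offset, min_pagesz);
}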
@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu); dev_name(&pdev->dev), mmu);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
return irq; return ret;
} }
ipmmu_device_reset(mmu); ipmmu_device_reset(mmu);
......
@@ -73,7 +73,6 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
static void __disable_clocks(struct msm_iommu_drvdata *drvdata) static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{ {
if (drvdata->clk)
clk_disable(drvdata->clk); clk_disable(drvdata->clk);
clk_disable(drvdata->pclk); clk_disable(drvdata->pclk);
} }
......
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk; struct clk *iommu_clk;
struct clk *iommu_pclk; struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata; struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base; void __iomem *regs_base;
int ret, irq, par; int ret, irq, par;
@@ -224,7 +224,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata); platform_set_drvdata(pdev, drvdata);
if (iommu_clk)
clk_disable(iommu_clk); clk_disable(iommu_clk);
clk_disable(iommu_pclk); clk_disable(iommu_pclk);
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev) static int msm_iommu_ctx_probe(struct platform_device *pdev)
{ {
struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata; struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret; int i, ret;
@@ -323,7 +322,6 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3); SET_NSCFG(drvdata->base, mid, 3);
} }
if (drvdata->clk)
clk_disable(drvdata->clk); clk_disable(drvdata->clk);
clk_disable(drvdata->pclk); clk_disable(drvdata->pclk);
......
@@ -10,45 +10,35 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/module.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h" #include "omap-iopgtable.h"
#include "omap-iommu.h" #include "omap-iommu.h"
#define MAXCOLUMN 100 /* for short messages */
static DEFINE_MUTEX(iommu_debug_lock); static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root; static struct dentry *iommu_debug_root;
static ssize_t debug_read_ver(struct file *file, char __user *userbuf, static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
size_t count, loff_t *ppos)
{ {
u32 ver = omap_iommu_arch_version(); return !obj->domain;
char buf[MAXCOLUMN], *p = buf;
p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
} }
static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf; char *p, *buf;
ssize_t bytes; ssize_t bytes;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL); buf = kmalloc(count, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf; char *p, *buf;
ssize_t bytes, rest; ssize_t bytes, rest;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL); buf = kmalloc(count, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes; return bytes;
} }
static ssize_t debug_write_pagetable(struct file *file, static void dump_ioptable(struct seq_file *s)
const char __user *userbuf, size_t count, loff_t *ppos)
{
struct iotlb_entry e;
struct cr_regs cr;
int err;
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char buf[MAXCOLUMN], *p = buf;
count = min(count, sizeof(buf));
mutex_lock(&iommu_debug_lock);
if (copy_from_user(p, userbuf, count)) {
mutex_unlock(&iommu_debug_lock);
return -EFAULT;
}
sscanf(p, "%x %x", &cr.cam, &cr.ram);
if (!cr.cam || !cr.ram) {
mutex_unlock(&iommu_debug_lock);
return -EINVAL;
}
omap_iotlb_cr_to_e(&cr, &e);
err = omap_iopgtable_store_entry(obj, &e);
if (err)
dev_err(obj->dev, "%s: fail to store cr\n", __func__);
mutex_unlock(&iommu_debug_lock);
return count;
}
#define dump_ioptable_entry_one(lv, da, val) \
({ \
int __err = 0; \
ssize_t bytes; \
const int maxcol = 22; \
const char *str = "%d: %08x %08x\n"; \
bytes = snprintf(p, maxcol, str, lv, da, val); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
__err = -ENOMEM; \
__err; \
})
static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
{ {
int i; int i, j;
u32 *iopgd; u32 da;
char *p = buf; u32 *iopgd, *iopte;
struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock); spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0); iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
int j, err;
u32 *iopte;
u32 da;
if (!*iopgd) if (!*iopgd)
continue; continue;
if (!(*iopgd & IOPGD_TABLE)) { if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT; da = i << IOPGD_SHIFT;
seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
err = dump_ioptable_entry_one(1, da, *iopgd);
if (err)
goto out;
continue; continue;
} }
iopte = iopte_offset(iopgd, 0); iopte = iopte_offset(iopgd, 0);
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte) if (!*iopte)
continue; continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
err = dump_ioptable_entry_one(2, da, *iopgd); seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
if (err)
goto out;
} }
} }
out:
spin_unlock(&obj->page_table_lock);
return p - buf; spin_unlock(&obj->page_table_lock);
} }
static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, static int debug_read_pagetable(struct seq_file *s, void *data)
size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = s->private;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
size_t bytes;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); if (is_omap_iommu_detached(obj))
p += sprintf(p, "-----------------------------------------\n"); return -EPERM;
mutex_lock(&iommu_debug_lock); mutex_lock(&iommu_debug_lock);
bytes = PAGE_SIZE - (p - buf); seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
p += dump_ioptable(obj, p, bytes); seq_puts(s, "--------------------------\n");
dump_ioptable(s);
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
mutex_unlock(&iommu_debug_lock); mutex_unlock(&iommu_debug_lock);
free_page((unsigned long)buf);
return bytes; return 0;
} }
#define DEBUG_FOPS(name) \ #define DEBUG_SEQ_FOPS_RO(name) \
static int debug_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, debug_read_##name, inode->i_private); \
} \
\
static const struct file_operations debug_##name##_fops = { \ static const struct file_operations debug_##name##_fops = { \
.open = simple_open, \ .open = debug_open_##name, \
.read = debug_read_##name, \ .read = seq_read, \
.write = debug_write_##name, \ .llseek = seq_lseek, \
.llseek = generic_file_llseek, \ .release = single_release, \
}; }
#define DEBUG_FOPS_RO(name) \ #define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \ static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \ .llseek = generic_file_llseek, \
}; };
DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb); DEBUG_FOPS_RO(tlb);
DEBUG_FOPS(pagetable); DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \ #define __DEBUG_ADD_FILE(attr, mode) \
{ \ { \
struct dentry *dent; \ struct dentry *dent; \
dent = debugfs_create_file(#attr, mode, parent, \ dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
dev, &debug_##attr##_fops); \ obj, &debug_##attr##_fops); \
if (!dent) \ if (!dent) \
return -ENOMEM; \ goto err; \
} }
#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data) void omap_iommu_debugfs_add(struct omap_iommu *obj)
{ {
struct platform_device *pdev = to_platform_device(dev); struct dentry *d;
struct omap_iommu *obj = platform_get_drvdata(pdev);
struct omap_iommu_arch_data *arch_data;
struct dentry *d, *parent;
if (!obj || !obj->dev)
return -EINVAL;
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
return -ENOMEM;
arch_data->iommu_dev = obj;
dev->archdata.iommu = arch_data; if (!iommu_debug_root)
return;
d = debugfs_create_dir(obj->name, iommu_debug_root); obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
if (!d) if (!obj->debug_dir)
goto nomem; return;
parent = d;
d = debugfs_create_u8("nr_tlb_entries", 400, parent, d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries); (u8 *)&obj->nr_tlb_entries);
if (!d) if (!d)
goto nomem; return;
DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb); DEBUG_ADD_FILE_RO(tlb);
DEBUG_ADD_FILE(pagetable); DEBUG_ADD_FILE_RO(pagetable);
return 0; return;
nomem: err:
kfree(arch_data); debugfs_remove_recursive(obj->debug_dir);
return -ENOMEM;
} }
static int iommu_debug_unregister(struct device *dev, void *data) void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{ {
if (!dev->archdata.iommu) if (!obj->debug_dir)
return 0; return;
kfree(dev->archdata.iommu);
dev->archdata.iommu = NULL; debugfs_remove_recursive(obj->debug_dir);
return 0;
} }
static int __init iommu_debug_init(void) void __init omap_iommu_debugfs_init(void)
{ {
struct dentry *d; iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
int err; if (!iommu_debug_root)
pr_err("can't create debugfs dir\n");
d = debugfs_create_dir("iommu", NULL);
if (!d)
return -ENOMEM;
iommu_debug_root = d;
err = omap_foreach_iommu_device(d, iommu_debug_register);
if (err)
goto err_out;
return 0;
err_out:
debugfs_remove_recursive(iommu_debug_root);
return err;
} }
module_init(iommu_debug_init)
static void __exit iommu_debugfs_exit(void) void __exit omap_iommu_debugfs_exit(void)
{ {
debugfs_remove_recursive(iommu_debug_root); debugfs_remove(iommu_debug_root);
omap_foreach_iommu_device(NULL, iommu_debug_unregister);
} }
module_exit(iommu_debugfs_exit)
MODULE_DESCRIPTION("omap iommu: debugfs interface");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");
@@ -76,44 +76,9 @@ struct iotlb_lock {
short vict; short vict;
}; };
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;
static struct platform_driver omap_iommu_driver; static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep; static struct kmem_cache *iopte_cachep;
/**
* omap_install_iommu_arch - Install archtecure specific iommu functions
* @ops: a pointer to architecture specific iommu functions
*
* There are several kind of iommu algorithm(tlb, pagetable) among
* omap series. This interface installs such an iommu algorighm.
**/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
if (arch_iommu)
return -EBUSY;
arch_iommu = ops;
return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
* omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
* @ops: a pointer to architecture specific iommu functions
*
* This interface uninstalls the iommu algorighm installed previously.
**/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
if (arch_iommu != ops)
pr_err("%s: not your arch\n", __func__);
arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/** /**
* omap_iommu_save_ctx - Save registers for pm off-mode support * omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device * @dev: client device
@@ -121,8 +86,13 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
void omap_iommu_save_ctx(struct device *dev) void omap_iommu_save_ctx(struct device *dev)
{ {
struct omap_iommu *obj = dev_to_omap_iommu(dev); struct omap_iommu *obj = dev_to_omap_iommu(dev);
u32 *p = obj->ctx;
int i;
arch_iommu->save_ctx(obj); for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
p[i] = iommu_read_reg(obj, i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
} }
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
void omap_iommu_restore_ctx(struct device *dev) void omap_iommu_restore_ctx(struct device *dev)
{ {
struct omap_iommu *obj = dev_to_omap_iommu(dev); struct omap_iommu *obj = dev_to_omap_iommu(dev);
u32 *p = obj->ctx;
int i;
arch_iommu->restore_ctx(obj); for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
iommu_write_reg(obj, p[i], i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
} }
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/** static void __iommu_set_twl(struct omap_iommu *obj, bool on)
* omap_iommu_arch_version - Return running iommu arch version {
**/ u32 l = iommu_read_reg(obj, MMU_CNTL);
u32 omap_iommu_arch_version(void)
if (on)
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
else
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
l &= ~MMU_CNTL_MASK;
if (on)
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
else
l |= (MMU_CNTL_MMU_EN);
iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
iommu_write_reg(obj, pa, MMU_TTB);
if (obj->has_bus_err_back)
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
__iommu_set_twl(obj, true);
return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{ {
return arch_iommu->version; u32 l = iommu_read_reg(obj, MMU_CNTL);
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
} }
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj) static int iommu_enable(struct omap_iommu *obj)
{ {
int err; int err;
struct platform_device *pdev = to_platform_device(obj->dev); struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data; struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (!arch_iommu)
return -ENODEV;
if (pdata && pdata->deassert_reset) { if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name); err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
pm_runtime_get_sync(obj->dev); pm_runtime_get_sync(obj->dev);
err = arch_iommu->enable(obj); err = omap2_iommu_enable(obj);
return err; return err;
} }
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
static void iommu_disable(struct omap_iommu *obj) static void iommu_disable(struct omap_iommu *obj)
{ {
struct platform_device *pdev = to_platform_device(obj->dev); struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data; struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
arch_iommu->disable(obj); omap2_iommu_disable(obj);
pm_runtime_put_sync(obj->dev); pm_runtime_put_sync(obj->dev);
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
/* /*
* TLB operations * TLB operations
*/ */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
BUG_ON(!cr || !e);
arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr) static inline int iotlb_cr_valid(struct cr_regs *cr)
{ {
if (!cr) if (!cr)
return -EINVAL; return -EINVAL;
return arch_iommu->cr_valid(cr); return cr->cam & MMU_CAM_V;
}
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
if (!e)
return NULL;
return arch_iommu->alloc_cr(obj, e);
} }
static u32 iotlb_cr_to_virt(struct cr_regs *cr) static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{ {
return arch_iommu->cr_to_virt(cr); u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
u32 mask = get_cam_va_mask(cr->cam & page_size);
return cr->cam & mask;
} }
static u32 get_iopte_attr(struct iotlb_entry *e) static u32 get_iopte_attr(struct iotlb_entry *e)
{ {
return arch_iommu->get_pte_attr(e); u32 attr;
attr = e->mixed << 5;
attr |= e->endian;
attr |= e->elsz >> 3;
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
return attr;
} }
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{ {
return arch_iommu->fault_isr(obj, da); u32 status, fault_addr;
status = iommu_read_reg(obj, MMU_IRQSTATUS);
status &= MMU_IRQ_MASK;
if (!status) {
*da = 0;
return 0;
}
fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
*da = fault_addr;
iommu_write_reg(obj, status, MMU_IRQSTATUS);
return status;
} }
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{ {
arch_iommu->tlb_read_cr(obj, cr); cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
} }
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{ {
arch_iommu->tlb_load_cr(obj, cr); iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
iommu_write_reg(obj, cr->ram, MMU_RAM);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
iommu_write_reg(obj, 1, MMU_LD_TLB); iommu_write_reg(obj, 1, MMU_LD_TLB);
} }
/**
* iotlb_dump_cr - Dump an iommu tlb entry into buf
* @obj: target iommu
* @cr: contents of cam and ram register
* @buf: output buffer
**/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
char *buf)
{
BUG_ON(!cr || !buf);
return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */ /* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{ {
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
return cr; return cr;
} }
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
struct cr_regs *cr;
if (!e)
return NULL;
if (e->da & ~(get_cam_va_mask(e->pgsz))) {
dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
e->da);
return ERR_PTR(-EINVAL);
}
cr = kmalloc(sizeof(*cr), GFP_KERNEL);
if (!cr)
return ERR_PTR(-ENOMEM);
cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
}
/** /**
* load_iotlb_entry - Set an iommu tlb entry * load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu * @obj: target iommu
* @e: an iommu tlb entry info * @e: an iommu tlb entry info
**/ **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{ {
int err = 0; int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev); pm_runtime_put_sync(obj->dev);
} }
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) #ifdef CONFIG_OMAP_IOMMU_DEBUG
#define pr_reg(name) \
do { \
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
bytes = snprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
goto out; \
} while (0)
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
char *p = buf;
pr_reg(REVISION);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
pr_reg(CNTL);
pr_reg(FAULT_AD);
pr_reg(TTB);
pr_reg(LOCK);
pr_reg(LD_TLB);
pr_reg(CAM);
pr_reg(RAM);
pr_reg(GFLUSH);
pr_reg(FLUSH_ENTRY);
pr_reg(READ_CAM);
pr_reg(READ_RAM);
pr_reg(EMU_FAULT_AD);
out:
return p - buf;
}
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{ {
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
pm_runtime_get_sync(obj->dev); pm_runtime_get_sync(obj->dev);
bytes = arch_iommu->dump_ctx(obj, buf, bytes); bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
pm_runtime_put_sync(obj->dev); pm_runtime_put_sync(obj->dev);
return bytes; return bytes;
} }
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -463,6 +535,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
return p - crs; return p - crs;
} }
/**
* iotlb_dump_cr - Dump an iommu tlb entry into buf
* @obj: target iommu
* @cr: contents of cam and ram register
* @buf: output buffer
**/
static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
char *buf)
{
char *p = buf;
/* FIXME: Need more detail analysis of cam/ram */
p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
(cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
/** /**
* omap_dump_tlb_entries - dump cr arrays to given buffer * omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu * @obj: target iommu
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
return p - buf; return p - buf;
} }
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
return driver_for_each_device(&omap_iommu_driver.driver,
NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ #endif /* CONFIG_OMAP_IOMMU_DEBUG */
/* /*
* H/W pagetable operations * H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
* @obj: target iommu * @obj: target iommu
* @e: an iommu tlb entry info * @e: an iommu tlb entry info
**/ **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{ {
int err; int err;
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
prefetch_iotlb_entry(obj, e); prefetch_iotlb_entry(obj, e);
return err; return err;
} }
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/** /**
* iopgtable_lookup_entry - Lookup an iommu pte entry * iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte; u32 *iopgd, *iopte;
struct omap_iommu *obj = data; struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain; struct iommu_domain *domain = obj->domain;
struct omap_iommu_domain *omap_domain = domain->priv;
if (!obj->refcount) if (!omap_domain->iommu_dev)
return IRQ_NONE; return IRQ_NONE;
errs = iommu_report_fault(obj, &da); errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_lock(&obj->iommu_lock); spin_lock(&obj->iommu_lock);
/* an iommu device can only be attached once */
if (++obj->refcount > 1) {
dev_err(dev, "%s: already attached!\n", obj->name);
err = -EBUSY;
goto err_enable;
}
obj->iopgd = iopgd; obj->iopgd = iopgd;
err = iommu_enable(obj); err = iommu_enable(obj);
if (err) if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
return obj; return obj;
err_enable: err_enable:
obj->refcount--;
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
return ERR_PTR(err); return ERR_PTR(err);
} }
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock); spin_lock(&obj->iommu_lock);
if (--obj->refcount == 0)
iommu_disable(obj); iommu_disable(obj);
obj->iopgd = NULL; obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq; int irq;
struct omap_iommu *obj; struct omap_iommu *obj;
struct resource *res; struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data; struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node; struct device_node *of = pdev->dev.of_node;
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
pm_runtime_irq_safe(obj->dev); pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev); pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name); dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0; return 0;
} }
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct omap_iommu *obj = platform_get_drvdata(pdev); struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj); iopgtable_clear_entry_all(obj);
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev); pm_runtime_disable(obj->dev);
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e->da = da; e->da = da;
e->pa = pa; e->pa = pa;
e->valid = MMU_CAM_V; e->valid = MMU_CAM_V;
/* FIXME: add OMAP1 support */
e->pgsz = pgsz; e->pgsz = pgsz;
e->endian = MMU_RAM_ENDIAN_LITTLE; e->endian = MMU_RAM_ENDIAN_LITTLE;
e->elsz = MMU_RAM_ELSZ_8; e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->iommu_dev = arch_data->iommu_dev = NULL; omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL; omap_domain->dev = NULL;
oiommu->domain = NULL;
} }
static void omap_iommu_detach_dev(struct iommu_domain *domain, static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
bus_set_iommu(&platform_bus_type, &omap_iommu_ops); bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
omap_iommu_debugfs_init();
return platform_driver_register(&omap_iommu_driver); return platform_driver_register(&omap_iommu_driver);
} }
/* must be ready before omap3isp is probed */ /* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
kmem_cache_destroy(iopte_cachep); kmem_cache_destroy(iopte_cachep);
platform_driver_unregister(&omap_iommu_driver); platform_driver_unregister(&omap_iommu_driver);
omap_iommu_debugfs_exit();
} }
module_exit(omap_iommu_exit); module_exit(omap_iommu_exit);
......
@@ -10,9 +10,8 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#if defined(CONFIG_ARCH_OMAP1) #ifndef _OMAP_IOMMU_H
#error "iommu for this processor not implemented yet" #define _OMAP_IOMMU_H
#endif
struct iotlb_entry { struct iotlb_entry {
u32 da; u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name; const char *name;
void __iomem *regbase; void __iomem *regbase;
struct device *dev; struct device *dev;
void *isr_priv;
struct iommu_domain *domain; struct iommu_domain *domain;
struct dentry *debug_dir;
unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */ spinlock_t iommu_lock; /* global for this whole object */
/* /*
@@ -67,34 +65,6 @@ struct cr_regs {
}; };
}; };
/* architecture specific functions */
struct iommu_functions {
unsigned long version;
int (*enable)(struct omap_iommu *obj);
void (*disable)(struct omap_iommu *obj);
void (*set_twl)(struct omap_iommu *obj, bool on);
u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
struct iotlb_entry *e);
int (*cr_valid)(struct cr_regs *cr);
u32 (*cr_to_virt)(struct cr_regs *cr);
void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
char *buf);
u32 (*get_pte_attr)(struct iotlb_entry *e);
void (*save_ctx)(struct omap_iommu *obj);
void (*restore_ctx)(struct omap_iommu *obj);
ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
};
#ifdef CONFIG_IOMMU_API
/** /**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device * @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev; return arch_data->iommu_dev;
} }
#endif
/* /*
* MMU Register offsets * MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* /*
* MMU Register bit definitions * MMU Register bit definitions
*/ */
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
/* CAM */
#define MMU_CAM_VATAG_SHIFT 12 #define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \ #define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0) #define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0) #define MMU_CAM_PGSZ_16M (3 << 0)
/* RAM */
#define MMU_RAM_PADDR_SHIFT 12 #define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \ #define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
#define get_cam_va_mask(pgsz) \
(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/* /*
* utilities for super page(16MB, 1MB, 64KB and 4KB) * utilities for super page(16MB, 1MB, 64KB and 4KB)
*/ */
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* /*
* global functions * global functions
*/ */
extern u32 omap_iommu_arch_version(void); #ifdef CONFIG_OMAP_IOMMU_DEBUG
extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
extern int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
extern int omap_foreach_iommu_device(void *data,
int (*fn)(struct device *, void *));
extern int omap_install_iommu_arch(const struct iommu_functions *ops);
extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
extern ssize_t extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
void omap_iommu_debugfs_add(struct omap_iommu *obj);
void omap_iommu_debugfs_remove(struct omap_iommu *obj);
#else
static inline void omap_iommu_debugfs_init(void) { }
static inline void omap_iommu_debugfs_exit(void) { }
static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
#endif
/* /*
* register accessors * register accessors
*/ */
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{ {
__raw_writel(val, obj->regbase + offs); __raw_writel(val, obj->regbase + offs);
} }
#endif /* _OMAP_IOMMU_H */
/*
* omap iommu: omap2/3 architecture specific functions
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iommu.h"
/*
* omap2 architecture specific register bit definitions
*/
#define IOMMU_ARCH_VERSION 0x00000011
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
#define get_cam_va_mask(pgsz) \
(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/* IOMMU errors */
#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
if (on)
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
else
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
l &= ~MMU_CNTL_MASK;
if (on)
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
else
l |= (MMU_CNTL_MMU_EN);
iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
iommu_write_reg(obj, pa, MMU_TTB);
if (obj->has_bus_err_back)
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
__iommu_set_twl(obj, true);
return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
{
__iommu_set_twl(obj, false);
}
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
u32 stat, da;
u32 errs = 0;
stat = iommu_read_reg(obj, MMU_IRQSTATUS);
stat &= MMU_IRQ_MASK;
if (!stat) {
*ra = 0;
return 0;
}
da = iommu_read_reg(obj, MMU_FAULT_AD);
*ra = da;
if (stat & MMU_IRQ_TLBMISS)
errs |= OMAP_IOMMU_ERR_TLB_MISS;
if (stat & MMU_IRQ_TRANSLATIONFAULT)
errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
if (stat & MMU_IRQ_EMUMISS)
errs |= OMAP_IOMMU_ERR_EMU_MISS;
if (stat & MMU_IRQ_TABLEWALKFAULT)
errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
if (stat & MMU_IRQ_MULTIHITFAULT)
errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
return errs;
}
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
iommu_write_reg(obj, cr->ram, MMU_RAM);
}
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
u32 mask = get_cam_va_mask(cr->cam & page_size);
return cr->cam & mask;
}
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
struct cr_regs *cr;
if (e->da & ~(get_cam_va_mask(e->pgsz))) {
dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
e->da);
return ERR_PTR(-EINVAL);
}
cr = kmalloc(sizeof(*cr), GFP_KERNEL);
if (!cr)
return ERR_PTR(-ENOMEM);
cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
}
static inline int omap2_cr_valid(struct cr_regs *cr)
{
return cr->cam & MMU_CAM_V;
}
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
u32 attr;
attr = e->mixed << 5;
attr |= e->endian;
attr |= e->elsz >> 3;
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
return attr;
}
static ssize_t
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
{
char *p = buf;
/* FIXME: Need more detail analysis of cam/ram */
p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
(cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
#define pr_reg(name) \
do { \
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
bytes = snprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
goto out; \
} while (0)
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
char *p = buf;
pr_reg(REVISION);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
pr_reg(CNTL);
pr_reg(FAULT_AD);
pr_reg(TTB);
pr_reg(LOCK);
pr_reg(LD_TLB);
pr_reg(CAM);
pr_reg(RAM);
pr_reg(GFLUSH);
pr_reg(FLUSH_ENTRY);
pr_reg(READ_CAM);
pr_reg(READ_RAM);
pr_reg(EMU_FAULT_AD);
out:
return p - buf;
}
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
int i;
u32 *p = obj->ctx;
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
p[i] = iommu_read_reg(obj, i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
int i;
u32 *p = obj->ctx;
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
iommu_write_reg(obj, p[i], i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
e->da = cr->cam & MMU_CAM_VATAG_MASK;
e->pa = cr->ram & MMU_RAM_PADDR_MASK;
e->valid = cr->cam & MMU_CAM_V;
e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
e->mixed = cr->ram & MMU_RAM_MIXED;
}
static const struct iommu_functions omap2_iommu_ops = {
.version = IOMMU_ARCH_VERSION,
.enable = omap2_iommu_enable,
.disable = omap2_iommu_disable,
.set_twl = omap2_iommu_set_twl,
.fault_isr = omap2_iommu_fault_isr,
.tlb_read_cr = omap2_tlb_read_cr,
.tlb_load_cr = omap2_tlb_load_cr,
.cr_to_e = omap2_cr_to_e,
.cr_to_virt = omap2_cr_to_virt,
.alloc_cr = omap2_alloc_cr,
.cr_valid = omap2_cr_valid,
.dump_cr = omap2_dump_cr,
.get_pte_attr = omap2_get_pte_attr,
.save_ctx = omap2_iommu_save_ctx,
.restore_ctx = omap2_iommu_restore_ctx,
.dump_ctx = omap2_iommu_dump_ctx,
};
static int __init omap2_iommu_init(void)
{
return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);
static void __exit omap2_iommu_exit(void)
{
omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/** MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
#define RK_MMU_STATUS 0x04
#define RK_MMU_COMMAND 0x08
#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
#define RK_MMU_AUTO_GATING 0x24
#define DTE_ADDR_DUMMY 0xCAFEBABE
#define FORCE_RESET_TIMEOUT 100 /* ms */
/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
#define RK_MMU_STATUS_IDLE BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; paging resumes */
#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
/*
* Support mapping any size that fits in one page table:
* 4 KiB to 4 MiB
*/
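/* Bits 12 through 22 are set: every power-of-two size from 4 KiB up to 4 MiB. */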
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
#define IOMMU_REG_POLL_COUNT_FAST 1000
struct rk_iommu_domain {
struct list_head iommus;
u32 *dt; /* page directory table */
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
};
struct rk_iommu {
struct device *dev;
void __iomem *base;
int irq;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
};
static inline void rk_table_flush(u32 *va, unsigned int count)
{
phys_addr_t pa_start = virt_to_phys(va);
phys_addr_t pa_end = virt_to_phys(va + count);
size_t size = pa_end - pa_start;
__cpuc_flush_dcache_area(va, size);
outer_flush_range(pa_start, pa_end);
}
/*
* Inspired by _wait_for in intel_drv.h
* This is NOT safe for use in interrupt context.
*
* Note that it's important that we check the condition again after having
* timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
*/
#define rk_wait_for(COND, MS) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = (COND) ? 0 : -ETIMEDOUT; \
break; \
} \
usleep_range(50, 100); \
} \
ret__; \
})
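/*
 * Illustrative use (as in rk_iommu_enable_stall() below):
 *	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 * polls the condition every 50-100 us and returns -ETIMEDOUT only if the
 * condition is still false once the timeout has expired.
 */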
/*
* The Rockchip rk3288 iommu uses a 2-level page table.
* The first level is the "Directory Table" (DT).
* The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
* to a "Page Table".
* The second level is the 1024 Page Tables (PT).
* Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
* a 4 KB page of physical memory.
*
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
* Each iommu device has a MMU_DTE_ADDR register that contains the physical
* address of the start of the DT page.
*
* The structure of the page table is as follows:
*
* DT
* MMU_DTE_ADDR -> +-----+
* | |
* +-----+ PT
* | DTE | -> +-----+
* +-----+ | | Memory
* | | +-----+ Page
* | | | PTE | -> +-----+
* +-----+ +-----+ | |
* | | | |
* | | | |
* +-----+ | |
* | |
* | |
* +-----+
*/
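/*
 * A fully populated DT therefore covers 1024 * 1024 * 4 KiB = 4 GiB of IOVA
 * space and needs at most 1 + 1024 pages of page table memory.
 */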
/*
* Each DTE has a PT address and a valid bit:
* +---------------------+-----------+-+
* | PT address | Reserved |V|
* +---------------------+-----------+-+
 * 31:12 - PT address (PTs always start on a 4 KB boundary)
* 11: 1 - Reserved
* 0 - 1 if PT @ PT address is valid
*/
#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
#define RK_DTE_PT_VALID BIT(0)
static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}
static inline bool rk_dte_is_pt_valid(u32 dte)
{
return dte & RK_DTE_PT_VALID;
}
static u32 rk_mk_dte(u32 *pt)
{
phys_addr_t pt_phys = virt_to_phys(pt);
return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
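/*
 * Illustrative example: a page table page at physical address 0x3ff45000
 * yields the DTE 0x3ff45001 (address bits 31:12 plus the valid bit).
 */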
/*
* Each PTE has a Page address, some flags and a valid bit:
* +---------------------+---+-------+-+
* | Page address |Rsv| Flags |V|
* +---------------------+---+-------+-+
* 31:12 - Page address (Pages always start on a 4 KB boundary)
* 11: 9 - Reserved
* 8: 1 - Flags
* 8 - Read allocate - allocate cache space on read misses
* 7 - Read cache - enable cache & prefetch of data
* 6 - Write buffer - enable delaying writes on their way to memory
* 5 - Write allocate - allocate cache space on write misses
* 4 - Write cache - different writes can be merged together
* 3 - Override cache attributes
* if 1, bits 4-8 control cache attributes
* if 0, the system bus defaults are used
* 2 - Writable
* 1 - Readable
* 0 - 1 if Page @ Page address is valid
*/
#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
#define RK_PTE_PAGE_WRITABLE BIT(2)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)
static inline phys_addr_t rk_pte_page_address(u32 pte)
{
return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}
static inline bool rk_pte_is_page_valid(u32 pte)
{
return pte & RK_PTE_PAGE_VALID;
}
/* TODO: set cache flags when prot includes IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
u32 flags = 0;
flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
page &= RK_PTE_PAGE_ADDRESS_MASK;
return page | flags | RK_PTE_PAGE_VALID;
}
static u32 rk_mk_pte_invalid(u32 pte)
{
return pte & ~RK_PTE_PAGE_VALID;
}
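/*
 * Illustrative example: rk_mk_pte(0x40001000, IOMMU_READ | IOMMU_WRITE)
 * returns 0x40001007 (page address | readable | writable | valid);
 * rk_mk_pte_invalid() simply clears the valid bit again.
 */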
/*
* rk3288 iova (IOMMU Virtual Address) format
* 31 22.21 12.11 0
* +-----------+-----------+-------------+
* | DTE index | PTE index | Page offset |
* +-----------+-----------+-------------+
* 31:22 - DTE index - index of DTE in DT
* 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
* 11: 0 - Page offset - offset into page @ PTE.page_address
*/
#define RK_IOVA_DTE_MASK 0xffc00000
#define RK_IOVA_DTE_SHIFT 22
#define RK_IOVA_PTE_MASK 0x003ff000
#define RK_IOVA_PTE_SHIFT 12
#define RK_IOVA_PAGE_MASK 0x00000fff
#define RK_IOVA_PAGE_SHIFT 0
static u32 rk_iova_dte_index(dma_addr_t iova)
{
return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}
static u32 rk_iova_pte_index(dma_addr_t iova)
{
return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}
static u32 rk_iova_page_offset(dma_addr_t iova)
{
return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
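/*
 * Illustrative example: iova 0x12345678 decomposes into dte_index 0x048,
 * pte_index 0x345 and page_offset 0x678.
 */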
static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
{
return readl(iommu->base + offset);
}
static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
{
writel(value, iommu->base + offset);
}
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
writel(command, iommu->base + RK_MMU_COMMAND);
}
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
size_t size)
{
dma_addr_t iova_end = iova + size;
/*
	 * TODO(djkurtz): Figure out when it is more efficient to shoot down the
* entire iotlb rather than iterate over individual iovas.
*/
for (; iova < iova_end; iova += SPAGE_SIZE)
rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
}
static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
return rk_iommu_read(iommu, RK_MMU_STATUS) &
RK_MMU_STATUS_PAGING_ENABLED;
}
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
int ret;
if (rk_iommu_is_stall_active(iommu))
return 0;
/* Stall can only be enabled if paging is enabled */
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
if (ret)
dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
rk_iommu_read(iommu, RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
int ret;
if (!rk_iommu_is_stall_active(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
if (ret)
dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
rk_iommu_read(iommu, RK_MMU_STATUS));
return ret;
}
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
int ret;
if (rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
rk_iommu_read(iommu, RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
int ret;
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
rk_iommu_read(iommu, RK_MMU_STATUS));
return ret;
}
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
int ret;
u32 dte_addr;
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
* and verifying that upper 5 nybbles are read back.
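	 * (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK == 0xCAFEB000, i.e. bits 31:12).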
*/
rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
return -EFAULT;
}
rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
FORCE_RESET_TIMEOUT);
if (ret)
dev_err(iommu->dev, "FORCE_RESET command timed out\n");
return ret;
}
static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
{
u32 dte_index, pte_index, page_offset;
u32 mmu_dte_addr;
phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
u32 *dte_addr;
u32 dte;
phys_addr_t pte_addr_phys = 0;
u32 *pte_addr = NULL;
u32 pte = 0;
phys_addr_t page_addr_phys = 0;
u32 page_flags = 0;
dte_index = rk_iova_dte_index(iova);
pte_index = rk_iova_pte_index(iova);
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
dte = *dte_addr;
if (!rk_dte_is_pt_valid(dte))
goto print_it;
pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
pte_addr = phys_to_virt(pte_addr_phys);
pte = *pte_addr;
if (!rk_pte_is_page_valid(pte))
goto print_it;
page_addr_phys = rk_pte_page_address(pte) + page_offset;
page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
print_it:
dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
&iova, dte_index, pte_index, page_offset);
dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
&mmu_dte_addr_phys, &dte_addr_phys, dte,
rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
struct rk_iommu *iommu = dev_id;
u32 status;
u32 int_status;
dma_addr_t iova;
int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
if (int_status == 0)
return IRQ_NONE;
iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
int flags;
status = rk_iommu_read(iommu, RK_MMU_STATUS);
flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
dev_err(iommu->dev, "Page fault at %pad of type %s\n",
&iova,
(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
log_iova(iommu, iova);
/*
* Report page fault to any installed handlers.
* Ignore the return code, though, since we always zap cache
* and clear the page fault anyway.
*/
if (iommu->domain)
report_iommu_fault(iommu->domain, iommu->dev, iova,
flags);
else
dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
}
if (int_status & RK_MMU_IRQ_BUS_ERROR)
dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
if (int_status & ~RK_MMU_IRQ_MASK)
dev_err(iommu->dev, "unexpected int_status: %#08x\n",
int_status);
rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
return IRQ_HANDLED;
}
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct rk_iommu_domain *rk_domain = domain->priv;
unsigned long flags;
phys_addr_t pt_phys, phys = 0;
u32 dte, pte;
u32 *page_table;
spin_lock_irqsave(&rk_domain->dt_lock, flags);
dte = rk_domain->dt[rk_iova_dte_index(iova)];
if (!rk_dte_is_pt_valid(dte))
goto out;
pt_phys = rk_dte_pt_address(dte);
page_table = (u32 *)phys_to_virt(pt_phys);
pte = page_table[rk_iova_pte_index(iova)];
if (!rk_pte_is_page_valid(pte))
goto out;
phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return phys;
}
static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
dma_addr_t iova, size_t size)
{
struct list_head *pos;
unsigned long flags;
	/* shoot down these iovas from all iommus using this domain */
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
iommu = list_entry(pos, struct rk_iommu, node);
rk_iommu_zap_lines(iommu, iova, size);
}
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
dma_addr_t iova)
{
u32 *page_table, *dte_addr;
u32 dte;
phys_addr_t pt_phys;
assert_spin_locked(&rk_domain->dt_lock);
dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
dte = *dte_addr;
if (rk_dte_is_pt_valid(dte))
goto done;
page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
if (!page_table)
return ERR_PTR(-ENOMEM);
dte = rk_mk_dte(page_table);
*dte_addr = dte;
rk_table_flush(page_table, NUM_PT_ENTRIES);
rk_table_flush(dte_addr, 1);
/*
	 * Zap the first iova of the newly allocated page table so the iommu
	 * evicts any stale value it may have cached for the new dte.
*/
rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
done:
pt_phys = rk_dte_pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
u32 *pte_addr, dma_addr_t iova, size_t size)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
assert_spin_locked(&rk_domain->dt_lock);
for (pte_count = 0; pte_count < pte_total; pte_count++) {
u32 pte = pte_addr[pte_count];
if (!rk_pte_is_page_valid(pte))
break;
pte_addr[pte_count] = rk_mk_pte_invalid(pte);
}
rk_table_flush(pte_addr, pte_count);
return pte_count * SPAGE_SIZE;
}
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
dma_addr_t iova, phys_addr_t paddr, size_t size,
int prot)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
phys_addr_t page_phys;
assert_spin_locked(&rk_domain->dt_lock);
for (pte_count = 0; pte_count < pte_total; pte_count++) {
u32 pte = pte_addr[pte_count];
if (rk_pte_is_page_valid(pte))
goto unwind;
pte_addr[pte_count] = rk_mk_pte(paddr, prot);
paddr += SPAGE_SIZE;
}
rk_table_flush(pte_addr, pte_count);
return 0;
unwind:
/* Unmap the range of iovas that we just mapped */
rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
page_phys = rk_pte_page_address(pte_addr[pte_count]);
pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
&iova, &page_phys, &paddr, prot);
return -EADDRINUSE;
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot)
{
struct rk_iommu_domain *rk_domain = domain->priv;
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
int ret;
spin_lock_irqsave(&rk_domain->dt_lock, flags);
/*
* pgsize_bitmap specifies iova sizes that fit in one page table
* (1024 4-KiB pages = 4 MiB).
	 * So size always satisfies 4096 <= size <= 4194304.
* Since iommu_map() guarantees that both iova and size will be
* aligned, we will always only be mapping from a single dte here.
*/
page_table = rk_dte_get_page_table(rk_domain, iova);
if (IS_ERR(page_table)) {
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return PTR_ERR(page_table);
}
pte_addr = &page_table[rk_iova_pte_index(iova)];
ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return ret;
}
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
size_t size)
{
struct rk_iommu_domain *rk_domain = domain->priv;
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
u32 dte;
u32 *pte_addr;
size_t unmap_size;
spin_lock_irqsave(&rk_domain->dt_lock, flags);
/*
* pgsize_bitmap specifies iova sizes that fit in one page table
* (1024 4-KiB pages = 4 MiB).
	 * So size always satisfies 4096 <= size <= 4194304.
* Since iommu_unmap() guarantees that both iova and size will be
* aligned, we will always only be unmapping from a single dte here.
*/
dte = rk_domain->dt[rk_iova_dte_index(iova)];
/* Just return 0 if iova is unmapped */
if (!rk_dte_is_pt_valid(dte)) {
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return 0;
}
pt_phys = rk_dte_pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
/* Shootdown iotlb entries for iova range that was just unmapped */
rk_iommu_zap_iova(rk_domain, iova, unmap_size);
return unmap_size;
}
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
struct iommu_group *group;
struct device *iommu_dev;
struct rk_iommu *rk_iommu;
group = iommu_group_get(dev);
if (!group)
return NULL;
iommu_dev = iommu_group_get_iommudata(group);
rk_iommu = dev_get_drvdata(iommu_dev);
iommu_group_put(group);
return rk_iommu;
}
static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv;
unsigned long flags;
int ret;
phys_addr_t dte_addr;
/*
* Allow 'virtual devices' (e.g., drm) to attach to domain.
* Such a device does not belong to an iommu group.
*/
iommu = rk_iommu_from_dev(dev);
if (!iommu)
return 0;
ret = rk_iommu_enable_stall(iommu);
if (ret)
return ret;
ret = rk_iommu_force_reset(iommu);
if (ret)
return ret;
iommu->domain = domain;
ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
IRQF_SHARED, dev_name(dev), iommu);
if (ret)
return ret;
dte_addr = virt_to_phys(rk_domain->dt);
rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
ret = rk_iommu_enable_paging(iommu);
if (ret)
return ret;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_add_tail(&iommu->node, &rk_domain->iommus);
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
dev_info(dev, "Attached to iommu domain\n");
rk_iommu_disable_stall(iommu);
return 0;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv;
unsigned long flags;
	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
if (!iommu)
return;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_del_init(&iommu->node);
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
/* Ignore error while disabling, just keep going */
rk_iommu_enable_stall(iommu);
rk_iommu_disable_paging(iommu);
rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
rk_iommu_disable_stall(iommu);
devm_free_irq(dev, iommu->irq, iommu);
iommu->domain = NULL;
dev_info(dev, "Detached from iommu domain\n");
}
static int rk_iommu_domain_init(struct iommu_domain *domain)
{
struct rk_iommu_domain *rk_domain;
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return -ENOMEM;
/*
* rk32xx iommus use a 2 level pagetable.
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
if (!rk_domain->dt)
goto err_dt;
rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
domain->priv = rk_domain;
return 0;
err_dt:
kfree(rk_domain);
return -ENOMEM;
}
static void rk_iommu_domain_destroy(struct iommu_domain *domain)
{
struct rk_iommu_domain *rk_domain = domain->priv;
int i;
WARN_ON(!list_empty(&rk_domain->iommus));
for (i = 0; i < NUM_DT_ENTRIES; i++) {
u32 dte = rk_domain->dt[i];
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_dte_pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
free_page((unsigned long)page_table);
}
}
free_page((unsigned long)rk_domain->dt);
kfree(domain->priv);
domain->priv = NULL;
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
struct device_node *np = dev->of_node;
int ret;
/*
* An iommu master has an iommus property containing a list of phandles
* to iommu nodes, each with an #iommu-cells property with value 0.
*/
ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
return (ret > 0);
}
static int rk_iommu_group_set_iommudata(struct iommu_group *group,
struct device *dev)
{
struct device_node *np = dev->of_node;
struct platform_device *pd;
int ret;
struct of_phandle_args args;
/*
* An iommu master has an iommus property containing a list of phandles
* to iommu nodes, each with an #iommu-cells property with value 0.
*/
ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
&args);
if (ret) {
dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
np->full_name, ret);
return ret;
}
if (args.args_count != 0) {
dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
args.np->full_name, args.args_count);
return -EINVAL;
}
pd = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!pd) {
dev_err(dev, "iommu %s not found\n", args.np->full_name);
return -EPROBE_DEFER;
}
/* TODO(djkurtz): handle multiple slave iommus for a single master */
iommu_group_set_iommudata(group, &pd->dev, NULL);
return 0;
}
static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
int ret;
if (!rk_iommu_is_dev_iommu_master(dev))
return -ENODEV;
group = iommu_group_get(dev);
if (!group) {
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
return PTR_ERR(group);
}
}
ret = iommu_group_add_device(group, dev);
if (ret)
goto err_put_group;
ret = rk_iommu_group_set_iommudata(group, dev);
if (ret)
goto err_remove_device;
iommu_group_put(group);
return 0;
err_remove_device:
iommu_group_remove_device(dev);
err_put_group:
iommu_group_put(group);
return ret;
}
static void rk_iommu_remove_device(struct device *dev)
{
if (!rk_iommu_is_dev_iommu_master(dev))
return;
iommu_group_remove_device(dev);
}
static const struct iommu_ops rk_iommu_ops = {
.domain_init = rk_iommu_domain_init,
.domain_destroy = rk_iommu_domain_destroy,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
static int rk_iommu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iommu->base))
return PTR_ERR(iommu->base);
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
return -ENXIO;
}
return 0;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id rk_iommu_dt_ids[] = {
{ .compatible = "rockchip,iommu" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
#endif
static struct platform_driver rk_iommu_driver = {
.probe = rk_iommu_probe,
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
static int __init rk_iommu_init(void)
{
int ret;
ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
if (ret)
return ret;
return platform_driver_register(&rk_iommu_driver);
}
static void __exit rk_iommu_exit(void)
{
platform_driver_unregister(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");
...@@ -30,6 +30,12 @@
struct acpi_dmar_header;
#ifdef CONFIG_X86
# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
#else
# define DMAR_UNITS_SUPPORTED 64
#endif
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
...@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
return 0;
}
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
}
static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
return 0;
}
#endif /* CONFIG_INTEL_IOMMU */
#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
{
return 0;
}
static inline int dmar_device_remove(void *handle)
{
return 0;
}
#endif /* CONFIG_DMAR_TABLE */
......
...@@ -28,7 +28,7 @@
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
...@@ -62,6 +62,7 @@ enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
};
......
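The IOMMU_NOEXEC mapping flag and the IOMMU_CAP_NOEXEC capability are meant to be used together: a caller should only request a no-execute mapping when the IOMMU behind the bus advertises that it can enforce one. A minimal, illustrative sketch of that pattern, assuming the iommu_capable()/iommu_map() interfaces as they exist at this point in the tree (domain, iova, paddr and ret are assumed to be declared by the caller; this is not code from the series itself):
int prot = IOMMU_READ | IOMMU_WRITE;
/* Request a no-execute mapping only if the hardware can enforce it. */
if (iommu_capable(&platform_bus_type, IOMMU_CAP_NOEXEC))
	prot |= IOMMU_NOEXEC;
ret = iommu_map(domain, iova, paddr, SZ_4K, prot);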