Commit 99e38df8 authored by Linus Torvalds

Merge tag 'iommu-updates-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "The updates include:

   - Small code cleanups in the AMD IOMMUv2 driver

   - Scalability improvements for the DMA-API implementation of the AMD
     IOMMU driver.  This is just a starting point, but already showed
     some good improvements in my tests.

   - Removal of the unused Renesas IPMMU/IPMMUI driver

   - Updates for ARM-SMMU include:
      * Some fixes to get the driver working nicely on Broadcom hardware
      * A change to the io-pgtable API to indicate the unit in which to
        flush (all callers converted, with Ack from Laurent)
      * Use of devm_* for allocating/freeing the SMMUv3 buffers

   - Some other small fixes and improvements for other drivers"
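
To make the starting point concrete: the "Make dma_ops_domain->next_index percpu" change in the commit list below replaces a single shared search cursor with one per CPU, so concurrent DMA-API allocations stop bouncing one hot counter between cores. A minimal sketch of that idea, with illustrative names (dom_sketch, nr_ranges) rather than the driver's actual code:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Sketch only: a per-CPU allocation cursor in the style of the
 * next_index change; the struct and helpers here are assumptions. */
struct dom_sketch {
	u32 __percpu *next_index;	/* per-CPU search cursor */
	u32 nr_ranges;			/* number of aperture ranges */
};

static int dom_sketch_init(struct dom_sketch *dom)
{
	dom->next_index = alloc_percpu(u32);
	return dom->next_index ? 0 : -ENOMEM;
}

/* Each CPU resumes searching from its own cursor, not a shared one */
static u32 dom_sketch_first_range(struct dom_sketch *dom)
{
	return this_cpu_read(*dom->next_index) % dom->nr_ranges;
}

static void dom_sketch_advance(struct dom_sketch *dom)
{
	this_cpu_inc(*dom->next_index);
}

The contention this removes pairs with the other allocator changes in the list that follows (trylock on the bitmap_lock, relaxed locking in the dma_ops path).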

* tag 'iommu-updates-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (46 commits)
  iommu/vt-d: Fix up error handling in alloc_iommu
  iommu/vt-d: Check the return value of iommu_device_create()
  iommu/amd: Remove an unneeded condition
  iommu/amd: Preallocate dma_ops apertures based on dma_mask
  iommu/amd: Use trylock to aquire bitmap_lock
  iommu/amd: Make dma_ops_domain->next_index percpu
  iommu/amd: Relax locking in dma_ops path
  iommu/amd: Initialize new aperture range before making it visible
  iommu/amd: Build io page-tables with cmpxchg64
  iommu/amd: Allocate new aperture ranges in dma_ops_alloc_addresses
  iommu/amd: Optimize dma_ops_free_addresses
  iommu/amd: Remove need_flush from struct dma_ops_domain
  iommu/amd: Iterate over all aperture ranges in dma_ops_area_alloc
  iommu/amd: Flush iommu tlb in dma_ops_free_addresses
  iommu/amd: Rename dma_ops_domain->next_address to next_index
  iommu/amd: Remove 'start' parameter from dma_ops_area_alloc
  iommu/amd: Flush iommu tlb in dma_ops_aperture_alloc()
  iommu/amd: Retry address allocation within one aperture
  iommu/amd: Move aperture_range.offset to another cache-line
  iommu/amd: Add dma_ops_aperture_alloc() function
  ...
parents a200dcb3 32704253
@@ -7,7 +7,15 @@ connected to the IPMMU through a port called micro-TLB.

 Required Properties:

-  - compatible: Must contain "renesas,ipmmu-vmsa".
+  - compatible: Must contain SoC-specific and generic entries from below.
+
+    - "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
+    - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
+    - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
+    - "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
+    - "renesas,ipmmu-r8a7794" for the R8A7794 (R-Car E2) IPMMU.
+    - "renesas,ipmmu-vmsa" for generic R-Car Gen2 VMSA-compatible IPMMU.
+
   - reg: Base address and size of the IPMMU registers.
   - interrupts: Specifiers for the MMU fault interrupts. For instances that
     support secure mode two interrupts must be specified, for non-secure and
@@ -27,7 +35,7 @@ node with the following property:

 Example: R8A7791 IPMMU-MX and VSP1-D0 bus master

 	ipmmu_mx: mmu@fe951000 {
-		compatible = "renasas,ipmmu-vmsa";
+		compatible = "renasas,ipmmu-r8a7791", "renasas,ipmmu-vmsa";
 		reg = <0 0xfe951000 0 0x1000>;
 		interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 221 IRQ_TYPE_LEVEL_HIGH>;
...
@@ -263,81 +263,6 @@ config EXYNOS_IOMMU_DEBUG

 	  Say N unless you need kernel log message for IOMMU debugging.

-config SHMOBILE_IPMMU
-	bool
-
-config SHMOBILE_IPMMU_TLB
-	bool
-
-config SHMOBILE_IOMMU
-	bool "IOMMU for Renesas IPMMU/IPMMUI"
-	default n
-	depends on ARM && MMU
-	depends on ARCH_SHMOBILE || COMPILE_TEST
-	select IOMMU_API
-	select ARM_DMA_USE_IOMMU
-	select SHMOBILE_IPMMU
-	select SHMOBILE_IPMMU_TLB
-	help
-	  Support for Renesas IPMMU/IPMMUI. This option enables
-	  remapping of DMA memory accesses from all of the IP blocks
-	  on the ICB.
-	  Warning: Drivers (including userspace drivers of UIO
-	  devices) of the IP blocks on the ICB *must* use addresses
-	  allocated from the IPMMU (iova) for DMA with this option
-	  enabled.
-	  If unsure, say N.
-
-choice
-	prompt "IPMMU/IPMMUI address space size"
-	default SHMOBILE_IOMMU_ADDRSIZE_2048MB
-	depends on SHMOBILE_IOMMU
-	help
-	  This option sets IPMMU/IPMMUI address space size by
-	  adjusting the 1st level page table size. The page table size
-	  is calculated as follows:
-
-	      page table size = number of page table entries * 4 bytes
-	      number of page table entries = address space size / 1 MiB
-
-	  For example, when the address space size is 2048 MiB, the
-	  1st level page table size is 8192 bytes.
-
-config SHMOBILE_IOMMU_ADDRSIZE_2048MB
-	bool "2 GiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_1024MB
-	bool "1 GiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_512MB
-	bool "512 MiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_256MB
-	bool "256 MiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_128MB
-	bool "128 MiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_64MB
-	bool "64 MiB"
-
-config SHMOBILE_IOMMU_ADDRSIZE_32MB
-	bool "32 MiB"
-
-endchoice
-
-config SHMOBILE_IOMMU_L1SIZE
-	int
-	default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB
-	default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB
-	default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB
-	default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB
-	default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB
-	default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
-	default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
-
 config IPMMU_VMSA
 	bool "Renesas VMSA-compatible IPMMU"
 	depends on ARM_LPAE
...
@@ -22,7 +22,5 @@ obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
-obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
-obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
This diff is collapsed.
@@ -424,46 +424,6 @@ struct protection_domain {
 	void *priv;	/* private data */
 };

-/*
- * For dynamic growth the aperture size is split into ranges of 128MB of
- * DMA address space each. This struct represents one such range.
- */
-struct aperture_range {
-
-	/* address allocation bitmap */
-	unsigned long *bitmap;
-
-	/*
-	 * Array of PTE pages for the aperture. In this array we save all the
-	 * leaf pages of the domain page table used for the aperture. This way
-	 * we don't need to walk the page table to find a specific PTE. We can
-	 * just calculate its address in constant time.
-	 */
-	u64 *pte_pages[64];
-
-	unsigned long offset;
-};
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
-	/* generic protection domain information */
-	struct protection_domain domain;
-
-	/* size of the aperture for the mappings */
-	unsigned long aperture_size;
-
-	/* address we start to search for free addresses */
-	unsigned long next_address;
-
-	/* address space relevant data */
-	struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
-	/* This will be set to true when TLB needs to be flushed */
-	bool need_flush;
-};
-
 /*
  * Structure where we save information about one hardware AMD IOMMU in the
  * system.
...
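
The pte_pages[] comment in the removed struct above boils down to constant-time address arithmetic: a 128 MB range backed by 64 saved leaf pages means each saved page covers 2 MB, i.e. 512 PTEs of 4 KB each. A sketch of the lookup this implies; the function name and open-coded shifts are illustrative assumptions, not the driver's actual helpers:

/* Constant-time PTE lookup for one 128 MB aperture range (sketch). */
static u64 *sketch_aperture_pte(struct aperture_range *range,
				unsigned long iova)
{
	/* offset of the address inside this 128 MB range */
	unsigned long offset = iova & ((1UL << 27) - 1);
	/* each of the 64 saved leaf pages maps 2 MB */
	u64 *pte_page = range->pte_pages[offset >> 21];

	if (!pte_page)
		return NULL;
	/* 512 PTEs per leaf page, one per 4 KB page */
	return &pte_page[(offset >> 12) & 0x1ff];
}

No page-table walk is needed because the leaf pages were captured when the aperture was mapped.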
@@ -432,7 +432,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	unbind_pasid(pasid_state);
 }

-static struct mmu_notifier_ops iommu_mn = {
+static const struct mmu_notifier_ops iommu_mn = {
 	.release		= mn_release,
 	.clear_flush_young	= mn_clear_flush_young,
 	.invalidate_page	= mn_invalidate_page,
@@ -513,43 +513,39 @@ static bool access_error(struct vm_area_struct *vma, struct fault *fault)
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
-	struct mm_struct *mm;
 	struct vm_area_struct *vma;
+	int ret = VM_FAULT_ERROR;
+	unsigned int flags = 0;
+	struct mm_struct *mm;
 	u64 address;
-	int ret, write;
-
-	write = !!(fault->flags & PPR_FAULT_WRITE);

 	mm = fault->state->mm;
 	address = fault->address;

+	if (fault->flags & PPR_FAULT_USER)
+		flags |= FAULT_FLAG_USER;
+	if (fault->flags & PPR_FAULT_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+
 	down_read(&mm->mmap_sem);
 	vma = find_extend_vma(mm, address);
-	if (!vma || address < vma->vm_start) {
+	if (!vma || address < vma->vm_start)
 		/* failed to get a vma in the right range */
-		up_read(&mm->mmap_sem);
-		handle_fault_error(fault);
 		goto out;
-	}

 	/* Check if we have the right permissions on the vma */
-	if (access_error(vma, fault)) {
-		up_read(&mm->mmap_sem);
-		handle_fault_error(fault);
+	if (access_error(vma, fault))
 		goto out;
-	}

-	ret = handle_mm_fault(mm, vma, address, write);
-	if (ret & VM_FAULT_ERROR) {
-		/* failed to service fault */
-		up_read(&mm->mmap_sem);
-		handle_fault_error(fault);
-		goto out;
-	}
+	ret = handle_mm_fault(mm, vma, address, flags);

+out:
 	up_read(&mm->mmap_sem);
-out:
+
+	if (ret & VM_FAULT_ERROR)
+		/* failed to service fault */
+		handle_fault_error(fault);

 	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

 	put_pasid_state(fault->state);
...
This diff is collapsed.
@@ -582,7 +582,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 }

 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  bool leaf, void *cookie)
+					  size_t granule, bool leaf, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -597,12 +597,18 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
 			iova &= ~12UL;
 			iova |= ARM_SMMU_CB_ASID(cfg);
-			writel_relaxed(iova, reg);
+			do {
+				writel_relaxed(iova, reg);
+				iova += granule;
+			} while (size -= granule);
 #ifdef CONFIG_64BIT
 		} else {
 			iova >>= 12;
 			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
-			writeq_relaxed(iova, reg);
+			do {
+				writeq_relaxed(iova, reg);
+				iova += granule >> 12;
+			} while (size -= granule);
 #endif
 		}
 #ifdef CONFIG_64BIT
@@ -610,7 +616,11 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
 			      ARM_SMMU_CB_S2_TLBIIPAS2;
-		writeq_relaxed(iova >> 12, reg);
+		iova >>= 12;
+		do {
+			writeq_relaxed(iova, reg);
+			iova += granule >> 12;
+		} while (size -= granule);
 #endif
 	} else {
 		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
@@ -945,9 +955,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 		free_irq(irq, domain);
 	}

-	if (smmu_domain->pgtbl_ops)
-		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
@@ -1357,6 +1365,7 @@ static int arm_smmu_add_device(struct device *dev)
 	if (IS_ERR(group))
 		return PTR_ERR(group);

+	iommu_group_put(group);
 	return 0;
 }
...
@@ -1063,13 +1063,19 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)

 	raw_spin_lock_init(&iommu->register_lock);

-	drhd->iommu = iommu;
-
-	if (intel_iommu_enabled)
+	if (intel_iommu_enabled) {
 		iommu->iommu_dev = iommu_device_create(NULL, iommu,
 						       intel_iommu_groups,
 						       "%s", iommu->name);

+		if (IS_ERR(iommu->iommu_dev)) {
+			err = PTR_ERR(iommu->iommu_dev);
+			goto err_unmap;
+		}
+	}
+
+	drhd->iommu = iommu;
+
 	return 0;

 err_unmap:
...
@@ -38,9 +38,6 @@
 #define io_pgtable_to_data(x)					\
 	container_of((x), struct arm_lpae_io_pgtable, iop)

-#define io_pgtable_ops_to_pgtable(x)				\
-	container_of((x), struct io_pgtable, ops)
-
 #define io_pgtable_ops_to_data(x)				\
 	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

@@ -58,8 +55,10 @@
 	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))	\
 	  * (d)->bits_per_level) + (d)->pg_shift)

+#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)
+
 #define ARM_LPAE_PAGES_PER_PGD(d)				\
-	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
+	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

 /*
  * Calculate the index at level l used to map virtual address a using the
@@ -169,7 +168,7 @@
 /* IOPTE accessors */
 #define iopte_deref(pte,d)					\
 	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
-	& ~((1ULL << (d)->pg_shift) - 1)))
+	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

 #define iopte_type(pte,l)					\
 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -326,7 +325,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 	/* Grab a pointer to the next level */
 	pte = *ptep;
 	if (!pte) {
-		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
 					       GFP_ATOMIC, cfg);
 		if (!cptep)
 			return -ENOMEM;
@@ -405,17 +404,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 	arm_lpae_iopte *start, *end;
 	unsigned long table_size;

-	/* Only leaf entries at the last level */
-	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
-		return;
-
 	if (lvl == ARM_LPAE_START_LVL(data))
 		table_size = data->pgd_size;
 	else
-		table_size = 1UL << data->pg_shift;
+		table_size = ARM_LPAE_GRANULE(data);

 	start = ptep;
-	end = (void *)ptep + table_size;
+
+	/* Only leaf entries at the last level */
+	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+		end = ptep;
+	else
+		end = (void *)ptep + table_size;

 	while (ptep != end) {
 		arm_lpae_iopte pte = *ptep++;
@@ -473,7 +473,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 		__arm_lpae_set_pte(ptep, table, cfg);

 	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
+	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
 	return size;
 }
@@ -486,11 +486,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	void *cookie = data->iop.cookie;
 	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

+	/* Something went horribly wrong and we ran out of page table */
+	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
+		return 0;
+
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 	pte = *ptep;
-
-	/* Something went horribly wrong and we ran out of page table */
-	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
+	if (WARN_ON(!pte))
 		return 0;

 	/* If the size matches this level, we're in the right place */
@@ -499,12 +501,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,

 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, false, cookie);
+			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
+					   false, cookie);
 			tlb->tlb_sync(cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else {
-			tlb->tlb_add_flush(iova, size, true, cookie);
+			tlb->tlb_add_flush(iova, size, size, true, cookie);
 		}

 		return size;
@@ -570,7 +573,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 	return 0;

 found_translation:
-	iova &= ((1 << data->pg_shift) - 1);
+	iova &= (ARM_LPAE_GRANULE(data) - 1);
 	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
 }
@@ -668,7 +671,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

-	switch (1 << data->pg_shift) {
+	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
 		reg |= ARM_LPAE_TCR_TG0_4K;
 		break;
@@ -769,7 +772,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)

 	sl = ARM_LPAE_START_LVL(data);

-	switch (1 << data->pg_shift) {
+	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
 		reg |= ARM_LPAE_TCR_TG0_4K;
 		sl++; /* SL0 format is different for 4K granule size */
@@ -889,8 +892,8 @@ static void dummy_tlb_flush_all(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }

-static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
-				void *cookie)
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
...
@@ -26,8 +26,8 @@ enum io_pgtable_fmt {
  */
 struct iommu_gather_ops {
 	void (*tlb_flush_all)(void *cookie);
-	void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
-			      void *cookie);
+	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
+			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
 };
@@ -131,6 +131,8 @@ struct io_pgtable {
 	struct io_pgtable_ops	ops;
 };

+#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+
 /**
  * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  *                              particular format.
...
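
The new granule argument gives tlb_add_flush() both the total span to invalidate and the step size the hardware expects, so implementations no longer have to guess the page size. A sketch of a conforming callback, mirroring the loops added to arm-smmu above; invalidate_one() is a hypothetical stand-in for the device-specific register write:

/* Hypothetical single-entry invalidation primitive */
static void invalidate_one(void *cookie, unsigned long iova, bool leaf);

static void sketch_tlb_add_flush(unsigned long iova, size_t size,
				 size_t granule, bool leaf, void *cookie)
{
	/* callers pass a size that is a whole multiple of granule */
	do {
		invalidate_one(cookie, iova, leaf);
		iova += granule;
	} while (size -= granule);
}

Hardware that can only flush the whole TLB, like the Renesas IPMMU below, simply ignores both parameters.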
@@ -277,8 +277,8 @@ static void ipmmu_tlb_flush_all(void *cookie)
 	ipmmu_tlb_invalidate(domain);
 }

-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
-				void *cookie)
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
 {
 	/* The hardware doesn't support selective TLB flush. */
 }
...
@@ -359,30 +359,19 @@ static struct platform_driver msm_iommu_ctx_driver = {
 	.remove		= msm_iommu_ctx_remove,
 };

+static struct platform_driver * const drivers[] = {
+	&msm_iommu_driver,
+	&msm_iommu_ctx_driver,
+};
+
 static int __init msm_iommu_driver_init(void)
 {
-	int ret;
-
-	ret = platform_driver_register(&msm_iommu_driver);
-	if (ret != 0) {
-		pr_err("Failed to register IOMMU driver\n");
-		goto error;
-	}
-
-	ret = platform_driver_register(&msm_iommu_ctx_driver);
-	if (ret != 0) {
-		platform_driver_unregister(&msm_iommu_driver);
-		pr_err("Failed to register IOMMU context driver\n");
-		goto error;
-	}
-
-error:
-	return ret;
+	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 }

 static void __exit msm_iommu_driver_exit(void)
 {
-	platform_driver_unregister(&msm_iommu_ctx_driver);
-	platform_driver_unregister(&msm_iommu_driver);
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 }

 subsys_initcall(msm_iommu_driver_init);
...
@@ -49,7 +49,7 @@ static bool s390_iommu_capable(enum iommu_cap cap)
 	}
 }

-struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
 {
 	struct s390_domain *s390_domain;
@@ -73,7 +73,7 @@ struct iommu_domain *s390_domain_alloc(unsigned domain_type)
 	return &s390_domain->domain;
 }

-void s390_domain_free(struct iommu_domain *domain)
+static void s390_domain_free(struct iommu_domain *domain)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
...
This diff is collapsed.
/*
 * IPMMU/IPMMUI
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/sh_ipmmu.h>
#include "shmobile-ipmmu.h"

#define IMCTR1 0x000
#define IMCTR2 0x004
#define IMASID 0x010
#define IMTTBR 0x014
#define IMTTBCR 0x018

#define IMCTR1_TLBEN (1 << 0)
#define IMCTR1_FLUSH (1 << 1)

static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off,
			    unsigned long data)
{
	iowrite32(data, ipmmu->ipmmu_base + reg_off);
}

void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
{
	if (!ipmmu)
		return;

	spin_lock(&ipmmu->flush_lock);
	if (ipmmu->tlb_enabled)
		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
	else
		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
	spin_unlock(&ipmmu->flush_lock);
}

void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
		   int asid)
{
	if (!ipmmu)
		return;

	spin_lock(&ipmmu->flush_lock);
	switch (size) {
	default:
		ipmmu->tlb_enabled = 0;
		break;
	case 0x2000:
		ipmmu_reg_write(ipmmu, IMTTBCR, 1);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x1000:
		ipmmu_reg_write(ipmmu, IMTTBCR, 2);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x800:
		ipmmu_reg_write(ipmmu, IMTTBCR, 3);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x400:
		ipmmu_reg_write(ipmmu, IMTTBCR, 4);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x200:
		ipmmu_reg_write(ipmmu, IMTTBCR, 5);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x100:
		ipmmu_reg_write(ipmmu, IMTTBCR, 6);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x80:
		ipmmu_reg_write(ipmmu, IMTTBCR, 7);
		ipmmu->tlb_enabled = 1;
		break;
	}
	ipmmu_reg_write(ipmmu, IMTTBR, phys);
	ipmmu_reg_write(ipmmu, IMASID, asid);
	spin_unlock(&ipmmu->flush_lock);
}

static int ipmmu_probe(struct platform_device *pdev)
{
	struct shmobile_ipmmu *ipmmu;
	struct resource *res;
	struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;

	ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
	if (!ipmmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}
	spin_lock_init(&ipmmu->flush_lock);
	ipmmu->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ipmmu->ipmmu_base))
		return PTR_ERR(ipmmu->ipmmu_base);

	ipmmu->dev_names = pdata->dev_names;
	ipmmu->num_dev_names = pdata->num_dev_names;
	platform_set_drvdata(pdev, ipmmu);
	ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
	ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
	return ipmmu_iommu_init(ipmmu);
}

static struct platform_driver ipmmu_driver = {
	.probe = ipmmu_probe,
	.driver = {
		.name = "ipmmu",
	},
};

static int __init ipmmu_init(void)
{
	return platform_driver_register(&ipmmu_driver);
}
subsys_initcall(ipmmu_init);
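
The IMTTBCR values written by ipmmu_tlb_set() above follow a simple pattern: each halving of the first-level table size, from 0x2000 down to 0x80 bytes, bumps the register value by one. That looks consistent with the ARMv7 short-descriptor TTBCR.N encoding, where the table spans 2^(14 - N) bytes; a hypothetical restatement of the switch, offered as an assumption rather than documented hardware behaviour:

#include <linux/log2.h>

/* Assumed encoding: IMTTBCR = 14 - log2(table size in bytes),
 * i.e. 0x2000 -> 1, 0x1000 -> 2, ..., 0x80 -> 7. */
static int imttbcr_for_table_size(int size)
{
	return 14 - ilog2(size);
}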
/* shmobile-ipmmu.h
 *
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#ifndef __SHMOBILE_IPMMU_H__
#define __SHMOBILE_IPMMU_H__

struct shmobile_ipmmu {
	struct device *dev;
	void __iomem *ipmmu_base;
	int tlb_enabled;
	spinlock_t flush_lock;
	const char * const *dev_names;
	unsigned int num_dev_names;
};

#ifdef CONFIG_SHMOBILE_IPMMU_TLB
void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu);
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
		   int asid);
int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu);
#else
static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
	return -EINVAL;
}
#endif

#endif /* __SHMOBILE_IPMMU_H__ */