Commit 44f6876a authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Support non-strict mode

All we need is to wire up .flush_iotlb_all properly and implement the
domain attribute, and iommu-dma and io-pgtable will do the rest for us.
The only real subtlety is documenting the barrier semantics we're
introducing between io-pgtable and the drivers for non-strict flushes.
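
For reference, a minimal caller-side sketch (hypothetical, not part of this
patch): it assumes a default DMA domain in `domain` that has not yet been
attached, since arm_smmu_init_domain_context() only samples non_strict when
the pagetables are allocated:

	int non_strict = 1;

	/* Ask for flush-queue (lazy invalidation) behaviour up front */
	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				  &non_strict))
		pr_warn("non-strict mode rejected, staying strict\n");

Note that flipping the attribute on a live domain changes what get_attr
reports but not the io-pgtable quirks already chosen at attach time.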
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent b2dfeba6
@@ -246,6 +246,7 @@ struct arm_smmu_domain {
 	const struct iommu_gather_ops	*tlb_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
+	bool				non_strict;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
@@ -447,7 +448,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
-	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	/*
+	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+	 * cleared by the current CPU are visible to the SMMU before the TLBI.
+	 */
+	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
 	arm_smmu_tlb_sync_context(cookie);
 }
@@ -457,7 +462,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_GR0(smmu);
 
-	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+	/* NOTE: see above */
+	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 	arm_smmu_tlb_sync_global(smmu);
 }
@@ -869,6 +875,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
 
+	if (smmu_domain->non_strict)
+		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
 	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
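
What IO_PGTABLE_QUIRK_NON_STRICT buys downstream, paraphrased from memory of
the companion io-pgtable-arm patch in this series (see that patch for the
authoritative version): unmapping a leaf entry skips the per-entry
invalidation, leaving it to the flush queue, while freed table pages are
still flushed synchronously. The leaf branch of __arm_lpae_unmap() ends up
shaped roughly like:

	} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
		/*
		 * Defer the leaf TLBI to the flush queue; just order the
		 * PTE update against queueing the IOVA, so a flush callback
		 * on another CPU observes it before issuing TLBIALL.
		 */
		smp_wmb();
	} else {
		io_pgtable_tlb_add_flush(iop, iova, size,
					 ARM_LPAE_GRANULE(data), true);
	}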
@@ -1258,6 +1267,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
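
For context, the consumer of the new callback: the companion iommu-dma patch
in this series drains its IOVA flush queue through a callback that ends up in
.flush_iotlb_all. A paraphrased sketch, with member and function names quoted
from memory of that patch:

	static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
	{
		struct iommu_dma_cookie *cookie;

		cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
		/* a driver advertising the attribute must provide the op */
		cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	}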
@@ -1476,9 +1493,8 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1486,6 +1502,19 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
-	default:
-		return -ENODEV;
-	}
+		default:
+			return -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			*(int *)data = smmu_domain->non_strict;
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
 }
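
And the matching read-back, as a hypothetical usage sketch against the
get_attr code above:

	int non_strict = 0;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				   &non_strict))
		pr_info("flush queue %sabled\n", non_strict ? "en" : "dis");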
@@ -1494,11 +1523,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	int ret = 0;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
 	mutex_lock(&smmu_domain->init_mutex);
 
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		if (smmu_domain->smmu) {
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			if (smmu_domain->smmu) {
@@ -1510,12 +1538,23 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
-			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-		else
-			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-		break;
-	default:
-		ret = -ENODEV;
-	}
+				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+			else
+				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			smmu_domain->non_strict = *(int *)data;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
 
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
@@ -1568,7 +1607,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
 	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,