Commit 61005762 authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Get rid of weird "atomic" write

The smmu_write_atomic_lq oddity made some sense when the context
format was effectively tied to CONFIG_64BIT, but these days it's
simpler to just pick an explicit access size based on the format
for the one-and-a-half times we actually care.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 71e8a8cd
...@@ -83,17 +83,6 @@ ...@@ -83,17 +83,6 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0)) ? 0x400 : 0))
/*
* Some 64-bit registers only make sense to write atomically, but in such
* cases all the data relevant to AArch32 formats lies within the lower word,
* therefore this actually makes more sense than it might first appear.
*/
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq writeq_relaxed
#else
#define smmu_write_atomic_lq writel_relaxed
#endif
/* Translation context bank */ /* Translation context bank */
#define ARM_SMMU_CB(smmu, n) ((smmu)->base + (((smmu)->numpage + (n)) << (smmu)->pgshift)) #define ARM_SMMU_CB(smmu, n) ((smmu)->base + (((smmu)->numpage + (n)) << (smmu)->pgshift))
...@@ -533,7 +522,10 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, ...@@ -533,7 +522,10 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12; iova >>= 12;
do { do {
smmu_write_atomic_lq(iova, reg); if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
writeq_relaxed(iova, reg);
else
writel_relaxed(iova, reg);
iova += granule >> 12; iova += granule >> 12;
} while (size -= granule); } while (size -= granule);
} }
...@@ -1371,11 +1363,10 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, ...@@ -1371,11 +1363,10 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
spin_lock_irqsave(&smmu_domain->cb_lock, flags); spin_lock_irqsave(&smmu_domain->cb_lock, flags);
/* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL; va = iova & ~0xfffUL;
if (smmu->version == ARM_SMMU_V2) if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR); writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
else /* Register is only 32-bit in v1 */ else
writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment