Commit 83b89ea4 authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:

 - fix a schedule-while-atomic bug in the AMD IOMMU driver's
   initialization code, found after the sleep-in-atomic checker was
   enabled earlier (a minimal sketch of the bug pattern follows this
   list).

 - a fix for the virtual APIC code in the AMD IOMMU driver which
   delivers device interrupts directly into KVM guests for assigned
   devices.

 - fixes for the recently merged lock-less page-table code for ARM: the
   redundant-TLB-sync optimisation was reverted from the core code, and
   locking was added back around the TLB sync paths.

 - a fix for the error path in arm_smmu_add_device()

 - an address sanitization fix for the ARM io-pgtable code
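
A minimal, kernel-style sketch of the bug pattern behind the first fix
(illustrative only, not taken from the patch; the demo_* names are made
up): register_syscore_ops() takes a mutex and may therefore sleep, so
calling it with interrupts disabled trips the sleep-in-atomic checker.

#include <linux/irqflags.h>
#include <linux/syscore_ops.h>

static struct syscore_ops demo_syscore_ops;

/* BROKEN: early IRQ-remapping setup runs with interrupts disabled */
static void demo_early_setup(void)
{
	unsigned long flags;

	local_irq_save(flags);
	register_syscore_ops(&demo_syscore_ops);	/* may sleep: BUG */
	local_irq_restore(flags);
}

/* FIXED: defer the sleeping call to a later init step that runs with
 * interrupts enabled, which is exactly what the AMD IOMMU patch below
 * does by moving the call from IOMMU_ACPI_FINISHED to IOMMU_ENABLED. */
static void demo_late_setup(void)
{
	register_syscore_ops(&demo_syscore_ops);
}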

* tag 'iommu-fixes-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix schedule-while-atomic BUG in initialization code
  iommu/amd: Enable ga_log_intr when enabling guest_mode
  iommu/io-pgtable: Sanitise map/unmap addresses
  iommu/arm-smmu: Fix the error path in arm_smmu_add_device
  Revert "iommu/io-pgtable: Avoid redundant TLB syncs"
  iommu/mtk: Avoid redundant TLB syncs locally
  iommu/arm-smmu: Reintroduce locking around TLB sync operations
parents 8145f373 74ddda71
drivers/iommu/amd_iommu.c

@@ -4452,6 +4452,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 
 	/* Setting */
 	irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
 	irte->hi.fields.vector = vcpu_pi_info->vector;
+	irte->lo.fields_vapic.ga_log_intr = 1;
 	irte->lo.fields_vapic.guest_mode = 1;
 	irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
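The hunk above is the entire fix: when an IRTE is switched into guest
(vAPIC) mode so that device interrupts are delivered straight into a KVM
guest, the GA-log interrupt bit must be set together with guest_mode,
otherwise the IOMMU cannot signal interrupts that arrive while the
target vCPU is not running. A standalone sketch with a hypothetical
bit-field layout (not the real 128-bit IRTE format) illustrates keeping
the two bits together:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for illustration only. */
struct demo_irte_lo {
	uint64_t valid       : 1;
	uint64_t guest_mode  : 1;
	uint64_t ga_log_intr : 1;
	uint64_t ga_tag      : 32;
	uint64_t reserved    : 29;
};

int main(void)
{
	struct demo_irte_lo lo = { .valid = 1 };

	lo.guest_mode  = 1;
	lo.ga_log_intr = 1;	/* the bit the fix adds alongside guest_mode */
	lo.ga_tag      = 0x42;

	printf("guest_mode=%u ga_log_intr=%u ga_tag=%#x\n",
	       (unsigned)lo.guest_mode, (unsigned)lo.ga_log_intr,
	       (unsigned)lo.ga_tag);
	return 0;
}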
drivers/iommu/amd_iommu_init.c

@@ -2440,11 +2440,11 @@ static int __init state_next(void)
 		break;
 	case IOMMU_ACPI_FINISHED:
 		early_enable_iommus();
-		register_syscore_ops(&amd_iommu_syscore_ops);
 		x86_platform.iommu_shutdown = disable_iommus;
 		init_state = IOMMU_ENABLED;
 		break;
 	case IOMMU_ENABLED:
+		register_syscore_ops(&amd_iommu_syscore_ops);
 		ret = amd_iommu_init_pci();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
 		enable_iommus_v2();
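For context, state_next() advances a one-step-per-call initialization
state machine, which is why the fix is a pure relocation: the sleeping
call moves out of the step that executes during atomic early setup and
into the following step, which runs with interrupts enabled. A toy
userspace rendering of that structure (state names are illustrative,
not the kernel's):

#include <stdio.h>

enum init_state { ACPI_FINISHED, ENABLED, PCI_INIT, DONE };

static enum init_state init_state = ACPI_FINISHED;

static int state_next(void)
{
	switch (init_state) {
	case ACPI_FINISHED:
		/* atomic context: only non-sleeping work belongs here */
		init_state = ENABLED;
		break;
	case ENABLED:
		/* interrupts enabled: sleeping calls are safe here */
		init_state = PCI_INIT;
		break;
	case PCI_INIT:
		init_state = DONE;
		break;
	case DONE:
		break;
	}
	return 0;
}

int main(void)
{
	while (init_state != DONE)
		state_next();
	puts("init complete");
	return 0;
}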
drivers/iommu/arm-smmu.c

@@ -400,6 +400,8 @@ struct arm_smmu_device {
 	u32				cavium_id_base; /* Specific to Cavium */
 
+	spinlock_t			global_sync_lock;
+
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
 };

@@ -436,7 +438,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
-	spinlock_t			cb_lock; /* Serialises ATS1* ops */
+	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
 };

@@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 {
 	void __iomem *base = ARM_SMMU_GR0(smmu);
+	unsigned long flags;
 
+	spin_lock_irqsave(&smmu->global_sync_lock, flags);
 	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
 			    base + ARM_SMMU_GR0_sTLBGSTATUS);
+	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
 static void arm_smmu_tlb_sync_context(void *cookie)

@@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+	unsigned long flags;
 
+	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
 			    base + ARM_SMMU_CB_TLBSTATUS);
+	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 }
 
 static void arm_smmu_tlb_sync_vmid(void *cookie)

@@ -1511,7 +1519,6 @@ static int arm_smmu_add_device(struct device *dev)
 
 	if (using_legacy_binding) {
 		ret = arm_smmu_register_legacy_master(dev, &smmu);
-		fwspec = dev->iommu_fwspec;
 		if (ret)
 			goto out_free;
 	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {

@@ -1550,15 +1557,15 @@ static int arm_smmu_add_device(struct device *dev)
 
 	ret = arm_smmu_master_alloc_smes(dev);
 	if (ret)
-		goto out_free;
+		goto out_cfg_free;
 
 	iommu_device_link(&smmu->iommu, dev);
 
 	return 0;
 
+out_cfg_free:
+	kfree(cfg);
 out_free:
-	if (fwspec)
-		kfree(fwspec->iommu_priv);
 	iommu_fwspec_free(dev);
 	return ret;
 }

@@ -1925,6 +1932,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	smmu->num_mapping_groups = size;
 	mutex_init(&smmu->stream_map_mutex);
+	spin_lock_init(&smmu->global_sync_lock);
 
 	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
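Both locked sections above guard the same two-step pattern: write the
sync doorbell register, then poll the status register until the sync
drains. Without a lock, two CPUs can interleave those steps on the
shared registers, and a CPU may miss its own completion and hit the
sync timeout. A condensed kernel-style sketch of the locked shape
(simplified from the diff; demo_tlb_sync is illustrative and omits the
real code's timeout handling):

#include <linux/io.h>
#include <linux/spinlock.h>

static void demo_tlb_sync(spinlock_t *lock, void __iomem *sync_reg,
			  void __iomem *status_reg)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writel_relaxed(0, sync_reg);		/* kick off the TLB sync */
	while (readl_relaxed(status_reg) & 1)	/* wait for it to drain */
		cpu_relax();
	spin_unlock_irqrestore(lock, flags);
}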
drivers/iommu/io-pgtable-arm-v7s.c

@@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
+	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+		return -ERANGE;
+
 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's

@@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	size_t unmapped;
 
+	if (WARN_ON(upper_32_bits(iova)))
+		return 0;
+
 	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
 	if (unmapped)
 		io_pgtable_tlb_sync(&data->iop);
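The ARMv7 short-descriptor (v7s) format can only express 32-bit input
and output addresses, so wider values handed in by a caller would
previously be truncated silently into the page tables; the new WARN_ON
checks reject them up front. A standalone illustration (upper_32_bits
mirrors the kernel macro of the same name; the demo_* wrapper is made
up):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

static int demo_v7s_map(uint64_t iova, uint64_t paddr)
{
	if (upper_32_bits(iova) || upper_32_bits(paddr))
		return -ERANGE;	/* would have been silently truncated */
	return 0;
}

int main(void)
{
	printf("map 0x80000000  -> %d\n", demo_v7s_map(0x80000000ULL, 0));
	printf("map 0x100000000 -> %d\n", demo_v7s_map(0x100000000ULL, 0));
	return 0;
}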
drivers/iommu/io-pgtable-arm.c

@@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+		    paddr >= (1ULL << data->iop.cfg.oas)))
+		return -ERANGE;
+
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
 	/*

@@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);
 
+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+		return 0;
+
 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
 		io_pgtable_tlb_sync(&data->iop);
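The LPAE checks are the same idea, except the bounds come from the
configured input and output address sizes (the ias and oas fields, in
bits) rather than a fixed 32. A standalone sketch (demo_* names are
made up; the bound computation follows the diff):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_cfg { unsigned ias, oas; };

static int demo_lpae_map(const struct demo_cfg *cfg,
			 uint64_t iova, uint64_t paddr)
{
	if (iova >= (1ULL << cfg->ias) || paddr >= (1ULL << cfg->oas))
		return -ERANGE;
	return 0;
}

int main(void)
{
	struct demo_cfg cfg = { .ias = 32, .oas = 40 };

	printf("%d\n", demo_lpae_map(&cfg, 1ULL << 31, 0));	/* 0: in range */
	printf("%d\n", demo_lpae_map(&cfg, 1ULL << 32, 0));	/* -ERANGE */
	return 0;
}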
drivers/iommu/io-pgtable.h

@@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
  * @fmt:    The page table format.
  * @cookie: An opaque token provided by the IOMMU driver and passed back to
  *          any callback routines.
- * @tlb_sync_pending: Private flag for optimising out redundant syncs.
  * @cfg:    A copy of the page table configuration.
  * @ops:    The page table operations in use for this set of page tables.
  */
 struct io_pgtable {
 	enum io_pgtable_fmt	fmt;
 	void			*cookie;
-	bool			tlb_sync_pending;
 	struct io_pgtable_cfg	cfg;
 	struct io_pgtable_ops	ops;
 };

@@ -175,22 +173,17 @@ struct io_pgtable {
 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 {
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
 		unsigned long iova, size_t size, size_t granule, bool leaf)
 {
 	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
 {
-	if (iop->tlb_sync_pending) {
-		iop->cfg.tlb->tlb_sync(iop->cookie);
-		iop->tlb_sync_pending = false;
-	}
+	iop->cfg.tlb->tlb_sync(iop->cookie);
 }
 
 /**
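The revert exists because the tlb_sync_pending flag lives in the shared
io_pgtable and, now that the page-table code is lock-free, nothing
orders accesses to it: one CPU can consume another CPU's pending state,
and the second caller then skips a sync it still needed. A
single-threaded userspace sketch of that interleaving (demo_* names are
made up; read the two demo_sync() calls as two racing CPUs):

#include <stdbool.h>
#include <stdio.h>

struct demo_iop {
	bool tlb_sync_pending;
	int  syncs_issued;
};

static void demo_add_flush(struct demo_iop *iop)
{
	iop->tlb_sync_pending = true;	/* queue an invalidation */
}

static void demo_sync(struct demo_iop *iop)
{
	if (iop->tlb_sync_pending) {	/* racy check: another CPU can  */
		iop->syncs_issued++;	/* clear the flag between this  */
		iop->tlb_sync_pending = false;	/* check and our wait  */
	}
}

int main(void)
{
	struct demo_iop iop = { 0 };

	demo_add_flush(&iop);	/* "CPU0" queues an invalidation */
	demo_sync(&iop);	/* "CPU1" consumes the pending flag */
	demo_sync(&iop);	/* "CPU0" sees no pending work, skips its wait */
	printf("syncs issued: %d (second caller skipped)\n",
	       iop.syncs_issued);
	return 0;
}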
drivers/iommu/mtk_iommu.c

@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
 	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
 	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
 	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+	data->tlb_flush_active = true;
 }
 
 static void mtk_iommu_tlb_sync(void *cookie)

@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	int ret;
 	u32 tmp;
 
+	/* Avoid timing out if there's nothing to wait for */
+	if (!data->tlb_flush_active)
+		return;
+
 	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
 					tmp != 0, 10, 100000);
 	if (ret) {

@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	}
 	/* Clear the CPE status */
 	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+	data->tlb_flush_active = false;
 }
 
 static const struct iommu_gather_ops mtk_iommu_gather_ops = {
drivers/iommu/mtk_iommu.h

@@ -47,6 +47,7 @@ struct mtk_iommu_data {
 	struct iommu_group		*m4u_group;
 	struct mtk_smi_iommu		smi_imu;      /* SMI larb iommu info */
 	bool				enable_4GB;
+	bool				tlb_flush_active;
 
 	struct iommu_device		iommu;
 };
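The MediaTek driver reinstates the flag locally for a narrower reason:
its sync polls REG_MMU_CPE_DONE, and if no invalidation was ever queued
the poll just spins until the 100 ms timeout. A private
tlb_flush_active flag in mtk_iommu_data lets the sync return early
instead. A standalone sketch of the guard (demo_* names are made up and
the hardware handshake is idealised):

#include <stdbool.h>
#include <stdio.h>

struct demo_mtk {
	bool tlb_flush_active;
	bool cpe_done;		/* stands in for REG_MMU_CPE_DONE */
};

static void demo_flush(struct demo_mtk *d)
{
	d->cpe_done = true;	/* hardware completes the range op */
	d->tlb_flush_active = true;
}

static void demo_sync(struct demo_mtk *d)
{
	if (!d->tlb_flush_active)
		return;		/* nothing queued: avoid a doomed poll */
	while (!d->cpe_done)
		;		/* poll for completion */
	d->cpe_done = false;	/* clear the CPE status */
	d->tlb_flush_active = false;
}

int main(void)
{
	struct demo_mtk d = { 0 };

	demo_sync(&d);		/* no-op instead of a 100 ms timeout */
	demo_flush(&d);
	demo_sync(&d);
	puts("done");
	return 0;
}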