Commit 8f1b4600 authored by Linus Torvalds

Merge tag 'iommu-fixes-v6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Arm SMMU fixes from Will Deacon:
     - Fix TLB range command encoding when TTL, Num and Scale are all zero
     - Fix soft lockup by limiting TLB invalidation ops issued by SVA
     - Fix clocks description for SDM630 platform in arm-smmu DT binding

 - Intel VT-d fix from Lu Baolu:
     - Fix a suspend/hibernation problem in iommu_suspend()

 - Mediatek driver: Fix page table sharing for addresses over 4GiB

 - Apple/Dart: DMA_FQ handling fix in attach_dev()

* tag 'iommu-fixes-v6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Avoid memory allocation in iommu_suspend()
  iommu/apple-dart: Handle DMA_FQ domains in attach_dev()
  iommu/mediatek: Fix share pgtable for iova over 4GB
  iommu/arm-smmu-v3: Fix soft lockup triggered by arm_smmu_mm_invalidate_range
  dt-bindings: arm-smmu: Fix SDM630 clocks description
  iommu/arm-smmu-v3: Avoid constructing invalid range commands
parents 8a749fd1 59df44bf
@@ -270,6 +270,7 @@ allOf:
           contains:
             enum:
               - qcom,msm8998-smmu-v2
+              - qcom,sdm630-smmu-v2
     then:
       anyOf:
         - properties:
@@ -311,7 +312,6 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,sdm630-smmu-v2
               - qcom,sm6375-smmu-v2
     then:
       anyOf:
@@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
 		return ret;
 
 	switch (domain->type) {
-	case IOMMU_DOMAIN_DMA:
-	case IOMMU_DOMAIN_UNMANAGED:
+	default:
 		ret = apple_dart_domain_add_streams(dart_domain, cfg);
 		if (ret)
 			return ret;
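For context, IOMMU_DOMAIN_DMA_FQ (a DMA-API domain backed by a flush queue) is a distinct domain type from IOMMU_DOMAIN_DMA, so the old explicit case labels never matched it and such a domain was left without its stream mappings. A minimal standalone sketch of the dispatch after the change, with a simplified enum and return values rather than the driver's actual code:

#include <stdio.h>

enum domain_type { DOM_BLOCKED, DOM_IDENTITY, DOM_UNMANAGED, DOM_DMA, DOM_DMA_FQ };

static const char *attach(enum domain_type type)
{
	switch (type) {
	case DOM_IDENTITY:
		return "bypass";
	case DOM_BLOCKED:
		return "blocked";
	default:	/* UNMANAGED, DMA and DMA_FQ all need translation set up */
		return "add streams + set up translation";
	}
}

int main(void)
{
	printf("DMA_FQ domain -> %s\n", attach(DOM_DMA_FQ));
	return 0;
}

Folding all translating types into "default:" also means any domain type added later is covered without another driver change.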
@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
 	}
 }
 
+/*
+ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
+ * is used as a threshold to replace per-page TLBI commands to issue in the
+ * command queue with an address-space TLBI command, when SMMU w/o a range
+ * invalidation feature handles too many per-page TLBI commands, which will
+ * otherwise result in a soft lockup.
+ */
+#define CMDQ_MAX_TLBI_OPS	(1 << (PAGE_SHIFT - 3))
+
 static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 						struct mm_struct *mm,
 						unsigned long start,
@@ -201,8 +210,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 	 * range. So do a simple translation here by calculating size correctly.
 	 */
 	size = end - start;
-	if (size == ULONG_MAX)
-		size = 0;
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+			size = 0;
+	} else {
+		if (size == ULONG_MAX)
+			size = 0;
+	}
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
 		if (!size)
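For a sense of scale, here is a standalone illustration (not driver code, 4KiB pages assumed) of what that threshold works out to: CMDQ_MAX_TLBI_OPS is 1 << (12 - 3) = 512, so on an SMMU without range invalidation any notifier range of 2MiB or more is collapsed into a single ASID-wide invalidation instead of hundreds of per-page commands.

#include <stdio.h>

#define PAGE_SHIFT        12UL                        /* assume 4KiB pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define CMDQ_MAX_TLBI_OPS (1UL << (PAGE_SHIFT - 3))   /* 512 ops */

int main(void)
{
	unsigned long size = 4UL << 20;	/* e.g. a 4MiB range from the MMU notifier */

	printf("threshold: %lu pages = %lu bytes\n",
	       CMDQ_MAX_TLBI_OPS, CMDQ_MAX_TLBI_OPS * PAGE_SIZE);

	/* Without range-invalidation support, anything at or above the
	 * threshold becomes one ASID-wide invalidation (size = 0). */
	if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
		size = 0;
	printf("size after clamping: %lu (0 = whole address space)\n", size);
	return 0;
}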
@@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		/* Get the leaf page size */
 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
 
+		num_pages = size >> tg;
+
 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
 		cmd->tlbi.tg = (tg - 10) / 2;
 
 		/*
-		 * Determine what level the granule is at. For non-leaf, io-pgtable
-		 * assumes .tlb_flush_walk can invalidate multiple levels at once,
-		 * so ignore the nominal last-level granule and leave TTL=0.
+		 * Determine what level the granule is at. For non-leaf, both
+		 * io-pgtable and SVA pass a nominal last-level granule because
+		 * they don't know what level(s) actually apply, so ignore that
+		 * and leave TTL=0. However for various errata reasons we still
+		 * want to use a range command, so avoid the SVA corner case
+		 * where both scale and num could be 0 as well.
 		 */
 		if (cmd->tlbi.leaf)
 			cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
-
-		num_pages = size >> tg;
+		else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+			num_pages++;
 	}
 
 	cmds.num = 0;
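The corner case being avoided: the driver splits the page count of a range command into a SCALE/NUM pair, roughly SCALE from the lowest set bit of the remaining count and NUM from the bits above it, so a one-page non-leaf invalidation coming from SVA would encode as SCALE=0, NUM=0 and, with TTL also 0, produce the invalid all-zero encoding named in the summary above. The sketch below mirrors that derivation in simplified standalone form (CMDQ_TLBI_RANGE_NUM_MAX taken as 31 for the 5-bit NUM field); rounding num_pages up from 1 to 2 merely over-invalidates by one page, which is harmless.

#include <stdio.h>
#include <strings.h>	/* ffs(): stand-in for the kernel's __ffs() */

#define CMDQ_TLBI_RANGE_NUM_MAX 31

static void show(unsigned long num_pages)
{
	unsigned long scale = (unsigned long)ffs((int)num_pages) - 1;
	unsigned long num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;

	printf("num_pages=%lu -> SCALE=%lu NUM=%lu\n", num_pages, scale, num - 1);
}

int main(void)
{
	show(1);	/* SCALE=0, NUM=0: invalid when TTL is also 0 */
	show(2);	/* SCALE=1, NUM=0: a valid two-page range */
	return 0;
}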
@@ -2998,13 +2998,6 @@ static int iommu_suspend(void)
 	struct intel_iommu *iommu = NULL;
 	unsigned long flag;
 
-	for_each_active_iommu(iommu, drhd) {
-		iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
-					     GFP_KERNEL);
-		if (!iommu->iommu_state)
-			goto nomem;
-	}
-
 	iommu_flush_all();
 
 	for_each_active_iommu(iommu, drhd) {
@@ -3024,12 +3017,6 @@ static int iommu_suspend(void)
 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 	return 0;
-
-nomem:
-	for_each_active_iommu(iommu, drhd)
-		kfree(iommu->iommu_state);
-	return -ENOMEM;
 }
 
 static void iommu_resume(void)
@@ -3061,9 +3048,6 @@ static void iommu_resume(void)
 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
-
-	for_each_active_iommu(iommu, drhd)
-		kfree(iommu->iommu_state);
 }
 
 static struct syscore_ops iommu_syscore_ops = {
@@ -681,7 +681,7 @@ struct intel_iommu {
 	struct iopf_queue *iopf_queue;
 	unsigned char iopfq_name[16];
 	struct q_inval *qi; /* Queued invalidation info */
-	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
 
 #ifdef CONFIG_IRQ_REMAP
 	struct ir_table *ir_table; /* Interrupt remapping info */
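The underlying issue, as the summary above suggests, is that iommu_suspend() runs as a syscore suspend callback, very late in suspend with interrupts disabled, where a GFP_KERNEL allocation is both a sleeping hazard and a needless failure path; embedding a fixed MAX_SR_DMAR_REGS-sized array in struct intel_iommu removes both. A standalone sketch of the pattern (hypothetical names, not the VT-d code):

#include <stdint.h>
#include <stdio.h>

#define MAX_SAVE_REGS 8	/* stand-in for MAX_SR_DMAR_REGS */

struct fake_iommu {
	volatile uint32_t *regs;	/* MMIO register window (hypothetical) */
	uint32_t state[MAX_SAVE_REGS];	/* preallocated save area */
};

/* With the save area embedded in the structure, the suspend hook does no
 * allocation, so it has no failure path and nothing that could sleep. */
static void fake_iommu_suspend(struct fake_iommu *iommu)
{
	for (int i = 0; i < MAX_SAVE_REGS; i++)
		iommu->state[i] = iommu->regs[i];
}

static void fake_iommu_resume(struct fake_iommu *iommu)
{
	for (int i = 0; i < MAX_SAVE_REGS; i++)
		iommu->regs[i] = iommu->state[i];
}

int main(void)
{
	static uint32_t fake_regs[MAX_SAVE_REGS] = { 0xdead, 0xbeef };
	struct fake_iommu iommu = { .regs = fake_regs };

	fake_iommu_suspend(&iommu);
	fake_regs[0] = 0;		/* register contents "lost" across suspend */
	fake_iommu_resume(&iommu);
	printf("restored reg0 = 0x%x\n", fake_regs[0]);
	return 0;
}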
@@ -262,7 +262,7 @@ struct mtk_iommu_data {
 	struct device *smicomm_dev;
 
 	struct mtk_iommu_bank_data *bank;
-	struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */
+	struct mtk_iommu_domain *share_dom;
 
 	struct regmap *pericfg;
 	struct mutex mutex; /* Protect m4u_group/m4u_dom above */
@@ -643,8 +643,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 	struct mtk_iommu_domain *share_dom = data->share_dom;
 	const struct mtk_iommu_iova_region *region;
 
-	/* Always use share domain in sharing pgtable case */
-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
+	/* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */
+	if (share_dom) {
 		dom->iop = share_dom->iop;
 		dom->cfg = share_dom->cfg;
 		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
@@ -677,8 +677,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 	/* Update our support page sizes bitmap */
 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
 
-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
-		data->share_dom = dom;
+	data->share_dom = dom;
 
 update_iova_region:
 	/* Update the iova region for this domain */
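The sharing scheme this relies on, sketched standalone with hypothetical names rather than the mtk_iommu code: the first domain to be finalised for an IOMMU instance creates the io-pgtable and records it in the per-instance data, and every later domain for that instance, including the ones covering IOVA regions above 4GiB, aliases the same table instead of building its own.

#include <stdio.h>
#include <stdlib.h>

struct pgtable { int dummy; };		/* stands in for the io-pgtable */

struct iommu_data {			/* one instance per IOMMU HW */
	struct pgtable *share_pt;	/* created by the first finalised domain */
};

struct domain {
	struct pgtable *pt;
};

/* First caller allocates the table; later callers for the same instance reuse it. */
static int domain_finalise(struct domain *dom, struct iommu_data *data)
{
	if (data->share_pt) {
		dom->pt = data->share_pt;
		return 0;
	}
	dom->pt = calloc(1, sizeof(*dom->pt));
	if (!dom->pt)
		return -1;
	data->share_pt = dom->pt;
	return 0;
}

int main(void)
{
	struct iommu_data data = { 0 };
	struct domain low = { 0 }, high = { 0 };	/* e.g. regions below and above 4GiB */

	domain_finalise(&low, &data);
	domain_finalise(&high, &data);
	printf("one shared pgtable: %s\n", low.pt == high.pt ? "yes" : "no");
	free(data.share_pt);
	return 0;
}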