Commit 589e5cab authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.15-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Intel VT-d:
     - PASID leakage in intel_svm_unbind_mm()
     - Deadlock in intel_svm_drain_prq()

 - AMD IOMMU: Fixes for an unhandled page-fault bug when AVIC is used
   for a KVM guest.

 - Make CONFIG_IOMMU_DEFAULT_DMA_LAZY architecture-dependent instead of
   IOMMU-driver-dependent

* tag 'iommu-fixes-v5.15-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu: Clarify default domain Kconfig
  iommu/vt-d: Fix a deadlock in intel_svm_drain_prq()
  iommu/vt-d: Fix PASID leak in intel_svm_unbind_mm()
  iommu/amd: Remove iommu_init_ga()
  iommu/amd: Relocate GAMSup check to early_enable_iommus
parents 5ffc06eb 8cc63319
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
 choice
 	prompt "IOMMU default domain type"
 	depends on IOMMU_API
-	default IOMMU_DEFAULT_DMA_LAZY if AMD_IOMMU || INTEL_IOMMU
+	default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64
 	default IOMMU_DEFAULT_DMA_STRICT
 	help
 	  Choose the type of IOMMU domain used to manage DMA API usage by
...
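The Kconfig hunk above keys the lazy default off the architecture (X86/IA64) rather than off which IOMMU driver happens to be built. For readers unfamiliar with what the default controls: in strict mode every unmap invalidates the IOTLB immediately, while in lazy mode invalidations are batched in a flush queue, trading a short window of stale mappings for far fewer invalidation operations. The toy userspace sketch below illustrates that trade-off only; every name in it is made up, and it is not the kernel's flush-queue implementation.

/*
 * Toy model of strict vs. lazy (flush-queue) IOTLB invalidation.
 * All names are hypothetical; this is not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

#define FLUSH_QUEUE_DEPTH 8

struct toy_iommu {
	bool lazy;				/* IOMMU_DEFAULT_DMA_LAZY analogue */
	unsigned long queued[FLUSH_QUEUE_DEPTH];
	int nr_queued;
};

static void iotlb_flush(struct toy_iommu *iommu)
{
	/* One expensive invalidation covers all queued entries. */
	printf("IOTLB flush (%d mappings invalidated)\n", iommu->nr_queued);
	iommu->nr_queued = 0;
}

static void toy_unmap(struct toy_iommu *iommu, unsigned long iova)
{
	iommu->queued[iommu->nr_queued++] = iova;

	/*
	 * Strict: flush on every unmap, so the device can never use the
	 * address again once toy_unmap() returns.
	 * Lazy: let stale entries accumulate until the queue fills,
	 * amortizing the flush cost over many unmaps.
	 */
	if (!iommu->lazy || iommu->nr_queued == FLUSH_QUEUE_DEPTH)
		iotlb_flush(iommu);
}

int main(void)
{
	struct toy_iommu iommu = { .lazy = true };

	for (unsigned long iova = 0; iova < 16; iova++)
		toy_unmap(&iommu, iova * 4096);
	if (iommu.nr_queued)
		iotlb_flush(&iommu);	/* drain the remainder */
	return 0;
}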
@@ -297,6 +297,22 @@ int amd_iommu_get_num_iommus(void)
 	return amd_iommus_present;
 }
 
+#ifdef CONFIG_IRQ_REMAP
+static bool check_feature_on_all_iommus(u64 mask)
+{
+	bool ret = false;
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu) {
+		ret = iommu_feature(iommu, mask);
+		if (!ret)
+			return false;
+	}
+
+	return true;
+}
+#endif
+
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
@@ -813,9 +829,9 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 	return 0;
 }
 
-#ifdef CONFIG_IRQ_REMAP
 static int iommu_init_ga_log(struct amd_iommu *iommu)
 {
+#ifdef CONFIG_IRQ_REMAP
 	u64 entry;
 
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
@@ -845,25 +861,9 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 err_out:
 	free_ga_log(iommu);
 	return -EINVAL;
-}
-
-#endif /* CONFIG_IRQ_REMAP */
-
-static int iommu_init_ga(struct amd_iommu *iommu)
-{
-	int ret = 0;
-
-#ifdef CONFIG_IRQ_REMAP
-	/* Note: We have already checked GASup from IVRS table.
-	 * Now, we need to make sure that GAMSup is set.
-	 */
-	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
-		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
-	ret = iommu_init_ga_log(iommu);
+#else
+	return 0;
 #endif /* CONFIG_IRQ_REMAP */
-
-	return ret;
 }
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
@@ -1845,7 +1845,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
 		return -ENOMEM;
 
-	ret = iommu_init_ga(iommu);
+	ret = iommu_init_ga_log(iommu);
 	if (ret)
 		return ret;
@@ -2479,6 +2479,14 @@ static void early_enable_iommus(void)
 	}
 
 #ifdef CONFIG_IRQ_REMAP
+	/*
+	 * Note: We have already checked GASup from IVRS table.
+	 * Now, we need to make sure that GAMSup is set.
+	 */
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
+		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
 #endif
...
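Taken together, the hunks above replace a per-IOMMU GAMSup check in the now-removed iommu_init_ga() with check_feature_on_all_iommus() called from early_enable_iommus(): guest vAPIC is a system-wide mode, so it must be downgraded to legacy GA if any single IOMMU lacks GAMSup, and that decision has to be made once all IOMMUs are known but before they are enabled. A minimal userspace analogue of this "any unit can veto the global mode" pattern (hypothetical names, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_GAM	(1ULL << 0)	/* hypothetical feature bit */

enum guest_ir_mode { GUEST_IR_VAPIC, GUEST_IR_LEGACY_GA };

struct unit { uint64_t features; };

/* Mirrors the shape of check_feature_on_all_iommus(): true only if
 * every unit advertises the feature; one non-supporting unit vetoes. */
static bool feature_on_all_units(const struct unit *units, int n,
				 uint64_t mask)
{
	for (int i = 0; i < n; i++)
		if (!(units[i].features & mask))
			return false;
	return true;
}

int main(void)
{
	struct unit units[] = {
		{ .features = FEATURE_GAM },
		{ .features = 0 },		/* this unit lacks the feature */
	};
	enum guest_ir_mode mode = GUEST_IR_VAPIC;

	/* Decide once, after all units are known and before any is
	 * enabled: downgrade globally if any unit lacks the feature. */
	if (mode == GUEST_IR_VAPIC &&
	    !feature_on_all_units(units, 2, FEATURE_GAM))
		mode = GUEST_IR_LEGACY_GA;

	printf("guest IR mode: %s\n",
	       mode == GUEST_IR_VAPIC ? "vAPIC" : "legacy GA");
	return 0;
}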
@@ -514,9 +514,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
 {
 	mutex_lock(&mm->context.lock);
 
-	/* Synchronize with READ_ONCE in update_pasid(). */
-	smp_store_release(&mm->pasid, pasid);
-
 	/* Update PASID MSR on all CPUs running the mm's tasks. */
 	on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
@@ -792,7 +789,19 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
 		goto prq_retry;
 	}
 
+	/*
+	 * A work in IO page fault workqueue may try to lock pasid_mutex now.
+	 * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
+	 * all works in the workqueue to finish may cause deadlock.
+	 *
+	 * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
+	 * Unlock it to allow the works to be handled while waiting for
+	 * them to finish.
+	 */
+	lockdep_assert_held(&pasid_mutex);
+	mutex_unlock(&pasid_mutex);
 	iopf_queue_flush_dev(dev);
+	mutex_lock(&pasid_mutex);
 
 	/*
 	 * Perform steps described in VT-d spec CH7.10 to drain page
...
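The second svm.c hunk fixes the deadlock with a classic pattern: a thread that waits for queued work must not hold a lock that the work items themselves take, so the lock is dropped across the wait and re-acquired afterwards. A self-contained pthread sketch of the same hazard and fix (hypothetical names; in the kernel the lock is pasid_mutex and the wait is iopf_queue_flush_dev()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for an IO page fault work item: it needs state_lock. */
static void *fault_work(void *arg)
{
	pthread_mutex_lock(&state_lock);
	puts("work: handled fault");
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

/* Stands in for intel_svm_drain_prq(): entered with state_lock held. */
static void drain_queue(pthread_t worker)
{
	/*
	 * Waiting for the worker while holding state_lock would deadlock:
	 * the worker blocks on state_lock, we block on the join. The lock
	 * is not needed across the wait, so drop it and re-take it after.
	 */
	pthread_mutex_unlock(&state_lock);
	pthread_join(worker, NULL);	/* iopf_queue_flush_dev() analogue */
	pthread_mutex_lock(&state_lock);
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&state_lock);
	pthread_create(&worker, NULL, fault_work, NULL);
	drain_queue(worker);
	pthread_mutex_unlock(&state_lock);
	puts("drain: done, no deadlock");
	return 0;
}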