Commit 2ccd4502 authored by Linus Torvalds

Merge tag 'iommu-fixes-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Intel VT-d fixes from Lu Baolu:
     - Boot kdump kernels with VT-d scalable mode on
     - Calculate the right page table levels
     - Fix two recursive locking issues
     - Fix a lockdep splat issue

 - AMD IOMMU fixes:
     - Fix for completion-wait command to use full 64 bits of data
     - Fix PASID related issue where GPU sound devices failed to
       initialize

 - Fix for Virtio-IOMMU to report correct caching behavior, needed for
   use with VFIO

* tag 'iommu-fixes-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu: Fix false ownership failure on AMD systems with PASID activated
  iommu/vt-d: Fix possible recursive locking in intel_iommu_init()
  iommu/virtio: Fix interaction with VFIO
  iommu/vt-d: Fix lockdep splat due to klist iteration in atomic context
  iommu/vt-d: Fix recursive lock issue in iommu_flush_dev_iotlb()
  iommu/vt-d: Correctly calculate sagaw value of IOMMU
  iommu/vt-d: Fix kdump kernels boot failure with scalable mode
  iommu/amd: use full 64-bit value in build_completion_wait()
Parents: 134984db 2380f1e8
@@ -939,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = data;
cmd->data[2] = lower_32_bits(data);
cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
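The bug: `data` is a u64, but each `cmd->data[]` slot is a 32-bit command word, so the old single assignment silently dropped the upper half of the completion cookie, and a large enough value could never be matched when the driver polled the semaphore. A minimal standalone sketch of the fixed split, with `lower_32_bits()`/`upper_32_bits()` mocked to mirror the kernel helpers:

```c
#include <assert.h>
#include <stdint.h>

/* Mocks mirroring the kernel's lower_32_bits()/upper_32_bits(). */
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t data = 0x123456789abcdef0ULL;	/* 64-bit completion cookie */
	uint32_t w2 = lower_32_bits(data);	/* goes into cmd->data[2] */
	uint32_t w3 = upper_32_bits(data);	/* goes into cmd->data[3] */

	/* The wait side compares the full 64-bit value written back by
	 * the IOMMU; with the old truncating assignment the upper word
	 * was zero and a large cookie could never match. */
	assert((((uint64_t)w3 << 32) | w2) == data);
	return 0;
}
```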
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (dev_state->domain == NULL)
goto out_free_states;
/* See iommu_is_default_domain() */
dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
@@ -2349,6 +2349,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
if (!dmar_in_use())
return 0;
/*
* It's unlikely that any I/O board is hot added before the IOMMU
* subsystem is initialized.
*/
if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
return -EOPNOTSUPP;
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
@@ -163,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
static inline void context_clear_pasid_enable(struct context_entry *context)
{
context->lo &= ~(1ULL << 11);
}
static inline bool context_pasid_enabled(struct context_entry *context)
{
return !!(context->lo & (1ULL << 11));
}
static inline void context_set_copied(struct context_entry *context)
{
context->hi |= (1ull << 3);
}
static inline bool context_copied(struct context_entry *context)
{
return !!(context->hi & (1ULL << 3));
}
static inline bool __context_present(struct context_entry *context)
{
return (context->lo & 1);
}
bool context_present(struct context_entry *context)
{
return context_pasid_enabled(context) ?
__context_present(context) :
__context_present(context) && !context_copied(context);
}
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -242,6 +210,26 @@ static inline void context_clear_entry(struct context_entry *context)
context->hi = 0;
}
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu->copied_tables)
return false;
return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
/*
* This domain is a static identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
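The helpers above replace the old trick of marking a copied entry inside the context entry itself (bit 67; see the comment removed in the copy_context_table hunk further down): that in-entry marker is not safely available once scalable-mode entries are in use, which is what broke kdump kernel boot with scalable mode enabled. Copied entries are now tracked out-of-band, one bit per 16-bit PCI source id. A standalone sketch of the indexing, with the kernel bitmap helpers replaced by plain C (the array layout here is illustrative):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One bit per possible source id ((bus << 8) | devfn): 2^16 bits. */
static unsigned long copied[(1UL << 16) / 64];

static unsigned long src_id(uint8_t bus, uint8_t devfn)
{
	return ((unsigned long)bus << 8) | devfn;
}

static void set_context_copied(uint8_t bus, uint8_t devfn)
{
	unsigned long id = src_id(bus, devfn);
	copied[id / 64] |= 1UL << (id % 64);
}

static bool context_copied(uint8_t bus, uint8_t devfn)
{
	unsigned long id = src_id(bus, devfn);
	return copied[id / 64] & (1UL << (id % 64));
}

int main(void)
{
	set_context_copied(0x00, 0xf8);		/* device 00:1f.0 */
	printf("%d %d\n", context_copied(0x00, 0xf8),
	       context_copied(0x01, 0x00));	/* prints: 1 0 */
	return 0;
}
```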
@@ -402,14 +390,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
/*
* Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
* Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
* the returned SAGAW.
*/
static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return sl_sagaw;
/* First level only. */
if (!ecap_slts(iommu->ecap))
return fl_sagaw;
return fl_sagaw & sl_sagaw;
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
sagaw = cap_sagaw(iommu->cap);
for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
sagaw = __iommu_calculate_sagaw(iommu);
for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
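For context: SAGAW is a bitmap in which bit n advertises support for (n + 2)-level tables covering 30 + 9*n address bits (VT-d spec 11.4.2), so an AGAW value can be used directly as a bit index, which is what `test_bit(agaw, &sagaw)` relies on. The fix intersects first-level and second-level support instead of trusting `cap_sagaw()` alone, because that field only describes second-level tables. A standalone sketch of the selection under an assumed capability mix:

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* AGAW value a means (a + 2)-level tables covering 30 + 9*a bits. */
static int width_to_agaw(int width) { return (width - 30) / 9; }
static int agaw_to_level(int agaw)  { return agaw + 2; }

int main(void)
{
	/* Assumed hardware: second level does 4- and 5-level tables,
	 * first level only 4-level; the usable set is the intersection. */
	unsigned long sl_sagaw = BIT(2) | BIT(3);
	unsigned long fl_sagaw = BIT(2);
	unsigned long sagaw = fl_sagaw & sl_sagaw;
	int agaw;

	for (agaw = width_to_agaw(57); agaw >= 0; agaw--)
		if (sagaw & BIT(agaw))
			break;

	/* Prints: agaw 2 -> 4-level tables, 48-bit address width */
	printf("agaw %d -> %d-level tables, %d-bit address width\n",
	       agaw, agaw_to_level(agaw), 30 + 9 * agaw);
	return 0;
}
```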
@@ -505,8 +515,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
unsigned long flags;
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -518,7 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
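This hunk and the matching `spin_lock` to `spin_lock_irqsave` conversions below are all one fix: `domain->lock` can be taken from atomic context via the device-TLB flush path (iommu_flush_dev_iotlb()), so every holder must keep local interrupts off for the critical section; otherwise an interrupt arriving on a CPU that already holds the lock can try to take it again and deadlock, which is what the recursive-locking report was about. A compilable mock of the pattern, with the kernel primitives stubbed (note the real `spin_lock_irqsave()` is a macro taking `flags` as an lvalue, not a pointer):

```c
#include <stdio.h>

typedef struct { int locked; } spinlock_t;

/* Stub: save the current IRQ state, disable IRQs, take the lock. */
static void spin_lock_irqsave(spinlock_t *l, unsigned long *flags)
{
	*flags = 1;	/* pretend IRQs were previously enabled */
	l->locked = 1;
}

/* Stub: drop the lock, then restore the saved IRQ state. */
static void spin_unlock_irqrestore(spinlock_t *l, unsigned long flags)
{
	l->locked = 0;
	(void)flags;
}

int main(void)
{
	spinlock_t lock = { 0 };
	unsigned long flags;

	/* While the lock is held IRQs stay off, so no interrupt handler
	 * on this CPU can re-enter and grab the same lock. */
	spin_lock_irqsave(&lock, &flags);
	puts("critical section, IRQs disabled");
	spin_unlock_irqrestore(&lock, flags);
	return 0;
}
```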
@@ -578,6 +589,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
struct context_entry *context;
u64 *entry;
/*
* Unless the caller requested to allocate a new entry, returning a
* copied context entry makes no sense.
*/
if (!alloc && context_copied(iommu, bus, devfn))
return NULL;
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
@@ -795,32 +813,11 @@ static void free_context_table(struct intel_iommu *iommu)
}
#ifdef CONFIG_DMAR_DEBUG
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
struct device_domain_info *info;
struct dma_pte *parent, *pte;
struct dmar_domain *domain;
struct pci_dev *pdev;
int offset, level;
pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
if (!pdev)
return;
info = dev_iommu_priv_get(&pdev->dev);
if (!info || !info->domain) {
pr_info("device [%02x:%02x.%d] not probed\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
return;
}
domain = info->domain;
level = agaw_to_level(domain->agaw);
parent = domain->pgd;
if (!parent) {
pr_info("no page table setup\n");
return;
}
struct dma_pte *pte;
int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
@@ -847,9 +844,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
int i, dir_index, index;
struct dma_pte *pgtable;
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
@@ -877,8 +875,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
if (!sm_supported(iommu))
if (!sm_supported(iommu)) {
level = agaw_to_level(ctx_entry->hi & 7);
pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
}
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -905,8 +906,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
} else {
level = agaw_to_level((pte->val[0] >> 2) & 0x7);
pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
}
pgtable_walk:
pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
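The rework passes the starting page table and level into pgtable_walk() instead of looking them up through the (possibly unprobed) device, and derives both from whichever structure the fault actually used: a legacy context entry, a first-level PASID entry (bit 2 of `val[2]` selects 5- vs 4-level paging), or a second-level PASID entry (AW field in `val[0]`). A standalone sketch of the scalable-mode decode, with field positions taken from the hunk above:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

static int agaw_to_level(int agaw) { return agaw + 2; }

/* Decode the walk depth from a PASID table entry; val[] stands in
 * for pasid_entry::val, layout as in the hunk above. */
static int pte_level(const uint64_t val[3], int first_level)
{
	if (first_level)
		return val[2] & BIT_ULL(2) ? 5 : 4;
	return agaw_to_level((val[0] >> 2) & 0x7);
}

int main(void)
{
	uint64_t val[3] = { 2 << 2, 0, 0 };	/* AW = 2 -> 4-level */

	printf("second-level: %d levels\n", pte_level(val, 0));
	val[2] = BIT_ULL(2);			/* 5-level paging enabled */
	printf("first-level: %d levels\n", pte_level(val, 1));
	return 0;
}
```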
@@ -1345,19 +1354,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
unsigned long flags;
if (!iommu->qi)
return NULL;
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
return info->ats_supported ? info : NULL;
}
}
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1366,8 +1376,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
unsigned long flags;
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1375,7 +1386,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1467,14 +1478,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
unsigned long flags;
if (!domain->has_iotlb_device)
return;
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
......@@ -1688,6 +1700,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
}
/* free context mapping */
free_context_table(iommu);
@@ -1913,7 +1930,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
ret = 0;
if (context_present(context))
if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
/*
@@ -1925,7 +1942,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
if (context_copied(context)) {
if (context_copied(iommu, bus, devfn)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
@@ -1936,6 +1953,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
clear_context_copied(iommu, bus, devfn);
}
context_clear_entry(context);
@@ -2429,6 +2448,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2440,9 +2460,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2684,32 +2704,14 @@ static int copy_context_table(struct intel_iommu *iommu,
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
if (!__context_present(&ce))
if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
set_bit(did, iommu->domain_ids);
/*
* We need a marker for copied context entries. This
* marker needs to work for the old format as well as
* for extended context entries.
*
* Bit 67 of the context entry is used. In the old
* format this bit is available to software, in the
* extended format it is the PGE bit, but PGE is ignored
* by HW if PASIDs are disabled (and thus still
* available).
*
* So disable PASIDs first and then mark the entry
* copied. This means that we don't copy PASID
* translations from the old kernel, but this is fine as
* faults there are not fatal.
*/
context_clear_pasid_enable(&ce);
context_set_copied(&ce);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
@@ -2735,8 +2737,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
new_ext = !!ecap_ecs(iommu->ecap);
ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
@@ -2747,6 +2749,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (new_ext != ext)
return -EINVAL;
iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
if (!iommu->copied_tables)
return -ENOMEM;
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
@@ -3013,13 +3019,7 @@ static int __init init_dmars(void)
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
/*
* Call dmar_alloc_hwirq() with dmar_global_lock held,
* could cause possible lock race condition.
*/
up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3932,7 +3932,6 @@ int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
@@ -3945,16 +3944,6 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
up_write(&dmar_global_lock);
/*
* The bus notifier takes the dmar_global_lock, so lockdep will
* complain later when we register it under the lock.
*/
dmar_register_bus_notifier();
down_write(&dmar_global_lock);
if (!no_iommu)
intel_iommu_debugfs_init();
@@ -3999,11 +3988,9 @@ int __init intel_iommu_init(void)
pr_err("Initialization failed\n");
goto out_free_dmar;
}
up_write(&dmar_global_lock);
init_iommu_pm_ops();
down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
/*
* The flush queue implementation does not perform
@@ -4021,13 +4008,11 @@ int __init intel_iommu_init(void)
"%s", iommu->name);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
up_read(&dmar_global_lock);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
@@ -4038,17 +4023,15 @@ int __init intel_iommu_init(void)
iommu_disable_protect_mem_regions(iommu);
}
up_read(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
intel_iommu_enabled = 1;
dmar_register_bus_notifier();
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
return 0;
out_free_dmar:
intel_iommu_free_dmars();
up_write(&dmar_global_lock);
return ret;
}
@@ -4080,6 +4063,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4091,9 +4075,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
spin_lock(&domain->lock);
spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
spin_unlock(&domain->lock);
spin_unlock_irqrestore(&domain->lock, flags);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4412,19 +4396,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long flags;
if (dmar_domain->force_snooping)
return true;
spin_lock(&dmar_domain->lock);
spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
spin_unlock(&dmar_domain->lock);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
spin_unlock(&dmar_domain->lock);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
@@ -197,7 +197,6 @@
#define ecap_dis(e) (((e) >> 27) & 0x1)
#define ecap_nest(e) (((e) >> 26) & 0x1)
#define ecap_mts(e) (((e) >> 25) & 0x1)
#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
......@@ -265,7 +264,6 @@
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
......@@ -579,6 +577,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
......@@ -701,6 +700,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
static inline bool context_present(struct context_entry *context)
{
return (context->lo & 1);
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
......@@ -784,7 +788,6 @@ static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
@@ -3076,6 +3076,24 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
return ret;
}
static bool iommu_is_default_domain(struct iommu_group *group)
{
if (group->domain == group->default_domain)
return true;
/*
* If the default domain was set to identity and it is still an identity
* domain then we consider this a pass. This happens because of
* amd_iommu_init_device() replacing the default identity domain with an
* identity domain that has a different configuration for AMDGPU.
*/
if (group->default_domain &&
group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
return true;
return false;
}
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3094,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
if (group->domain != group->default_domain ||
group->owner) {
if (group->owner || !iommu_is_default_domain(group)) {
ret = -EBUSY;
goto unlock_out;
}
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
static bool viommu_capable(enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
default:
return false;
}
}
static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
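virtio-iommu previously had no `capable()` callback, so `iommu_capable(IOMMU_CAP_CACHE_COHERENCY)` reported false, and VFIO, which requires DMA cache coherence from assigned devices, refused to use the IOMMU. A standalone mock of the new callback plus a VFIO-style consumer (the enum subset and the consumer are illustrative, not the kernel definitions):

```c
#include <stdbool.h>
#include <stdio.h>

enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, IOMMU_CAP_INTR_REMAP };

/* Mirrors viommu_capable() in the hunk above. */
static bool viommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	/* VFIO-style gate: no coherent DMA, no device assignment. */
	if (!viommu_capable(IOMMU_CAP_CACHE_COHERENCY)) {
		fprintf(stderr, "device rejected\n");
		return 1;
	}
	puts("coherent DMA guaranteed; usable with VFIO");
	return 0;
}
```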
@@ -65,6 +65,7 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
extern int intel_iommu_enabled;
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -88,7 +89,8 @@ extern struct list_head dmar_drhd_units;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
system_state == SYSTEM_BOOTING;
system_state == SYSTEM_BOOTING ||
(IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
}
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())