Commit 318afd41 authored by Joerg Roedel

x86/amd-iommu: Make np-cache a global flag

The non-present cache flag was IOMMU-local until now, which
doesn't make sense. Make this a global flag so we can remove
the last user of 'struct iommu' in the map/unmap path.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 09b42804
@@ -211,6 +211,9 @@ extern bool amd_iommu_dump;
 		printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
 	} while(0);
 
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
 /*
  * Make iterating over all IOMMUs easier
  */
@@ -131,12 +131,6 @@ static void amd_iommu_stats_init(void)
 
 #endif
 
-/* returns !0 if the IOMMU is caching non-present entries in its TLB */
-static int iommu_has_npcache(struct amd_iommu *iommu)
-{
-	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
-}
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -1713,7 +1707,7 @@ static dma_addr_t __map_single(struct device *dev,
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
 		iommu_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
-	} else if (unlikely(iommu_has_npcache(iommu)))
+	} else if (unlikely(amd_iommu_np_cache))
 		iommu_flush_pages(&dma_dom->domain, address, size);
 
 out:
@@ -141,6 +141,9 @@ LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 struct amd_iommu *amd_iommus[MAX_IOMMUS];
 int amd_iommus_present;
 
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+
 /*
  * List of protection domains - used during resume
  */
@@ -891,6 +894,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
+	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+		amd_iommu_np_cache = true;
+
 	return pci_enable_device(iommu->dev);
 }
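Note on the pattern this commit applies: a per-device capability is folded into
a single write-once global that the hot path reads without any per-device
lookup. If any one IOMMU caches non-present entries, every new mapping needs a
flush anyway, so a single bool is sufficient. Below is a minimal, self-contained
userspace sketch of that idiom; the names (fake_iommu, NPCACHE_CAP_BIT,
init_one, map_pages) are hypothetical illustrations, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NPCACHE_CAP_BIT 26   /* hypothetical capability bit position */

struct fake_iommu {
	unsigned long cap;   /* per-device capability word */
};

/* Global flag: written only during init, read in the hot path.
 * The kernel marks such variables __read_mostly. */
static bool np_cache;

/* Init path: fold the per-device capability into the global flag.
 * One caching device is enough to require flushes everywhere. */
static void init_one(struct fake_iommu *iommu)
{
	if (iommu->cap & (1UL << NPCACHE_CAP_BIT))
		np_cache = true;
}

/* Hot path: tests the global instead of dereferencing a device. */
static void map_pages(void)
{
	if (np_cache)
		printf("flush IOTLB range after mapping\n");
}

int main(void)
{
	struct fake_iommu a = { .cap = 0 };
	struct fake_iommu b = { .cap = 1UL << NPCACHE_CAP_BIT };

	init_one(&a);
	init_one(&b);   /* b caches non-present entries */
	map_pages();    /* prints: flush needed */
	return 0;
}

The payoff in the commit is the last hunk of __map_single(): the branch no
longer needs 'iommu', which is what lets later patches drop 'struct iommu'
from the map/unmap path entirely.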