Commit 5331fe6f authored by Weidong Han's avatar Weidong Han Committed by Joerg Roedel

Add domain_flush_cache

Because a virtual machine domain may contain devices from multiple IOMMUs, it cannot use __iommu_flush_cache.

In some common low-level functions, use domain_flush_cache instead of __iommu_flush_cache. On the other hand, in functions where the iommu is explicitly specified or the domain cannot be obtained, keep using __iommu_flush_cache.
Signed-off-by: default avatarWeidong Han <weidong.han@intel.com>
Signed-off-by: default avatarJoerg Roedel <joerg.roedel@amd.com>
parent c7151a8d
...@@ -445,6 +445,13 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) ...@@ -445,6 +445,13 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
return NULL; return NULL;
} }
/*
 * Flush the CPU cache lines covering [addr, addr + size) on behalf of
 * @domain.  A no-op when every IOMMU backing the domain snoops the CPU
 * cache (iommu_coherency set); otherwise the lines must be written back
 * explicitly so the IOMMU sees up-to-date page-table/context data.
 */
static void domain_flush_cache(struct dmar_domain *domain,
		void *addr, int size)
{
	if (domain->iommu_coherency)
		return;

	clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */ /* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
u8 bus, u8 devfn) u8 bus, u8 devfn)
...@@ -585,7 +592,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) ...@@ -585,7 +592,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
int level = agaw_to_level(domain->agaw); int level = agaw_to_level(domain->agaw);
int offset; int offset;
unsigned long flags; unsigned long flags;
struct intel_iommu *iommu = domain_get_iommu(domain);
BUG_ON(!domain->pgd); BUG_ON(!domain->pgd);
...@@ -609,8 +615,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) ...@@ -609,8 +615,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
flags); flags);
return NULL; return NULL;
} }
__iommu_flush_cache(iommu, tmp_page, domain_flush_cache(domain, tmp_page, PAGE_SIZE);
PAGE_SIZE);
dma_set_pte_addr(pte, virt_to_phys(tmp_page)); dma_set_pte_addr(pte, virt_to_phys(tmp_page));
/* /*
* high level table always sets r/w, last level page * high level table always sets r/w, last level page
...@@ -618,7 +623,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) ...@@ -618,7 +623,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
*/ */
dma_set_pte_readable(pte); dma_set_pte_readable(pte);
dma_set_pte_writable(pte); dma_set_pte_writable(pte);
__iommu_flush_cache(iommu, pte, sizeof(*pte)); domain_flush_cache(domain, pte, sizeof(*pte));
} }
parent = phys_to_virt(dma_pte_addr(pte)); parent = phys_to_virt(dma_pte_addr(pte));
level--; level--;
...@@ -655,14 +660,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, ...@@ -655,14 +660,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{ {
struct dma_pte *pte = NULL; struct dma_pte *pte = NULL;
struct intel_iommu *iommu = domain_get_iommu(domain);
/* get last level pte */ /* get last level pte */
pte = dma_addr_level_pte(domain, addr, 1); pte = dma_addr_level_pte(domain, addr, 1);
if (pte) { if (pte) {
dma_clear_pte(pte); dma_clear_pte(pte);
__iommu_flush_cache(iommu, pte, sizeof(*pte)); domain_flush_cache(domain, pte, sizeof(*pte));
} }
} }
...@@ -693,7 +697,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, ...@@ -693,7 +697,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int total = agaw_to_level(domain->agaw); int total = agaw_to_level(domain->agaw);
int level; int level;
u64 tmp; u64 tmp;
struct intel_iommu *iommu = domain_get_iommu(domain);
start &= (((u64)1) << addr_width) - 1; start &= (((u64)1) << addr_width) - 1;
end &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1;
...@@ -711,8 +714,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, ...@@ -711,8 +714,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
free_pgtable_page( free_pgtable_page(
phys_to_virt(dma_pte_addr(pte))); phys_to_virt(dma_pte_addr(pte)));
dma_clear_pte(pte); dma_clear_pte(pte);
__iommu_flush_cache(iommu, domain_flush_cache(domain, pte, sizeof(*pte));
pte, sizeof(*pte));
} }
tmp += level_size(level); tmp += level_size(level);
} }
...@@ -1445,12 +1447,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain, ...@@ -1445,12 +1447,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn) u8 bus, u8 devfn)
{ {
struct context_entry *context; struct context_entry *context;
struct intel_iommu *iommu = domain_get_iommu(domain);
unsigned long flags; unsigned long flags;
struct intel_iommu *iommu;
pr_debug("Set context mapping for %02x:%02x.%d\n", pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd); BUG_ON(!domain->pgd);
iommu = device_to_iommu(bus, devfn);
if (!iommu)
return -ENODEV;
context = device_to_context_entry(iommu, bus, devfn); context = device_to_context_entry(iommu, bus, devfn);
if (!context) if (!context)
return -ENOMEM; return -ENOMEM;
...@@ -1466,7 +1473,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, ...@@ -1466,7 +1473,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL); context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
context_set_fault_enable(context); context_set_fault_enable(context);
context_set_present(context); context_set_present(context);
__iommu_flush_cache(iommu, context, sizeof(*context)); domain_flush_cache(domain, context, sizeof(*context));
/* it's a non-present to present mapping */ /* it's a non-present to present mapping */
if (iommu->flush.flush_context(iommu, domain->id, if (iommu->flush.flush_context(iommu, domain->id,
...@@ -1519,12 +1526,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev) ...@@ -1519,12 +1526,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
tmp->bus->number, tmp->devfn); tmp->bus->number, tmp->devfn);
} }
static int domain_context_mapped(struct dmar_domain *domain, static int domain_context_mapped(struct pci_dev *pdev)
struct pci_dev *pdev)
{ {
int ret; int ret;
struct pci_dev *tmp, *parent; struct pci_dev *tmp, *parent;
struct intel_iommu *iommu = domain_get_iommu(domain); struct intel_iommu *iommu;
iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
if (!iommu)
return -ENODEV;
ret = device_context_mapped(iommu, ret = device_context_mapped(iommu,
pdev->bus->number, pdev->devfn); pdev->bus->number, pdev->devfn);
...@@ -1559,7 +1569,6 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, ...@@ -1559,7 +1569,6 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
struct dma_pte *pte; struct dma_pte *pte;
int index; int index;
int addr_width = agaw_to_width(domain->agaw); int addr_width = agaw_to_width(domain->agaw);
struct intel_iommu *iommu = domain_get_iommu(domain);
hpa &= (((u64)1) << addr_width) - 1; hpa &= (((u64)1) << addr_width) - 1;
...@@ -1579,7 +1588,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, ...@@ -1579,7 +1588,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
BUG_ON(dma_pte_addr(pte)); BUG_ON(dma_pte_addr(pte));
dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
dma_set_pte_prot(pte, prot); dma_set_pte_prot(pte, prot);
__iommu_flush_cache(iommu, pte, sizeof(*pte)); domain_flush_cache(domain, pte, sizeof(*pte));
start_pfn++; start_pfn++;
index++; index++;
} }
...@@ -2129,7 +2138,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) ...@@ -2129,7 +2138,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
} }
/* make sure context mapping is ok */ /* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(domain, pdev))) { if (unlikely(!domain_context_mapped(pdev))) {
ret = domain_context_mapping(domain, pdev); ret = domain_context_mapping(domain, pdev);
if (ret) { if (ret) {
printk(KERN_ERR printk(KERN_ERR
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment