Commit 1f0ef2aa authored by David Woodhouse's avatar David Woodhouse

intel-iommu: Clean up handling of "caching mode" vs. IOTLB flushing.

As we just did for context cache flushing, clean up the logic around
whether we need to flush the iotlb or just the write-buffer, depending
on caching mode.

Fix the same bug in qi_flush_iotlb() that qi_flush_context() had -- it
isn't supposed to be returning an error; it's supposed to be returning a
flag which triggers a write-buffer flush.

Remove some superfluous conditional write-buffer flushes which could
never have happened because they weren't for non-present-to-present
mapping changes anyway.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 4c25a2c1
...@@ -735,22 +735,14 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, ...@@ -735,22 +735,14 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
qi_submit_sync(&desc, iommu); qi_submit_sync(&desc, iommu);
} }
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type, unsigned int size_order, u64 type)
int non_present_entry_flush)
{ {
u8 dw = 0, dr = 0; u8 dw = 0, dr = 0;
struct qi_desc desc; struct qi_desc desc;
int ih = 0; int ih = 0;
if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}
if (cap_write_drain(iommu->cap)) if (cap_write_drain(iommu->cap))
dw = 1; dw = 1;
...@@ -762,7 +754,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, ...@@ -762,7 +754,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order); | QI_IOTLB_AM(size_order);
return qi_submit_sync(&desc, iommu); qi_submit_sync(&desc, iommu);
} }
/* /*
......
...@@ -891,27 +891,13 @@ static void __iommu_flush_context(struct intel_iommu *iommu, ...@@ -891,27 +891,13 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
} }
/* return value determine if we need a write buffer flush */ /* return value determine if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type, u64 addr, unsigned int size_order, u64 type)
int non_present_entry_flush)
{ {
int tlb_offset = ecap_iotlb_offset(iommu->ecap); int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0; u64 val = 0, val_iva = 0;
unsigned long flag; unsigned long flag;
/*
* In the non-present entry flush case, if hardware doesn't cache
* non-present entry we do nothing and if hardware cache non-present
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}
switch (type) { switch (type) {
case DMA_TLB_GLOBAL_FLUSH: case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */ /* global flush doesn't need set IVA_REG */
...@@ -959,12 +945,10 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, ...@@ -959,12 +945,10 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type), (unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val)); (unsigned long long)DMA_TLB_IAIG(val));
/* flush iotlb entry will implicitly flush write buffer */
return 0;
} }
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush) u64 addr, unsigned int pages)
{ {
unsigned int mask; unsigned int mask;
...@@ -974,8 +958,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, ...@@ -974,8 +958,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
/* Fallback to domain selective flush if no PSI support */ /* Fallback to domain selective flush if no PSI support */
if (!cap_pgsel_inv(iommu->cap)) if (!cap_pgsel_inv(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0, return iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH, DMA_TLB_DSI_FLUSH);
non_present_entry_flush);
/* /*
* PSI requires page size to be 2 ^ x, and the base address is naturally * PSI requires page size to be 2 ^ x, and the base address is naturally
...@@ -985,11 +968,10 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, ...@@ -985,11 +968,10 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
/* Fallback to domain selective flush if size is too big */ /* Fallback to domain selective flush if size is too big */
if (mask > cap_max_amask_val(iommu->cap)) if (mask > cap_max_amask_val(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0, return iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH, non_present_entry_flush); DMA_TLB_DSI_FLUSH);
return iommu->flush.flush_iotlb(iommu, did, addr, mask, return iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH, DMA_TLB_PSI_FLUSH);
non_present_entry_flush);
} }
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
...@@ -1423,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, ...@@ -1423,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
(((u16)bus) << 8) | devfn, (((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT, DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL); DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
} else { } else {
iommu_flush_write_buffer(iommu); iommu_flush_write_buffer(iommu);
} }
...@@ -1558,8 +1540,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) ...@@ -1558,8 +1540,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
clear_context_table(iommu, bus, devfn); clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0, iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL); DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
DMA_TLB_GLOBAL_FLUSH, 0);
} }
static void domain_remove_dev_info(struct dmar_domain *domain) static void domain_remove_dev_info(struct dmar_domain *domain)
...@@ -2096,8 +2077,7 @@ static int __init init_dmars(void) ...@@ -2096,8 +2077,7 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu); iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
0);
iommu_disable_protect_mem_regions(iommu); iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu); ret = iommu_enable_translation(iommu);
...@@ -2244,10 +2224,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, ...@@ -2244,10 +2224,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
if (ret) if (ret)
goto error; goto error;
/* it's a non-present to present mapping */ /* it's a non-present to present mapping. Only flush if caching mode */
ret = iommu_flush_iotlb_psi(iommu, domain->id, if (cap_caching_mode(iommu->cap))
start_paddr, size >> VTD_PAGE_SHIFT, 1); iommu_flush_iotlb_psi(iommu, 0, start_paddr,
if (ret) size >> VTD_PAGE_SHIFT);
else
iommu_flush_write_buffer(iommu); iommu_flush_write_buffer(iommu);
return start_paddr + ((u64)paddr & (~PAGE_MASK)); return start_paddr + ((u64)paddr & (~PAGE_MASK));
...@@ -2283,7 +2264,7 @@ static void flush_unmaps(void) ...@@ -2283,7 +2264,7 @@ static void flush_unmaps(void)
if (deferred_flush[i].next) { if (deferred_flush[i].next) {
iommu->flush.flush_iotlb(iommu, 0, 0, 0, iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0); DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) { for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad, __free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]); deferred_flush[i].iova[j]);
...@@ -2362,9 +2343,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, ...@@ -2362,9 +2343,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
/* free page tables */ /* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size); dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) { if (intel_iommu_strict) {
if (iommu_flush_iotlb_psi(iommu, iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) size >> VTD_PAGE_SHIFT);
iommu_flush_write_buffer(iommu);
/* free iova */ /* free iova */
__free_iova(&domain->iovad, iova); __free_iova(&domain->iovad, iova);
} else { } else {
...@@ -2455,9 +2435,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, ...@@ -2455,9 +2435,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */ /* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size); dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
size >> VTD_PAGE_SHIFT, 0)) size >> VTD_PAGE_SHIFT);
iommu_flush_write_buffer(iommu);
/* free iova */ /* free iova */
__free_iova(&domain->iovad, iova); __free_iova(&domain->iovad, iova);
...@@ -2549,10 +2528,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne ...@@ -2549,10 +2528,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
offset += size; offset += size;
} }
/* it's a non-present to present mapping */ /* it's a non-present to present mapping. Only flush if caching mode */
if (iommu_flush_iotlb_psi(iommu, domain->id, if (cap_caching_mode(iommu->cap))
start_addr, offset >> VTD_PAGE_SHIFT, 1)) iommu_flush_iotlb_psi(iommu, 0, start_addr,
offset >> VTD_PAGE_SHIFT);
else
iommu_flush_write_buffer(iommu); iommu_flush_write_buffer(iommu);
return nelems; return nelems;
} }
...@@ -2711,9 +2693,9 @@ static int init_iommu_hw(void) ...@@ -2711,9 +2693,9 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu); iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL); DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0); DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu); iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu); iommu_enable_translation(iommu);
} }
...@@ -2728,9 +2710,9 @@ static void iommu_flush_all(void) ...@@ -2728,9 +2710,9 @@ static void iommu_flush_all(void)
for_each_active_iommu(iommu, drhd) { for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0, iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL); DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0); DMA_TLB_GLOBAL_FLUSH);
} }
} }
......
...@@ -283,8 +283,8 @@ struct ir_table { ...@@ -283,8 +283,8 @@ struct ir_table {
struct iommu_flush { struct iommu_flush {
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type); u8 fm, u64 type);
int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type, int non_present_entry_flush); unsigned int size_order, u64 type);
}; };
enum { enum {
...@@ -341,9 +341,8 @@ extern void qi_global_iec(struct intel_iommu *iommu); ...@@ -341,9 +341,8 @@ extern void qi_global_iec(struct intel_iommu *iommu);
extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type); u8 fm, u64 type);
extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type, unsigned int size_order, u64 type);
int non_present_entry_flush);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment