Commit 650ab1e3 authored by Alexey Kardashevskiy, committed by Michael Ellerman

vfio/spapr_tce: Invalidate multiple TCEs at once

Invalidating a TCE cache entry for each updated TCE is quite expensive.
Use the new iommu_table_ops::xchg_no_kill()/tce_kill() callbacks so the
TCE cache is invalidated once per updated range, bringing down the time
spent mapping a huge guest DMA window.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190829085252.72370-4-aik@ozlabs.ru
parent 01b7d128
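
A rough sketch of the pattern this change introduces, assuming only the
iommu_tce_xchg_no_kill() and iommu_tce_kill() helpers named above; the
wrapper function, its arguments and the hpas array are hypothetical and
only illustrate batching the invalidation:

/*
 * Illustrative only: update each TCE with the _no_kill variant,
 * then flush the TCE cache once for the whole range instead of
 * once per entry.
 */
static long tce_map_range_sketch(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long pages,
		unsigned long *hpas, enum dma_data_direction dir)
{
	unsigned long i;
	long ret = 0;

	for (i = 0; i < pages; ++i) {
		unsigned long hpa = hpas[i];
		enum dma_data_direction dirtmp = dir;

		/* Update the TCE but defer the TCE cache invalidation */
		ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dirtmp);
		if (ret)
			break;
	}

	/* One invalidation covers every entry updated above */
	iommu_tce_kill(tbl, entry, i);

	return ret;
}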
drivers/vfio/vfio_iommu_spapr_tce.c

@@ -435,7 +435,7 @@ static int tce_iommu_clear(struct tce_container *container,
 	unsigned long oldhpa;
 	long ret;
 	enum dma_data_direction direction;
-	unsigned long lastentry = entry + pages;
+	unsigned long lastentry = entry + pages, firstentry = entry;
 
 	for ( ; entry < lastentry; ++entry) {
 		if (tbl->it_indirect_levels && tbl->it_userspace) {
@@ -460,7 +460,7 @@ static int tce_iommu_clear(struct tce_container *container,
 
 		direction = DMA_NONE;
 		oldhpa = 0;
-		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
 				&direction);
 		if (ret)
 			continue;
@@ -476,6 +476,8 @@ static int tce_iommu_clear(struct tce_container *container,
 		tce_iommu_unuse_page(container, oldhpa);
 	}
 
+	iommu_tce_kill(tbl, firstentry, pages);
+
 	return 0;
 }
 
@@ -518,8 +520,8 @@ static long tce_iommu_build(struct tce_container *container,
 
 		hpa |= offset;
 		dirtmp = direction;
-		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-				&dirtmp);
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+				&hpa, &dirtmp);
 		if (ret) {
 			tce_iommu_unuse_page(container, hpa);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -536,6 +538,8 @@ static long tce_iommu_build(struct tce_container *container,
 
 	if (ret)
 		tce_iommu_clear(container, tbl, entry, i);
+	else
+		iommu_tce_kill(tbl, entry, pages);
 
 	return ret;
 }
@@ -572,8 +576,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		if (mm_iommu_mapped_inc(mem))
 			break;
 
-		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-				&dirtmp);
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+				&hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
 			tce_iommu_unuse_page_v2(container, tbl, entry + i);
@@ -593,6 +597,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 
 	if (ret)
 		tce_iommu_clear(container, tbl, entry, i);
+	else
+		iommu_tce_kill(tbl, entry, pages);
 
 	return ret;
 }