Commit 649354b7 authored by Alexey Kardashevskiy, committed by Michael Ellerman

vfio: powerpc/spapr: Moving pinning/unpinning to helpers

This is a pretty mechanical patch to make next patches simpler.

New tce_iommu_unuse_page() helper does put_page() now but it might skip
that after the memory registering patch is applied.

As we are here, this removes unnecessary checks for a value returned
by pfn_to_page() as it cannot possibly return NULL.

This moves tce_iommu_disable() later to let tce_iommu_clear() know if
the container has been enabled because if it has not been, then
put_page() must not be called on TCEs from the TCE table. This situation
is not yet possible but it will be after the KVM acceleration patchset
is applied.

This changes code to work with physical addresses rather than linear
mapping addresses for better code readability. Following patches will
add an xchg() callback for an IOMMU table which will accept/return
physical addresses (unlike current tce_build()) which will eliminate
redundant conversions.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for the vfio related changes]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 3c56e822
...@@ -191,69 +191,90 @@ static void tce_iommu_release(void *iommu_data) ...@@ -191,69 +191,90 @@ static void tce_iommu_release(void *iommu_data)
struct tce_container *container = iommu_data; struct tce_container *container = iommu_data;
WARN_ON(container->tbl && !container->tbl->it_group); WARN_ON(container->tbl && !container->tbl->it_group);
tce_iommu_disable(container);
if (container->tbl && container->tbl->it_group) if (container->tbl && container->tbl->it_group)
tce_iommu_detach_group(iommu_data, container->tbl->it_group); tce_iommu_detach_group(iommu_data, container->tbl->it_group);
tce_iommu_disable(container);
mutex_destroy(&container->lock); mutex_destroy(&container->lock);
kfree(container); kfree(container);
} }
/*
 * Release the page referenced by a just-cleared TCE.
 *
 * @container: currently unused; passed so the signature already matches
 *             what later patches need (per the commit message, this helper
 *             "might skip" the put_page() after memory registering lands).
 * @oldtce:    the old TCE value; TCE_PCI_READ/TCE_PCI_WRITE are permission
 *             bits, the remaining bits hold the physical address.
 *
 * A TCE with neither READ nor WRITE set never had a page pinned for it,
 * so there is nothing to release.  A writable mapping may have been
 * DMA'd into, so the page is marked dirty before the pin is dropped.
 */
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long oldtce)
{
	struct page *page;

	/* No access bits -> no page was pinned for this entry */
	if (!(oldtce & (TCE_PCI_READ | TCE_PCI_WRITE)))
		return;

	/*
	 * oldtce carries a real physical address, so pfn_to_page()
	 * cannot return NULL here (see commit message).
	 */
	page = pfn_to_page(oldtce >> PAGE_SHIFT);

	/* The device may have written to the page via DMA */
	if (oldtce & TCE_PCI_WRITE)
		SetPageDirty(page);

	/* Balances the get_user_pages_fast() in tce_iommu_use_page() */
	put_page(page);
}
/*
 * Clear @pages consecutive TCE entries starting at @entry in @tbl and
 * release the pages those entries referenced.
 *
 * @container: container context, forwarded to tce_iommu_unuse_page().
 * @tbl:       the IOMMU (TCE) table being cleared.
 * @entry:     index of the first entry to clear.
 * @pages:     number of entries to clear.
 *
 * Returns 0.  Entries that were already empty (iommu_clear_tce()
 * returned 0) are skipped — there is no page to release for them.
 */
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldtce;

	for ( ; pages; --pages, ++entry) {
		oldtce = iommu_clear_tce(tbl, entry);
		if (!oldtce)
			continue;

		/* Drop the pin taken when this entry was populated */
		tce_iommu_unuse_page(container, oldtce);
	}

	return 0;
}
/*
 * Pin the userspace page backing @tce and return its host physical address.
 *
 * @tce: TCE value from userspace; its low bits encode the DMA direction
 *       (read/write permission) and, masked with PAGE_MASK, it gives the
 *       address of the page to pin.
 * @hpa: out parameter, set to the host physical address of the pinned page.
 *
 * Returns 0 on success or -EFAULT if the page could not be pinned.
 * The reference taken here is dropped by tce_iommu_unuse_page().
 */
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	/* Pin exactly one page; request write access unless device-read-only */
	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	/*
	 * Hand back a physical address — this patch series converts the
	 * code to work with physical rather than linear-mapping addresses.
	 */
	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
static long tce_iommu_build(struct tce_container *container, static long tce_iommu_build(struct tce_container *container,
struct iommu_table *tbl, struct iommu_table *tbl,
unsigned long entry, unsigned long tce, unsigned long pages) unsigned long entry, unsigned long tce, unsigned long pages)
{ {
long i, ret = 0; long i, ret = 0;
struct page *page = NULL; struct page *page;
unsigned long hva; unsigned long hpa;
enum dma_data_direction direction = iommu_tce_direction(tce); enum dma_data_direction direction = iommu_tce_direction(tce);
for (i = 0; i < pages; ++i) { for (i = 0; i < pages; ++i) {
unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK; unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
ret = get_user_pages_fast(tce & PAGE_MASK, 1, ret = tce_iommu_use_page(tce, &hpa);
direction != DMA_TO_DEVICE, &page); if (ret)
if (unlikely(ret != 1)) {
ret = -EFAULT;
break; break;
}
page = pfn_to_page(hpa >> PAGE_SHIFT);
if (!tce_page_is_contained(page, tbl->it_page_shift)) { if (!tce_page_is_contained(page, tbl->it_page_shift)) {
ret = -EPERM; ret = -EPERM;
break; break;
} }
hva = (unsigned long) page_address(page) + offset; hpa |= offset;
ret = iommu_tce_build(tbl, entry + i, (unsigned long) __va(hpa),
ret = iommu_tce_build(tbl, entry + i, hva, direction); direction);
if (ret) { if (ret) {
put_page(page); tce_iommu_unuse_page(container, hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift, __func__, entry << tbl->it_page_shift,
tce, ret); tce, ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment