Commit e0bf78b0 authored by Alexey Kardashevskiy, committed by Michael Ellerman

powerpc/mm/iommu/vfio_spapr_tce: Change mm_iommu_get to reference a region

Normally mm_iommu_get() should add a reference and mm_iommu_put() should
remove it. However historically mm_iommu_find() does the referencing and
mm_iommu_get() is doing allocation and referencing.

We are going to add another helper to preregister device memory so
instead of having mm_iommu_new() (which pre-registers the normal memory
and references the region), we need separate helpers for pre-registering
and referencing.

This renames:
- mm_iommu_get to mm_iommu_new;
- mm_iommu_find to mm_iommu_get.

This changes mm_iommu_get() to reference the region so the name now
reflects what it does.

This removes the check for exact match from mm_iommu_new() as we want it
to fail on existing regions; mm_iommu_get() should be used instead.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ab7032e7
......@@ -21,7 +21,7 @@ struct mm_iommu_table_group_mem_t;
extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
extern long mm_iommu_new(struct mm_struct *mm,
unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
......@@ -32,7 +32,7 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
......
......@@ -126,7 +126,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
return 0;
}
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem)
{
struct mm_iommu_table_group_mem_t *mem;
......@@ -140,12 +140,6 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
next) {
if ((mem->ua == ua) && (mem->entries == entries)) {
++mem->used;
*pmem = mem;
goto unlock_exit;
}
/* Overlap? */
if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
(ua < (mem->ua +
......@@ -252,7 +246,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
EXPORT_SYMBOL_GPL(mm_iommu_new);
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
......@@ -368,21 +362,26 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
return ret;
}
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
mutex_lock(&mem_list_mutex);
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua == ua) && (mem->entries == entries)) {
ret = mem;
++mem->used;
break;
}
}
mutex_unlock(&mem_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);
EXPORT_SYMBOL_GPL(mm_iommu_get);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
......
......@@ -152,11 +152,12 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem;
struct tce_iommu_prereg *tcemem;
bool found = false;
long ret;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
......@@ -168,9 +169,13 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
}
if (!found)
return -ENOENT;
ret = -ENOENT;
else
ret = tce_iommu_prereg_free(container, tcemem);
mm_iommu_put(container->mm, mem);
return tce_iommu_prereg_free(container, tcemem);
return ret;
}
static long tce_iommu_register_pages(struct tce_container *container,
......@@ -185,22 +190,24 @@ static long tce_iommu_register_pages(struct tce_container *container,
((vaddr + size) < vaddr))
return -EINVAL;
mem = mm_iommu_find(container->mm, vaddr, entries);
mem = mm_iommu_get(container->mm, vaddr, entries);
if (mem) {
list_for_each_entry(tcemem, &container->prereg_list, next) {
if (tcemem->mem == mem)
return -EBUSY;
if (tcemem->mem == mem) {
ret = -EBUSY;
goto put_exit;
}
}
} else {
ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
}
ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
if (!tcemem) {
mm_iommu_put(container->mm, mem);
return -ENOMEM;
ret = -ENOMEM;
goto put_exit;
}
tcemem->mem = mem;
......@@ -209,6 +216,10 @@ static long tce_iommu_register_pages(struct tce_container *container,
container->enabled = true;
return 0;
put_exit:
mm_iommu_put(container->mm, mem);
return ret;
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment