Commit 3a68900a authored by Hans de Goede, committed by Mauro Carvalho Chehab

media: atomisp: remove hmm_page_object

hmm_page_object only stores a struct page pointer, so we can just use
the hmm_bo.pages page pointer array everywhere.
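
The conversion is mechanical: every bo->page_obj[i].page access becomes
bo->pages[i], and the separate allocation and freeing of the wrapper array
disappears. A minimal userspace sketch of the simplification, using stand-in
types and a fixed size of 4 pages (illustration only, not driver code):

/* Illustration only: stand-in types, not the atomisp driver code. */
#include <stdio.h>
#include <stdlib.h>

struct page { int nr; };				/* stand-in for the kernel's struct page */

struct hmm_page_object { struct page *page; };		/* the wrapper being removed */

int main(void)
{
	struct page backing[4] = { {0}, {1}, {2}, {3} };
	struct hmm_page_object *page_obj;
	struct page **pages;
	int i;

	/* Old layout: a second array of one-member wrappers just to reach each page. */
	page_obj = calloc(4, sizeof(*page_obj));
	if (!page_obj)
		return 1;
	for (i = 0; i < 4; i++)
		page_obj[i].page = &backing[i];
	printf("old: page %d\n", page_obj[2].page->nr);
	free(page_obj);

	/* New layout: a plain array of struct page * (what bo->pages already is) suffices. */
	pages = calloc(4, sizeof(*pages));
	if (!pages)
		return 1;
	for (i = 0; i < 4; i++)
		pages[i] = &backing[i];
	printf("new: page %d\n", pages[2]->nr);
	free(pages);

	return 0;
}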

Link: https://lore.kernel.org/linux-media/20220615205037.16549-33-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
parent f9599127
@@ -114,10 +114,6 @@ struct hmm_bo_device {
 	struct kmem_cache *bo_cache;
 };
 
-struct hmm_page_object {
-	struct page *page;
-};
-
 struct hmm_buffer_object {
 	struct hmm_bo_device *bdev;
 	struct list_head list;
@@ -128,7 +124,6 @@ struct hmm_buffer_object {
 	/* mutex protecting this BO */
 	struct mutex mutex;
 	enum hmm_bo_type type;
-	struct hmm_page_object *page_obj;	/* physical pages */
 	int mmap_count;
 	int status;
 	int mem_type;
......
@@ -295,7 +295,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
 		idx = (virt - bo->start) >> PAGE_SHIFT;
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-		src = (char *)kmap(bo->page_obj[idx].page) + offset;
+		src = (char *)kmap(bo->pages[idx]) + offset;
 
 		if ((bytes + offset) >= PAGE_SIZE) {
 			len = PAGE_SIZE - offset;
@@ -314,7 +314,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
 
 		clflush_cache_range(src, len);
 
-		kunmap(bo->page_obj[idx].page);
+		kunmap(bo->pages[idx]);
 	}
 
 	return 0;
@@ -428,9 +428,9 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
 		if (in_atomic())
-			des = (char *)kmap_atomic(bo->page_obj[idx].page);
+			des = (char *)kmap_atomic(bo->pages[idx]);
 		else
-			des = (char *)kmap(bo->page_obj[idx].page);
+			des = (char *)kmap(bo->pages[idx]);
 
 		if (!des) {
 			dev_err(atomisp_dev,
@@ -464,7 +464,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
 			 */
 			kunmap_atomic(des - offset);
 		else
-			kunmap(bo->page_obj[idx].page);
+			kunmap(bo->pages[idx]);
 	}
 
 	return 0;
@@ -508,7 +508,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
 		idx = (virt - bo->start) >> PAGE_SHIFT;
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-		des = (char *)kmap(bo->page_obj[idx].page) + offset;
+		des = (char *)kmap(bo->pages[idx]) + offset;
 
 		if ((bytes + offset) >= PAGE_SIZE) {
 			len = PAGE_SIZE - offset;
@@ -524,7 +524,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
 
 		clflush_cache_range(des, len);
 
-		kunmap(bo->page_obj[idx].page);
+		kunmap(bo->pages[idx]);
 	}
 
 	return 0;
@@ -547,7 +547,7 @@ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
 	idx = (virt - bo->start) >> PAGE_SHIFT;
 	offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-	return page_to_phys(bo->page_obj[idx].page) + offset;
+	return page_to_phys(bo->pages[idx]) + offset;
 }
 
 int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
......
@@ -631,7 +631,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
 	int i, ret;
 
 	for (i = 0; i < free_pgnr; i++) {
-		ret = set_pages_wb(bo->page_obj[i].page, 1);
+		ret = set_pages_wb(bo->pages[i], 1);
 		if (ret)
 			dev_err(atomisp_dev,
 				"set page to WB err ...ret = %d\n",
@@ -644,7 +644,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
 		   address be valid,it maybe memory corruption by lowmemory
 		*/
 		if (!ret) {
-			__free_pages(bo->page_obj[i].page, 0);
+			__free_pages(bo->pages[i], 0);
 		}
 	}
 }
@@ -663,11 +663,6 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 
 	pgnr = bo->pgnr;
 
-	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
-				     GFP_KERNEL);
-	if (unlikely(!bo->page_obj))
-		return -ENOMEM;
-
 	i = 0;
 	alloc_pgnr = 0;
 
@@ -739,7 +734,7 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 		}
 
 		for (j = 0; j < blk_pgnr; j++, i++) {
-			bo->page_obj[i].page = pages + j;
+			bo->pages[i] = pages + j;
 		}
 
 		pgnr -= blk_pgnr;
@@ -759,18 +754,9 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 cleanup:
 	alloc_pgnr = i;
 	free_private_bo_pages(bo, alloc_pgnr);
-	kfree(bo->page_obj);
-
 	return -ENOMEM;
 }
 
-static void free_private_pages(struct hmm_buffer_object *bo)
-{
-	free_private_bo_pages(bo, bo->pgnr);
-
-	kfree(bo->page_obj);
-}
-
 static void free_user_pages(struct hmm_buffer_object *bo,
 			    unsigned int page_nr)
 {
@@ -782,8 +768,6 @@ static void free_user_pages(struct hmm_buffer_object *bo,
 		for (i = 0; i < page_nr; i++)
 			put_page(bo->pages[i]);
 	}
-	kfree(bo->pages);
-	kfree(bo->page_obj);
 }
 
 /*
@@ -793,20 +777,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 			    const void __user *userptr)
 {
 	int page_nr;
-	int i;
 	struct vm_area_struct *vma;
-	struct page **pages;
-
-	pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
-	if (unlikely(!pages))
-		return -ENOMEM;
-
-	bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
-				     GFP_KERNEL);
-	if (unlikely(!bo->page_obj)) {
-		kfree(pages);
-		return -ENOMEM;
-	}
 
 	mutex_unlock(&bo->mutex);
 	mmap_read_lock(current->mm);
@@ -814,8 +785,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 	mmap_read_unlock(current->mm);
 	if (!vma) {
 		dev_err(atomisp_dev, "find_vma failed\n");
-		kfree(bo->page_obj);
-		kfree(pages);
 		mutex_lock(&bo->mutex);
 		return -EFAULT;
 	}
@@ -827,18 +796,16 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 	userptr = untagged_addr(userptr);
 
-	bo->pages = pages;
-
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
 		page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
 					 FOLL_LONGTERM | FOLL_WRITE,
-					 pages, NULL);
+					 bo->pages, NULL);
 		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
 	} else {
 		/*Handle frame buffer allocated in user space*/
 		mutex_unlock(&bo->mutex);
 		page_nr = get_user_pages_fast((unsigned long)userptr,
-					      (int)(bo->pgnr), 1, pages);
+					      (int)(bo->pgnr), 1, bo->pages);
 		mutex_lock(&bo->mutex);
 		bo->mem_type = HMM_BO_MEM_TYPE_USER;
 	}
@@ -858,10 +825,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 		goto out_of_mem;
 	}
 
-	for (i = 0; i < bo->pgnr; i++) {
-		bo->page_obj[i].page = pages[i];
-	}
-
 	return 0;
 
 out_of_mem:
@@ -891,6 +854,12 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 	mutex_lock(&bo->mutex);
 	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 
+	bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(!bo->pages)) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+
 	/*
 	 * TO DO:
 	 *   add HMM_BO_USER type
@@ -915,6 +884,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 	return 0;
 
 alloc_err:
+	kfree(bo->pages);
 	mutex_unlock(&bo->mutex);
 	dev_err(atomisp_dev, "alloc pages err...\n");
 	return ret;
@@ -940,11 +910,13 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
 	bo->status &= (~HMM_BO_PAGE_ALLOCED);
 
 	if (bo->type == HMM_BO_PRIVATE)
-		free_private_pages(bo);
+		free_private_bo_pages(bo, bo->pgnr);
 	else if (bo->type == HMM_BO_USER)
 		free_user_pages(bo, bo->pgnr);
 	else
 		dev_err(atomisp_dev, "invalid buffer type.\n");
+
+	kfree(bo->pages);
 	mutex_unlock(&bo->mutex);
 
 	return;
@@ -989,7 +961,7 @@ int hmm_bo_bind(struct hmm_buffer_object *bo)
 	for (i = 0; i < bo->pgnr; i++) {
 		ret =
 		    isp_mmu_map(&bdev->mmu, virt,
-				page_to_phys(bo->page_obj[i].page), 1);
+				page_to_phys(bo->pages[i]), 1);
 		if (ret)
 			goto map_err;
 		virt += (1 << PAGE_SHIFT);
@@ -1103,9 +1075,6 @@ int hmm_bo_binded(struct hmm_buffer_object *bo)
 
 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
 {
-	struct page **pages;
-	int i;
-
 	check_bo_null_return(bo, NULL);
 
 	mutex_lock(&bo->mutex);
@@ -1122,27 +1091,15 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
 	}
 
-	pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
-	if (unlikely(!pages)) {
-		mutex_unlock(&bo->mutex);
-		return NULL;
-	}
-
-	for (i = 0; i < bo->pgnr; i++)
-		pages[i] = bo->page_obj[i].page;
-
-	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
 			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
 	if (unlikely(!bo->vmap_addr)) {
-		kfree(pages);
 		mutex_unlock(&bo->mutex);
 		dev_err(atomisp_dev, "vmap failed...\n");
 		return NULL;
 	}
 	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
 
-	kfree(pages);
-
 	mutex_unlock(&bo->mutex);
 	return bo->vmap_addr;
 }
@@ -1272,7 +1229,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
 	virt = vma->vm_start;
 	for (i = 0; i < pgnr; i++) {
-		pfn = page_to_pfn(bo->page_obj[i].page);
+		pfn = page_to_pfn(bo->pages[i]);
 		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
 			dev_warn(atomisp_dev,
 				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
......