Commit bfed6708 authored by Christoph Hellwig, committed by Linus Torvalds

drm/i915: use vmap in shmem_pin_map

shmem_pin_map somewhat awkwardly reimplements vmap using alloc_vm_area and
manual pte setup.  The only practical difference is that alloc_vm_area
prefaults the vmalloc area PTEs, which doesn't seem to be required here
(and could be added to vmap using a flag if actually required).  Switch to
use vmap, and use vfree to free both the vmalloc mapping and the page
array, as well as dropping the references to each page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://lkml.kernel.org/r/20201002122204.1534411-7-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d1b6d2e1
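
For readers new to the interface the patch switches to: VM_MAP_PUT_PAGES (added earlier in this series) transfers ownership of both the kvmalloc()ed page array and the per-page references to the mapping, so a single later vfree() drops every reference and frees the array. A minimal sketch of that pairing follows; the helper names are hypothetical and not part of this patch:

/*
 * Minimal sketch of the vmap()/vfree() ownership hand-off the patch
 * below relies on; map_page_array()/unmap_page_array() are hypothetical.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_page_array(struct page **pages, unsigned int n_pages)
{
	/*
	 * @pages must come from kvmalloc(): with VM_MAP_PUT_PAGES, vmap()
	 * adopts both the array and the page references, so the caller
	 * keeps no separate bookkeeping for the unmap path.
	 */
	return vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
}

static void unmap_page_array(void *vaddr)
{
	/* Drops one reference per mapped page and kvfree()s the array. */
	vfree(vaddr);
}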
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,40 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 	return file;
 }
 
-static size_t shmem_npte(struct file *file)
-{
-	return file->f_mapping->host->i_size >> PAGE_SHIFT;
-}
-
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
-	unsigned long pfn;
-
-	vunmap(ptr);
-
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (!WARN_ON(IS_ERR(page))) {
-			put_page(page);
-			put_page(page);
-		}
-	}
-}
-
 void *shmem_pin_map(struct file *file)
 {
-	const size_t n_pte = shmem_npte(file);
-	pte_t *stack[32], **ptes, **mem;
-	struct vm_struct *area;
-	unsigned long pfn;
-
-	mem = stack;
-	if (n_pte > ARRAY_SIZE(stack)) {
-		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
-		if (!mem)
-			return NULL;
-	}
+	struct page **pages;
+	size_t n_pages, i;
+	void *vaddr;
 
-	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
-	if (!area) {
-		if (mem != stack)
-			kvfree(mem);
+	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
+	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
 		return NULL;
-	}
 
-	ptes = mem;
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (IS_ERR(page))
+	for (i = 0; i < n_pages; i++) {
+		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+						       GFP_KERNEL);
+		if (IS_ERR(pages[i]))
 			goto err_page;
-
-		**ptes++ = mk_pte(page, PAGE_KERNEL);
 	}
 
-	if (mem != stack)
-		kvfree(mem);
-
+	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	if (!vaddr)
+		goto err_page;
 	mapping_set_unevictable(file->f_mapping);
-	return area->addr;
-
+	return vaddr;
 err_page:
-	if (mem != stack)
-		kvfree(mem);
-
-	__shmem_unpin_map(file, area->addr, pfn);
+	while (--i >= 0)
+		put_page(pages[i]);
+	kvfree(pages);
 	return NULL;
 }
 
 void shmem_unpin_map(struct file *file, void *ptr)
 {
 	mapping_clear_unevictable(file->f_mapping);
-	__shmem_unpin_map(file, ptr, shmem_npte(file));
+	vfree(ptr);
 }
 
 static int __shmem_rw(struct file *file, loff_t off,
...
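
The caller-visible contract is unchanged by the patch: shmem_pin_map() still returns one contiguous kernel address covering the whole object (or NULL on failure), and shmem_unpin_map() releases it. A hypothetical caller, for illustration only:

/*
 * Hypothetical caller, not part of this commit.
 * Assumes @len does not exceed the shmem object's size.
 */
#include <linux/string.h>

#include "shmem_utils.h"

static int copy_to_shmem(struct file *file, const void *src, size_t len)
{
	void *vaddr;

	vaddr = shmem_pin_map(file);	/* pin pages, get contiguous mapping */
	if (!vaddr)
		return -ENOMEM;

	memcpy(vaddr, src, len);

	shmem_unpin_map(file, vaddr);	/* one vfree() now does the teardown */
	return 0;
}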