Commit 67b7836d authored by Dmitry Osipenko

drm/shmem-helper: Switch to reservation lock

Replace all drm-shmem locks with a GEM reservation lock. This makes locks
consistent with dma-buf locking convention where importers are responsible
for holding reservation lock for all operations performed over dma-bufs,
preventing deadlock between dma-buf importers and exporters.
Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/all/20230108210445.3948344-8-dmitry.osipenko@collabora.com/
parent 67fe7487
...@@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) ...@@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (ret) if (ret)
goto err_release; goto err_release;
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list); INIT_LIST_HEAD(&shmem->madv_list);
if (!private) { if (!private) {
...@@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) ...@@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) { if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt); drm_prime_gem_destroy(obj, shmem->sgt);
} else { } else {
dma_resv_lock(shmem->base.resv, NULL);
drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (shmem->sgt) { if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt, dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
DMA_BIDIRECTIONAL, 0); DMA_BIDIRECTIONAL, 0);
...@@ -154,18 +154,18 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) ...@@ -154,18 +154,18 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} }
if (shmem->pages) if (shmem->pages)
drm_gem_shmem_put_pages(shmem); drm_gem_shmem_put_pages(shmem);
}
drm_WARN_ON(obj->dev, shmem->pages_use_count); drm_WARN_ON(obj->dev, shmem->pages_use_count);
dma_resv_unlock(shmem->base.resv);
}
drm_gem_object_release(obj); drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock);
mutex_destroy(&shmem->vmap_lock);
kfree(shmem); kfree(shmem);
} }
EXPORT_SYMBOL_GPL(drm_gem_shmem_free); EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
struct page **pages; struct page **pages;
...@@ -197,35 +197,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) ...@@ -197,35 +197,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
} }
/* /*
* drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object * @shmem: shmem GEM object
* *
* This function makes sure that backing pages exists for the shmem GEM object * This function decreases the use count and puts the backing pages when use drops to zero.
* and increases the use count.
*
* Returns:
* 0 on success or a negative error code on failure.
*/ */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
int ret;
drm_WARN_ON(obj->dev, obj->import_attach); dma_resv_assert_held(shmem->base.resv);
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
return ret;
ret = drm_gem_shmem_get_pages_locked(shmem);
mutex_unlock(&shmem->pages_lock);
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return; return;
...@@ -243,19 +224,6 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) ...@@ -243,19 +224,6 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_accessed_on_put); shmem->pages_mark_accessed_on_put);
shmem->pages = NULL; shmem->pages = NULL;
} }
/*
* drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
mutex_lock(&shmem->pages_lock);
drm_gem_shmem_put_pages_locked(shmem);
mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages); EXPORT_SYMBOL(drm_gem_shmem_put_pages);
/** /**
...@@ -272,6 +240,8 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) ...@@ -272,6 +240,8 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
drm_WARN_ON(obj->dev, obj->import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem); return drm_gem_shmem_get_pages(shmem);
...@@ -289,14 +259,31 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) ...@@ -289,14 +259,31 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
drm_WARN_ON(obj->dev, obj->import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem); drm_gem_shmem_put_pages(shmem);
} }
EXPORT_SYMBOL(drm_gem_shmem_unpin); EXPORT_SYMBOL(drm_gem_shmem_unpin);
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, /*
struct iosys_map *map) * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
*
* This function makes sure that a contiguous kernel virtual address mapping
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
* Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
int ret = 0; int ret = 0;
...@@ -312,6 +299,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -312,6 +299,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
} else { } else {
pgprot_t prot = PAGE_KERNEL; pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
if (shmem->vmap_use_count++ > 0) { if (shmem->vmap_use_count++ > 0) {
iosys_map_set_vaddr(map, shmem->vaddr); iosys_map_set_vaddr(map, shmem->vaddr);
return 0; return 0;
...@@ -346,45 +335,30 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -346,45 +335,30 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
return ret; return ret;
} }
EXPORT_SYMBOL(drm_gem_shmem_vmap);
/* /*
* drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object * @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing * @map: Kernel virtual address where the SHMEM GEM object was mapped
* store.
*
* This function makes sure that a contiguous kernel virtual address mapping
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
* *
* Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). * This function cleans up a kernel virtual address mapping acquired by
* drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
* zero.
* *
* Returns: * This function hides the differences between dma-buf imported and natively
* 0 on success or a negative error code on failure. * allocated objects.
*/ */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map) struct iosys_map *map)
{
int ret;
ret = mutex_lock_interruptible(&shmem->vmap_lock);
if (ret)
return ret;
ret = drm_gem_shmem_vmap_locked(shmem, map);
mutex_unlock(&shmem->vmap_lock);
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
struct iosys_map *map)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
if (obj->import_attach) { if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map); dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else { } else {
dma_resv_assert_held(shmem->base.resv);
if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
return; return;
...@@ -397,26 +371,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -397,26 +371,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
shmem->vaddr = NULL; shmem->vaddr = NULL;
} }
/*
* drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
* drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
* zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map)
{
mutex_lock(&shmem->vmap_lock);
drm_gem_shmem_vunmap_locked(shmem, map);
mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap); EXPORT_SYMBOL(drm_gem_shmem_vunmap);
static int static int
...@@ -447,24 +401,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, ...@@ -447,24 +401,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
*/ */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{ {
mutex_lock(&shmem->pages_lock); dma_resv_assert_held(shmem->base.resv);
if (shmem->madv >= 0) if (shmem->madv >= 0)
shmem->madv = madv; shmem->madv = madv;
madv = shmem->madv; madv = shmem->madv;
mutex_unlock(&shmem->pages_lock);
return (madv >= 0); return (madv >= 0);
} }
EXPORT_SYMBOL(drm_gem_shmem_madvise); EXPORT_SYMBOL(drm_gem_shmem_madvise);
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
dma_resv_assert_held(shmem->base.resv);
drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
...@@ -472,7 +426,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) ...@@ -472,7 +426,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt); kfree(shmem->sgt);
shmem->sgt = NULL; shmem->sgt = NULL;
drm_gem_shmem_put_pages_locked(shmem); drm_gem_shmem_put_pages(shmem);
shmem->madv = -1; shmem->madv = -1;
...@@ -488,17 +442,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) ...@@ -488,17 +442,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
} }
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
if (!mutex_trylock(&shmem->pages_lock))
return false;
drm_gem_shmem_purge_locked(shmem);
mutex_unlock(&shmem->pages_lock);
return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge); EXPORT_SYMBOL(drm_gem_shmem_purge);
/** /**
...@@ -551,7 +494,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ...@@ -551,7 +494,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
/* We don't use vmf->pgoff since that has the fake offset */ /* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
mutex_lock(&shmem->pages_lock); dma_resv_lock(shmem->base.resv, NULL);
if (page_offset >= num_pages || if (page_offset >= num_pages ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
...@@ -563,7 +506,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ...@@ -563,7 +506,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
} }
mutex_unlock(&shmem->pages_lock); dma_resv_unlock(shmem->base.resv);
return ret; return ret;
} }
...@@ -575,7 +518,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) ...@@ -575,7 +518,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
drm_WARN_ON(obj->dev, obj->import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock); dma_resv_lock(shmem->base.resv, NULL);
/* /*
* We should have already pinned the pages when the buffer was first * We should have already pinned the pages when the buffer was first
...@@ -585,7 +528,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) ...@@ -585,7 +528,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++; shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock); dma_resv_unlock(shmem->base.resv);
drm_gem_vm_open(vma); drm_gem_vm_open(vma);
} }
...@@ -595,7 +538,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) ...@@ -595,7 +538,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_put_pages(shmem); drm_gem_shmem_put_pages(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma); drm_gem_vm_close(vma);
} }
...@@ -630,7 +576,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct ...@@ -630,7 +576,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return dma_buf_mmap(obj->dma_buf, vma, 0); return dma_buf_mmap(obj->dma_buf, vma, 0);
} }
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem); ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret) if (ret)
return ret; return ret;
...@@ -696,7 +645,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ ...@@ -696,7 +645,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, obj->import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages_locked(shmem); ret = drm_gem_shmem_get_pages(shmem);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -718,7 +667,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ ...@@ -718,7 +667,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
sg_free_table(sgt); sg_free_table(sgt);
kfree(sgt); kfree(sgt);
err_put_pages: err_put_pages:
drm_gem_shmem_put_pages_locked(shmem); drm_gem_shmem_put_pages(shmem);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -743,11 +692,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) ...@@ -743,11 +692,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
int ret; int ret;
struct sg_table *sgt; struct sg_table *sgt;
ret = mutex_lock_interruptible(&shmem->pages_lock); ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
sgt = drm_gem_shmem_get_pages_sgt_locked(shmem); sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
mutex_unlock(&shmem->pages_lock); dma_resv_unlock(shmem->base.resv);
return sgt; return sgt;
} }
......
...@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) ...@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
new_size = min(new_size, bo->base.base.size); new_size = min(new_size, bo->base.base.size);
mutex_lock(&bo->base.pages_lock); dma_resv_lock(bo->base.base.resv, NULL);
if (bo->base.pages) { if (bo->base.pages) {
pages = bo->base.pages; pages = bo->base.pages;
...@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) ...@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
sizeof(*pages), GFP_KERNEL | __GFP_ZERO); sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
if (!pages) { if (!pages) {
mutex_unlock(&bo->base.pages_lock); dma_resv_unlock(bo->base.base.resv);
return -ENOMEM; return -ENOMEM;
} }
...@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) ...@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
struct page *page = shmem_read_mapping_page(mapping, i); struct page *page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) { if (IS_ERR(page)) {
mutex_unlock(&bo->base.pages_lock); dma_resv_unlock(bo->base.base.resv);
return PTR_ERR(page); return PTR_ERR(page);
} }
pages[i] = page; pages[i] = page;
} }
mutex_unlock(&bo->base.pages_lock); dma_resv_unlock(bo->base.base.resv);
ret = sg_alloc_table_from_pages(&sgt, pages, i, 0, ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
new_size, GFP_KERNEL); new_size, GFP_KERNEL);
......
...@@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, ...@@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
bo = to_panfrost_bo(gem_obj); bo = to_panfrost_bo(gem_obj);
ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
if (ret)
goto out_put_object;
mutex_lock(&pfdev->shrinker_lock); mutex_lock(&pfdev->shrinker_lock);
mutex_lock(&bo->mappings.lock); mutex_lock(&bo->mappings.lock);
if (args->madv == PANFROST_MADV_DONTNEED) { if (args->madv == PANFROST_MADV_DONTNEED) {
...@@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, ...@@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
out_unlock_mappings: out_unlock_mappings:
mutex_unlock(&bo->mappings.lock); mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock); mutex_unlock(&pfdev->shrinker_lock);
dma_resv_unlock(bo->base.base.resv);
out_put_object:
drm_gem_object_put(gem_obj); drm_gem_object_put(gem_obj);
return ret; return ret;
} }
......
...@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) ...@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
if (!mutex_trylock(&bo->mappings.lock)) if (!mutex_trylock(&bo->mappings.lock))
return false; return false;
if (!mutex_trylock(&shmem->pages_lock)) if (!dma_resv_trylock(shmem->base.resv))
goto unlock_mappings; goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo); panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge_locked(&bo->base); drm_gem_shmem_purge(&bo->base);
ret = true; ret = true;
mutex_unlock(&shmem->pages_lock); dma_resv_unlock(shmem->base.resv);
unlock_mappings: unlock_mappings:
mutex_unlock(&bo->mappings.lock); mutex_unlock(&bo->mappings.lock);
......
...@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
struct panfrost_gem_mapping *bomapping; struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo; struct panfrost_gem_object *bo;
struct address_space *mapping; struct address_space *mapping;
struct drm_gem_object *obj;
pgoff_t page_offset; pgoff_t page_offset;
struct sg_table *sgt; struct sg_table *sgt;
struct page **pages; struct page **pages;
...@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
page_offset = addr >> PAGE_SHIFT; page_offset = addr >> PAGE_SHIFT;
page_offset -= bomapping->mmnode.start; page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock); obj = &bo->base.base;
dma_resv_lock(obj->resv, NULL);
if (!bo->base.pages) { if (!bo->base.pages) {
bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
if (!bo->sgts) { if (!bo->sgts) {
mutex_unlock(&bo->base.pages_lock);
ret = -ENOMEM; ret = -ENOMEM;
goto err_bo; goto err_unlock;
} }
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
...@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (!pages) { if (!pages) {
kvfree(bo->sgts); kvfree(bo->sgts);
bo->sgts = NULL; bo->sgts = NULL;
mutex_unlock(&bo->base.pages_lock);
ret = -ENOMEM; ret = -ENOMEM;
goto err_bo; goto err_unlock;
} }
bo->base.pages = pages; bo->base.pages = pages;
bo->base.pages_use_count = 1; bo->base.pages_use_count = 1;
...@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
pages = bo->base.pages; pages = bo->base.pages;
if (pages[page_offset]) { if (pages[page_offset]) {
/* Pages are already mapped, bail out. */ /* Pages are already mapped, bail out. */
mutex_unlock(&bo->base.pages_lock);
goto out; goto out;
} }
} }
...@@ -502,14 +502,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -502,14 +502,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
pages[i] = shmem_read_mapping_page(mapping, i); pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) { if (IS_ERR(pages[i])) {
mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]); ret = PTR_ERR(pages[i]);
goto err_pages; goto err_pages;
} }
} }
mutex_unlock(&bo->base.pages_lock);
sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
ret = sg_alloc_table_from_pages(sgt, pages + page_offset, ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
...@@ -528,6 +525,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -528,6 +525,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
out: out:
dma_resv_unlock(obj->resv);
panfrost_gem_mapping_put(bomapping); panfrost_gem_mapping_put(bomapping);
return 0; return 0;
...@@ -536,6 +535,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, ...@@ -536,6 +535,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
sg_free_table(sgt); sg_free_table(sgt);
err_pages: err_pages:
drm_gem_shmem_put_pages(&bo->base); drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo: err_bo:
panfrost_gem_mapping_put(bomapping); panfrost_gem_mapping_put(bomapping);
return ret; return ret;
......
...@@ -26,11 +26,6 @@ struct drm_gem_shmem_object { ...@@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
*/ */
struct drm_gem_object base; struct drm_gem_object base;
/**
* @pages_lock: Protects the page table and use count
*/
struct mutex pages_lock;
/** /**
* @pages: Page table * @pages: Page table
*/ */
...@@ -65,11 +60,6 @@ struct drm_gem_shmem_object { ...@@ -65,11 +60,6 @@ struct drm_gem_shmem_object {
*/ */
struct sg_table *sgt; struct sg_table *sgt;
/**
* @vmap_lock: Protects the vmap address and use count
*/
struct mutex vmap_lock;
/** /**
* @vaddr: Kernel virtual address of the backing memory * @vaddr: Kernel virtual address of the backing memory
*/ */
...@@ -109,7 +99,6 @@ struct drm_gem_shmem_object { ...@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
...@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem ...@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach; !shmem->base.dma_buf && !shmem->base.import_attach;
} }
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment