Commit 0cf2ef46 authored by Thomas Zimmermann

drm/shmem-helper: Use cached mappings by default

SHMEM-buffer backing storage is allocated from system memory, which is
typically cacheable. The default mapping mode for SHMEM objects, however,
is writecombine.

Unify SHMEM semantics by defaulting to cached mappings. The exception
is pages imported via dma-buf. DMA memory is usually not cached.

DRM drivers that require write-combined mappings set the map_wc flag
in struct drm_gem_shmem_object to true. This currently affects lima,
panfrost and v3d.
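
For illustration, a minimal sketch of such an opt-in in a driver's
gem_create_object hook; the foo_* names are hypothetical, and lima and
v3d do the equivalent in the hunks below:

	#include <linux/slab.h>
	#include <drm/drm_gem_shmem_helper.h>

	/* Hypothetical driver BO embedding a SHMEM GEM object. */
	struct foo_bo {
		struct drm_gem_shmem_object base;
	};

	struct drm_gem_object *foo_create_object(struct drm_device *dev, size_t size)
	{
		struct foo_bo *bo;

		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		if (!bo)
			return NULL;

		/* Opt out of the cached default; vmap and mmap then use writecombine. */
		bo->base.map_wc = true;

		return &bo->base.base;
	}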

The drivers mgag200, udl, virtio and vkms continue to use default
shmem mappings.

The drivers cirrus and gm12u320 change their caching behavior: both used
writecombine and now switch over to the shmem default of cached mappings.
Both drivers use SHMEM objects as shadow buffers for internal video memory,
so cached mappings do not affect them negatively.

v3:
	* set value of shmem pointer before dereferencing it in
	  __drm_gem_shmem_create() (Dan, kernel test robot)
v2:
	* recreate patch on top of latest SHMEM helpers
	* update lima, panfrost, v3d to select writecombine (Daniel, Rob)
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Maxime Ripard <mripard@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201117133156.26822-2-tzimmermann@suse.de
parent 3a78f064
@@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
+	shmem = to_drm_gem_shmem_obj(obj);
+
 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;
 
-	if (private)
+	if (private) {
 		drm_gem_private_object_init(dev, obj, size);
-	else
+		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
+	}
 	if (ret)
 		goto err_free;
 
@@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	if (ret)
 		goto err_release;
 
-	shmem = to_drm_gem_shmem_obj(obj);
 	mutex_init(&shmem->pages_lock);
 	mutex_init(&shmem->vmap_lock);
 	INIT_LIST_HEAD(&shmem->madv_list);
@@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct
 	if (ret)
 		goto err_zero_use;
 
-	if (!shmem->map_cached)
+	if (shmem->map_wc)
 		prot = pgprot_writecombine(prot);
 	shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 			    VM_MAP, prot);
@@ -497,7 +500,6 @@ drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
 	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
 	if (!shmem)
 		return NULL;
-	shmem->map_cached = true;
 
 	return &shmem->base;
 }
@@ -626,7 +628,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 
 	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	if (!shmem->map_cached)
+	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	vma->vm_ops = &drm_gem_shmem_vm_ops;
 
...
@@ -225,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
 
 	mutex_init(&bo->lock);
 	INIT_LIST_HEAD(&bo->va);
-
+	bo->base.map_wc = true;
 	bo->base.base.funcs = &lima_gem_funcs;
 
 	return &bo->base.base;
...
@@ -228,7 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 	INIT_LIST_HEAD(&obj->mappings.list);
 	mutex_init(&obj->mappings.lock);
 	obj->base.base.funcs = &panfrost_gem_funcs;
-	obj->base.map_cached = pfdev->coherent;
+	obj->base.map_wc = !pfdev->coherent;
 
 	return &obj->base.base;
 }
...
@@ -78,7 +78,7 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
 
 	obj = &bo->base.base;
 	obj->funcs = &v3d_gem_funcs;
-
+	bo->base.map_wc = true;
 	INIT_LIST_HEAD(&bo->unref_head);
 
 	return &bo->base.base;
...
@@ -144,7 +144,6 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 
 	dshmem = &shmem->base.base;
 	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
-	dshmem->map_cached = true;
 
 	return &dshmem->base;
 }
...
@@ -98,9 +98,9 @@ struct drm_gem_shmem_object {
 	unsigned int vmap_use_count;
 
 	/**
-	 * @map_cached: map object cached (instead of using writecombine).
+	 * @map_wc: map object write-combined (instead of using shmem defaults).
 	 */
-	bool map_cached;
+	bool map_wc;
 };
 
 #define to_drm_gem_shmem_obj(obj) \
...