Commit 20e76f1a authored by Thomas Zimmermann

dma-buf: Use struct dma_buf_map in dma_buf_vunmap() interfaces

This patch updates dma_buf_vunmap() and dma-buf's vunmap callback to
use struct dma_buf_map. The interfaces used to receive a buffer address.
This address is now given in an instance of the structure.

Users of the functions are updated accordingly. This is only an interface
change. It is currently expected that dma-buf memory can be accessed with
system memory load/store operations.

v2:
	* include dma-buf-heaps and i915 selftests (kernel test robot)
	* initialize cma_obj before using it in drm_gem_cma_free_object()
	  (kernel test robot)
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200925115601.23955-4-tzimmermann@suse.de
parent 6619ccf1
...@@ -1236,21 +1236,21 @@ EXPORT_SYMBOL_GPL(dma_buf_vmap); ...@@ -1236,21 +1236,21 @@ EXPORT_SYMBOL_GPL(dma_buf_vmap);
/** /**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap * @dmabuf: [in] buffer to vunmap
* @vaddr: [in] vmap to vunmap * @map: [in] vmap pointer to vunmap
*/ */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{ {
if (WARN_ON(!dmabuf)) if (WARN_ON(!dmabuf))
return; return;
BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0); BUG_ON(dmabuf->vmapping_counter == 0);
BUG_ON(!dma_buf_map_is_vaddr(&dmabuf->vmap_ptr, vaddr)); BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock); mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) { if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap) if (dmabuf->ops->vunmap)
dmabuf->ops->vunmap(dmabuf, vaddr); dmabuf->ops->vunmap(dmabuf, map);
dma_buf_map_clear(&dmabuf->vmap_ptr); dma_buf_map_clear(&dmabuf->vmap_ptr);
} }
mutex_unlock(&dmabuf->lock); mutex_unlock(&dmabuf->lock);
......
...@@ -252,7 +252,7 @@ static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map ...@@ -252,7 +252,7 @@ static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map
return 0; return 0;
} }
static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{ {
struct heap_helper_buffer *buffer = dmabuf->priv; struct heap_helper_buffer *buffer = dmabuf->priv;
......
...@@ -175,13 +175,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, ...@@ -175,13 +175,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*/ */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{ {
struct drm_gem_cma_object *cma_obj; struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (gem_obj->import_attach) { if (gem_obj->import_attach) {
if (cma_obj->vaddr) if (cma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt); drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) { } else if (cma_obj->vaddr) {
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
...@@ -645,7 +644,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, ...@@ -645,7 +644,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) { if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, map.vaddr); dma_buf_vunmap(attach->dmabuf, &map);
return obj; return obj;
} }
......
...@@ -337,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap); ...@@ -337,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
if (WARN_ON_ONCE(!shmem->vmap_use_count)) if (WARN_ON_ONCE(!shmem->vmap_use_count))
return; return;
...@@ -345,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) ...@@ -345,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
return; return;
if (obj->import_attach) if (obj->import_attach)
dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr); dma_buf_vunmap(obj->import_attach->dmabuf, &map);
else else
vunmap(shmem->vaddr); vunmap(shmem->vaddr);
......
...@@ -687,16 +687,16 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap); ...@@ -687,16 +687,16 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
/** /**
* drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
* @dma_buf: buffer to be unmapped * @dma_buf: buffer to be unmapped
* @vaddr: the virtual address of the buffer * @map: the virtual address of the buffer
* *
* Releases a kernel virtual mapping. This can be used as the * Releases a kernel virtual mapping. This can be used as the
* &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
*/ */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{ {
struct drm_gem_object *obj = dma_buf->priv; struct drm_gem_object *obj = dma_buf->priv;
drm_gem_vunmap(obj, vaddr); drm_gem_vunmap(obj, map->vaddr);
} }
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
......
...@@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) ...@@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{ {
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr) if (etnaviv_obj->vaddr)
dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
etnaviv_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not /* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated: * ours, just free the array we allocated:
......
...@@ -96,7 +96,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map ...@@ -96,7 +96,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map
return 0; return 0;
} }
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
......
...@@ -152,7 +152,7 @@ static int igt_dmabuf_import(void *arg) ...@@ -152,7 +152,7 @@ static int igt_dmabuf_import(void *arg)
err = 0; err = 0;
out_dma_map: out_dma_map:
dma_buf_vunmap(dmabuf, dma_map); dma_buf_vunmap(dmabuf, &map);
out_obj: out_obj:
i915_gem_object_put(obj); i915_gem_object_put(obj);
out_dmabuf: out_dmabuf:
...@@ -182,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg) ...@@ -182,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg)
} }
memset(ptr, 0xc5, PAGE_SIZE); memset(ptr, 0xc5, PAGE_SIZE);
dma_buf_vunmap(dmabuf, ptr); dma_buf_vunmap(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) { if (IS_ERR(obj)) {
...@@ -250,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg) ...@@ -250,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg)
memset(ptr, 0xc5, dmabuf->size); memset(ptr, 0xc5, dmabuf->size);
err = 0; err = 0;
dma_buf_vunmap(dmabuf, ptr); dma_buf_vunmap(dmabuf, &map);
out: out:
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
return err; return err;
......
...@@ -75,11 +75,11 @@ static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) ...@@ -75,11 +75,11 @@ static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
return 0; return 0;
} }
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{ {
struct mock_dmabuf *mock = to_mock(dma_buf); struct mock_dmabuf *mock = to_mock(dma_buf);
vm_unmap_ram(vaddr, mock->npages); vm_unmap_ram(map->vaddr, mock->npages);
} }
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
......
...@@ -149,11 +149,12 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) ...@@ -149,11 +149,12 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{ {
struct tegra_bo *obj = host1x_to_tegra_bo(bo); struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
if (obj->vaddr) if (obj->vaddr)
return; return;
else if (obj->gem.import_attach) else if (obj->gem.import_attach)
dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
else else
vunmap(addr); vunmap(addr);
} }
...@@ -663,7 +664,7 @@ static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map) ...@@ -663,7 +664,7 @@ static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
return 0; return 0;
} }
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr) static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{ {
} }
......
...@@ -648,6 +648,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv) ...@@ -648,6 +648,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
{ {
struct vb2_dc_buf *buf = mem_priv; struct vb2_dc_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) { if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n"); pr_err("trying to unpin a not attached buffer\n");
...@@ -660,7 +661,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv) ...@@ -660,7 +661,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
} }
if (buf->vaddr) { if (buf->vaddr) {
dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL; buf->vaddr = NULL;
} }
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir); dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
......
...@@ -580,6 +580,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv) ...@@ -580,6 +580,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{ {
struct vb2_dma_sg_buf *buf = mem_priv; struct vb2_dma_sg_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) { if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n"); pr_err("trying to unpin a not attached buffer\n");
...@@ -592,7 +593,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv) ...@@ -592,7 +593,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
} }
if (buf->vaddr) { if (buf->vaddr) {
dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL; buf->vaddr = NULL;
} }
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir); dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
......
...@@ -390,17 +390,19 @@ static int vb2_vmalloc_map_dmabuf(void *mem_priv) ...@@ -390,17 +390,19 @@ static int vb2_vmalloc_map_dmabuf(void *mem_priv)
static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{ {
struct vb2_vmalloc_buf *buf = mem_priv; struct vb2_vmalloc_buf *buf = mem_priv;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
dma_buf_vunmap(buf->dbuf, buf->vaddr); dma_buf_vunmap(buf->dbuf, &map);
buf->vaddr = NULL; buf->vaddr = NULL;
} }
static void vb2_vmalloc_detach_dmabuf(void *mem_priv) static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{ {
struct vb2_vmalloc_buf *buf = mem_priv; struct vb2_vmalloc_buf *buf = mem_priv;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (buf->vaddr) if (buf->vaddr)
dma_buf_vunmap(buf->dbuf, buf->vaddr); dma_buf_vunmap(buf->dbuf, &map);
kfree(buf); kfree(buf);
} }
......
...@@ -84,7 +84,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, ...@@ -84,7 +84,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt, struct sg_table *sgt,
enum dma_data_direction dir); enum dma_data_direction dir);
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map); int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
......
...@@ -23,6 +23,16 @@ struct dma_buf_map { ...@@ -23,6 +23,16 @@ struct dma_buf_map {
bool is_iomem; bool is_iomem;
}; };
/**
* DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
* @vaddr: A system-memory address
*/
#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
{ \
.vaddr = (vaddr_), \
.is_iomem = false, \
}
/** /**
* dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
* @map: The dma-buf mapping structure * @map: The dma-buf mapping structure
...@@ -36,10 +46,26 @@ static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) ...@@ -36,10 +46,26 @@ static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
map->is_iomem = false; map->is_iomem = false;
} }
/* API transition helper */ /**
static inline bool dma_buf_map_is_vaddr(const struct dma_buf_map *map, const void *vaddr) * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
* @lhs: The dma-buf mapping structure
* @rhs: A dma-buf mapping structure to compare with
*
* Two dma-buf mapping structures are equal if they both refer to the same type of memory
* and to the same address within that memory.
*
* Returns:
* True is both structures are equal, or false otherwise.
*/
static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
const struct dma_buf_map *rhs)
{ {
return !map->is_iomem && (map->vaddr == vaddr); if (lhs->is_iomem != rhs->is_iomem)
return false;
else if (lhs->is_iomem)
return lhs->vaddr_iomem == rhs->vaddr_iomem;
else
return lhs->vaddr == rhs->vaddr;
} }
/** /**
......
...@@ -267,7 +267,7 @@ struct dma_buf_ops { ...@@ -267,7 +267,7 @@ struct dma_buf_ops {
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
void (*vunmap)(struct dma_buf *, void *vaddr); void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
}; };
/** /**
...@@ -504,5 +504,5 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf, ...@@ -504,5 +504,5 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long); unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr); void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
#endif /* __DMA_BUF_H__ */ #endif /* __DMA_BUF_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment