Commit 67e93c80 authored by Joonyoung Shim's avatar Joonyoung Shim Committed by Inki Dae

drm/exynos: stop copying sg table

The struct exynos_drm_gem_buf already holds the pages of the buffer, so we
don't need to copy from the buffer's sg table into the dma-buf attachment's
sg table; we can simply build an sg table from the buffer's pages.
Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
parent 0e9a2ee3
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
struct exynos_drm_dmabuf_attachment { struct exynos_drm_dmabuf_attachment {
struct sg_table sgt; struct sg_table *sgt;
enum dma_data_direction dir; enum dma_data_direction dir;
bool is_mapped; bool is_mapped;
}; };
...@@ -53,13 +53,15 @@ static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, ...@@ -53,13 +53,15 @@ static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
if (!exynos_attach) if (!exynos_attach)
return; return;
sgt = &exynos_attach->sgt; sgt = exynos_attach->sgt;
if (sgt) {
if (exynos_attach->dir != DMA_NONE) if (exynos_attach->dir != DMA_NONE)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
exynos_attach->dir); exynos_attach->dir);
sg_free_table(sgt); sg_free_table(sgt);
}
kfree(sgt);
kfree(exynos_attach); kfree(exynos_attach);
attach->priv = NULL; attach->priv = NULL;
} }
...@@ -70,16 +72,13 @@ static struct sg_table * ...@@ -70,16 +72,13 @@ static struct sg_table *
{ {
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf; struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr; struct sg_table *sgt;
struct sg_table *sgt = NULL; int npages;
unsigned int i;
int nents, ret;
/* just return current sgt if already requested. */ /* just return current sgt if already requested. */
if (exynos_attach->dir == dir && exynos_attach->is_mapped) if (exynos_attach->dir == dir && exynos_attach->is_mapped)
return &exynos_attach->sgt; return exynos_attach->sgt;
buf = gem_obj->buffer; buf = gem_obj->buffer;
if (!buf) { if (!buf) {
...@@ -87,42 +86,29 @@ static struct sg_table * ...@@ -87,42 +86,29 @@ static struct sg_table *
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
sgt = &exynos_attach->sgt; npages = buf->size >> PAGE_SHIFT;
ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
if (ret) {
DRM_ERROR("failed to alloc sgt.\n");
return ERR_PTR(-ENOMEM);
}
mutex_lock(&dev->struct_mutex);
rd = buf->sgt->sgl; sgt = drm_prime_pages_to_sg(buf->pages, npages);
wr = sgt->sgl; if (IS_ERR(sgt))
for (i = 0; i < sgt->orig_nents; ++i) { goto err;
sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
rd = sg_next(rd);
wr = sg_next(wr);
}
if (dir != DMA_NONE) { if (dir != DMA_NONE) {
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); if (!dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir)) {
if (!nents) {
DRM_ERROR("failed to map sgl with iommu.\n"); DRM_ERROR("failed to map sgl with iommu.\n");
sg_free_table(sgt); sg_free_table(sgt);
sgt = ERR_PTR(-EIO); sgt = ERR_PTR(-EIO);
goto err_unlock; goto err;
} }
} }
exynos_attach->is_mapped = true; exynos_attach->is_mapped = true;
exynos_attach->sgt = sgt;
exynos_attach->dir = dir; exynos_attach->dir = dir;
attach->priv = exynos_attach; attach->priv = exynos_attach;
DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock: err:
mutex_unlock(&dev->struct_mutex);
return sgt; return sgt;
} }
...@@ -280,7 +266,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, ...@@ -280,7 +266,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
} }
exynos_gem_obj->buffer = buffer; exynos_gem_obj->buffer = buffer;
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach; exynos_gem_obj->base.import_attach = attach;
DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
......
...@@ -455,9 +455,6 @@ void exynos_drm_gem_free_object(struct drm_gem_object *obj) ...@@ -455,9 +455,6 @@ void exynos_drm_gem_free_object(struct drm_gem_object *obj)
exynos_gem_obj = to_exynos_gem_obj(obj); exynos_gem_obj = to_exynos_gem_obj(obj);
buf = exynos_gem_obj->buffer; buf = exynos_gem_obj->buffer;
if (obj->import_attach)
drm_prime_gem_destroy(obj, buf->sgt);
exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
} }
......
...@@ -30,7 +30,6 @@ ...@@ -30,7 +30,6 @@
* device address with IOMMU. * device address with IOMMU.
* @write: whether pages will be written to by the caller. * @write: whether pages will be written to by the caller.
* @pages: Array of backing pages. * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
* @size: size of allocated memory region. * @size: size of allocated memory region.
* @pfnmap: indicate whether memory region from userptr is mmaped with * @pfnmap: indicate whether memory region from userptr is mmaped with
* VM_PFNMAP or not. * VM_PFNMAP or not.
...@@ -43,7 +42,6 @@ struct exynos_drm_gem_buf { ...@@ -43,7 +42,6 @@ struct exynos_drm_gem_buf {
struct dma_attrs dma_attrs; struct dma_attrs dma_attrs;
unsigned int write; unsigned int write;
struct page **pages; struct page **pages;
struct sg_table *sgt;
unsigned long size; unsigned long size;
bool pfnmap; bool pfnmap;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment