Commit 81aa3f8e, authored by Michael J. Ruhl, committed by Matthew Auld

drm/i915/dmabuf: dmabuf cleanup

Some minor cleanup of some variables for consistency.

Normalize struct sg_table to sgt.
Normalize struct dma_buf_attachment to attach.
checkpatch issues sizeof(), !NULL updates.

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221028155029.494736-3-matthew.auld@intel.com
parent 6427ab57
...@@ -25,43 +25,46 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) ...@@ -25,43 +25,46 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
return to_intel_bo(buf->priv); return to_intel_bo(buf->priv);
} }
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
struct sg_table *st; struct sg_table *sgt;
struct scatterlist *src, *dst; struct scatterlist *src, *dst;
int ret, i; int ret, i;
/* Copy sg so that we make an independent mapping */ /*
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); * Make a copy of the object's sgt, so that we can make an independent
if (st == NULL) { * mapping
*/
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
} }
ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret) if (ret)
goto err_free; goto err_free;
src = obj->mm.pages->sgl; src = obj->mm.pages->sgl;
dst = st->sgl; dst = sgt->sgl;
for (i = 0; i < obj->mm.pages->orig_nents; i++) { for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0); sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst); dst = sg_next(dst);
src = sg_next(src); src = sg_next(src);
} }
ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC); ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
if (ret) if (ret)
goto err_free_sg; goto err_free_sg;
return st; return sgt;
err_free_sg: err_free_sg:
sg_free_table(st); sg_free_table(sgt);
err_free: err_free:
kfree(st); kfree(sgt);
err: err:
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -236,15 +239,15 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) ...@@ -236,15 +239,15 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages; struct sg_table *sgt;
unsigned int sg_page_sizes; unsigned int sg_page_sizes;
assert_object_held(obj); assert_object_held(obj);
pages = dma_buf_map_attachment(obj->base.import_attach, sgt = dma_buf_map_attachment(obj->base.import_attach,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (IS_ERR(pages)) if (IS_ERR(sgt))
return PTR_ERR(pages); return PTR_ERR(sgt);
/* /*
* DG1 is special here since it still snoops transactions even with * DG1 is special here since it still snoops transactions even with
...@@ -261,16 +264,16 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) ...@@ -261,16 +264,16 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
(!HAS_LLC(i915) && !IS_DG1(i915))) (!HAS_LLC(i915) && !IS_DG1(i915)))
wbinvd_on_all_cpus(); wbinvd_on_all_cpus();
sg_page_sizes = i915_sg_dma_sizes(pages->sgl); sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes); __i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
return 0; return 0;
} }
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj, static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
struct sg_table *pages) struct sg_table *sgt)
{ {
dma_buf_unmap_attachment(obj->base.import_attach, pages, dma_buf_unmap_attachment(obj->base.import_attach, sgt,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} }
...@@ -313,7 +316,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, ...@@ -313,7 +316,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf); get_dma_buf(dma_buf);
obj = i915_gem_object_alloc(); obj = i915_gem_object_alloc();
if (obj == NULL) { if (!obj) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail_detach; goto fail_detach;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.