Commit a9a2c823 authored by Sergey Senozhatsky, committed by Mauro Carvalho Chehab

media: videobuf2: don't test db_attach in dma-contig prepare and finish

We moved cache management decision making to the upper layer and
rely on buffer's need_cache_sync flags and videobuf2 core. If the
upper layer (core) has decided to invoke ->prepare() or ->finish()
then we must sync.

For DMABUF ->need_cache_sync_on_prepare and ->need_cache_sync_on_finish
are always false so videobuf core does not call ->prepare() and
->finish() on such buffers.

Additionally, scratch the DMABUF comment.
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
parent d4db5eb5
...@@ -100,8 +100,7 @@ static void vb2_dc_prepare(void *buf_priv) ...@@ -100,8 +100,7 @@ static void vb2_dc_prepare(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv; struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */ if (!sgt)
if (!sgt || buf->db_attach)
return; return;
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents, dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
...@@ -113,8 +112,7 @@ static void vb2_dc_finish(void *buf_priv) ...@@ -113,8 +112,7 @@ static void vb2_dc_finish(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv; struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */ if (!sgt)
if (!sgt || buf->db_attach)
return; return;
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
......
...@@ -204,10 +204,6 @@ static void vb2_dma_sg_prepare(void *buf_priv) ...@@ -204,10 +204,6 @@ static void vb2_dma_sg_prepare(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv; struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */
if (buf->db_attach)
return;
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents, dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
buf->dma_dir); buf->dma_dir);
} }
...@@ -217,10 +213,6 @@ static void vb2_dma_sg_finish(void *buf_priv) ...@@ -217,10 +213,6 @@ static void vb2_dma_sg_finish(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv; struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt; struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */
if (buf->db_attach)
return;
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment