Commit e1bf29e0 authored by Rob Clark

drm/msm: drop cache sync hack

Now that it isn't causing problems to use dma_map/unmap, we can drop the
hack of using dma_sync in certain cases.
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent f032b681
@@ -52,26 +52,16 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 {
 	struct device *dev = msm_obj->base.dev->dev;
 
-	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_map_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
+	dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 }
 
 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
 	struct device *dev = msm_obj->base.dev->dev;
 
-	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
+	dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment