Commit 1948d28d authored by Ivaylo Dimitrov's avatar Ivaylo Dimitrov Committed by Tomi Valkeinen

drm: omapdrm: Support exporting of non-contiguous GEM BOs

Currently code allocates non-scanout BOs from SHMEM and those objects are
accessible to userspace by mmap(). However, on devices with no DMM (like
OMAP3), the same objects are not accessible by kernel drivers that want to
render to them as code refuses to export them. In turn this means that on
devices with no DMM, all buffers must be allocated as scanout, otherwise
only CPU can access them. On those devices, scanout buffers are allocated
from CMA, making those allocations highly unreliable.

Fix that by implementing functionality to export SHMEM backed buffers on
devices with no DMM. This makes CMA memory only being used when needed,
instead of for every buffer that has to be off-CPU rendered.

Tested on Motorola Droid4 and Nokia N900
Signed-off-by: Ivaylo Dimitrov <ivo.g.dimitrov.75@gmail.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1642587791-13222-3-git-send-email-ivo.g.dimitrov.75@gmail.com
parent 86ad0397
...@@ -38,7 +38,7 @@ struct omap_gem_object { ...@@ -38,7 +38,7 @@ struct omap_gem_object {
/** roll applied when mapping to DMM */ /** roll applied when mapping to DMM */
u32 roll; u32 roll;
/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */ /** protects pin_cnt, block, pages, dma_addrs and vaddr */
struct mutex lock; struct mutex lock;
/** /**
...@@ -50,24 +50,24 @@ struct omap_gem_object { ...@@ -50,24 +50,24 @@ struct omap_gem_object {
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set) * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
* if they are physically contiguous (when sgt->orig_nents == 1) * if they are physically contiguous (when sgt->orig_nents == 1)
* *
* - buffers mapped through the TILER when dma_addr_cnt is not zero, in * - buffers mapped through the TILER when pin_cnt is not zero, in which
* which case the DMA address points to the TILER aperture * case the DMA address points to the TILER aperture
* *
* Physically contiguous buffers have their DMA address equal to the * Physically contiguous buffers have their DMA address equal to the
* physical address as we don't remap those buffers through the TILER. * physical address as we don't remap those buffers through the TILER.
* *
* Buffers mapped to the TILER have their DMA address pointing to the * Buffers mapped to the TILER have their DMA address pointing to the
* TILER aperture. As TILER mappings are refcounted (through * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
* dma_addr_cnt) the DMA address must be accessed through omap_gem_pin() * the DMA address must be accessed through omap_gem_pin() to ensure
* to ensure that the mapping won't disappear unexpectedly. References * that the mapping won't disappear unexpectedly. References must be
* must be released with omap_gem_unpin(). * released with omap_gem_unpin().
*/ */
dma_addr_t dma_addr; dma_addr_t dma_addr;
/** /**
* # of users of dma_addr * # of users
*/ */
refcount_t dma_addr_cnt; refcount_t pin_cnt;
/** /**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
...@@ -812,31 +812,27 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) ...@@ -812,31 +812,27 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock); mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) { if (!omap_gem_is_contiguous(omap_obj)) {
if (refcount_read(&omap_obj->dma_addr_cnt) == 0) { if (refcount_read(&omap_obj->pin_cnt) == 0) {
refcount_set(&omap_obj->dma_addr_cnt, 1); refcount_set(&omap_obj->pin_cnt, 1);
ret = omap_gem_attach_pages(obj); ret = omap_gem_attach_pages(obj);
if (ret) if (ret)
goto fail; goto fail;
if (priv->has_dmm) {
ret = omap_gem_pin_tiler(obj); ret = omap_gem_pin_tiler(obj);
if (ret) if (ret)
goto fail; goto fail;
}
} else { } else {
refcount_inc(&omap_obj->dma_addr_cnt); refcount_inc(&omap_obj->pin_cnt);
}
} }
if (dma_addr) if (dma_addr)
*dma_addr = omap_obj->dma_addr; *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
if (dma_addr)
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
}
fail: fail:
mutex_unlock(&omap_obj->lock); mutex_unlock(&omap_obj->lock);
...@@ -856,15 +852,16 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj) ...@@ -856,15 +852,16 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret; int ret;
if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm) if (omap_gem_is_contiguous(omap_obj))
return; return;
if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
if (omap_obj->sgt) { if (omap_obj->sgt) {
sg_free_table(omap_obj->sgt); sg_free_table(omap_obj->sgt);
kfree(omap_obj->sgt); kfree(omap_obj->sgt);
omap_obj->sgt = NULL; omap_obj->sgt = NULL;
} }
if (priv->has_dmm) {
ret = tiler_unpin(omap_obj->block); ret = tiler_unpin(omap_obj->block);
if (ret) { if (ret) {
dev_err(obj->dev->dev, dev_err(obj->dev->dev,
...@@ -878,6 +875,7 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj) ...@@ -878,6 +875,7 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
omap_obj->dma_addr = 0; omap_obj->dma_addr = 0;
omap_obj->block = NULL; omap_obj->block = NULL;
} }
}
} }
/** /**
...@@ -909,7 +907,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, ...@@ -909,7 +907,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock); mutex_lock(&omap_obj->lock);
if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block && if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED_MASK)) { (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y); *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0; ret = 0;
...@@ -977,7 +975,8 @@ int omap_gem_put_pages(struct drm_gem_object *obj) ...@@ -977,7 +975,8 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0; return 0;
} }
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
enum dma_data_direction dir)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
dma_addr_t addr; dma_addr_t addr;
...@@ -1002,6 +1001,7 @@ struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) ...@@ -1002,6 +1001,7 @@ struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
goto err_unpin; goto err_unpin;
} }
if (addr) {
if (omap_obj->flags & OMAP_BO_TILED_MASK) { if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags); enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
...@@ -1013,18 +1013,33 @@ struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) ...@@ -1013,18 +1013,33 @@ struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
count = 1; count = 1;
stride = 0; stride = 0;
} }
} else {
count = obj->size >> PAGE_SHIFT;
}
ret = sg_alloc_table(sgt, count, GFP_KERNEL); ret = sg_alloc_table(sgt, count, GFP_KERNEL);
if (ret) if (ret)
goto err_free; goto err_free;
/* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir);
if (addr) {
for_each_sg(sgt->sgl, sg, count, i) { for_each_sg(sgt->sgl, sg, count, i) {
sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr)); sg_set_page(sg, phys_to_page(addr), len,
offset_in_page(addr));
sg_dma_address(sg) = addr; sg_dma_address(sg) = addr;
sg_dma_len(sg) = len; sg_dma_len(sg) = len;
addr += stride; addr += stride;
} }
} else {
for_each_sg(sgt->sgl, sg, count, i) {
sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
sg_dma_address(sg) = omap_obj->dma_addrs[i];
sg_dma_len(sg) = PAGE_SIZE;
}
}
omap_obj->sgt = sgt; omap_obj->sgt = sgt;
out: out:
...@@ -1133,7 +1148,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) ...@@ -1133,7 +1148,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount), omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->dma_addr, off, &omap_obj->dma_addr,
refcount_read(&omap_obj->dma_addr_cnt), refcount_read(&omap_obj->pin_cnt),
omap_obj->vaddr, omap_obj->roll); omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED_MASK) { if (omap_obj->flags & OMAP_BO_TILED_MASK) {
...@@ -1196,7 +1211,7 @@ static void omap_gem_free_object(struct drm_gem_object *obj) ...@@ -1196,7 +1211,7 @@ static void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock); mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */ /* The object should not be pinned. */
WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0); WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
if (omap_obj->pages) { if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF) if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
......
...@@ -82,7 +82,8 @@ u32 omap_gem_flags(struct drm_gem_object *obj); ...@@ -82,7 +82,8 @@ u32 omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr); int x, int y, dma_addr_t *dma_addr);
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj); struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
enum dma_data_direction dir);
void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt); void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt);
#endif /* __OMAPDRM_GEM_H__ */ #endif /* __OMAPDRM_GEM_H__ */
...@@ -23,13 +23,10 @@ static struct sg_table *omap_gem_map_dma_buf( ...@@ -23,13 +23,10 @@ static struct sg_table *omap_gem_map_dma_buf(
{ {
struct drm_gem_object *obj = attachment->dmabuf->priv; struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg; struct sg_table *sg;
sg = omap_gem_get_sg(obj); sg = omap_gem_get_sg(obj, dir);
if (IS_ERR(sg)) if (IS_ERR(sg))
return sg; return sg;
/* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir);
return sg; return sg;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment