Commit 9cba3b99 authored by Laurent Pinchart, committed by Tomi Valkeinen

drm/omap: gem: Refactor GEM object allocation

Split the individual steps of GEM object allocation and initialization
clearly. This improves readability and prepares for dma_buf import
support.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent cdb0381d
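
To make the split concrete, below is a small stand-alone model of the first step the patch factors out: validating the flags and choosing the backing memory type. Only the branch structure is taken from the new code in the diff; the flag values, the has_dmm parameter and the model_resolve_backing() helper are invented for this sketch and are not the driver's real definitions (the real code additionally checks priv->usergart and clears the cache flags for tiled buffers).

/*
 * Stand-alone model of the backing-memory decision made by the
 * refactored omap_gem_new(). The OMAP_BO_* values below are
 * placeholders, not the real driver/UAPI definitions; only the
 * decision logic follows the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define OMAP_BO_SCANOUT		0x01
#define OMAP_BO_TILED		0x02
#define OMAP_BO_MEM_SHMEM	0x04
#define OMAP_BO_MEM_DMA_API	0x08
#define OMAP_BO_MEM_EXT		0x10

/* Return the flags with the backing-memory flag resolved. */
static unsigned int model_resolve_backing(unsigned int flags, bool has_dmm)
{
	if (flags & OMAP_BO_TILED) {
		/* Tiled buffers are always shmem backed; they are remapped
		 * into DMM/TILER when scanned out, so SCANOUT is dropped. */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;
	} else if ((flags & OMAP_BO_SCANOUT) && !has_dmm) {
		/* Without DMM to remap discontiguous pages, scanout buffers
		 * must be contiguous, i.e. allocated through the DMA API. */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_EXT)) {
		/* Everything else that is not externally backed (e.g. a
		 * future dma_buf import) is shmem backed. */
		flags |= OMAP_BO_MEM_SHMEM;
	}
	return flags;
}

int main(void)
{
	printf("scanout, no DMM: %#x\n", model_resolve_backing(OMAP_BO_SCANOUT, false));
	printf("tiled:           %#x\n", model_resolve_backing(OMAP_BO_TILED, true));
	printf("plain buffer:    %#x\n", model_resolve_backing(0, true));
	printf("external memory: %#x\n", model_resolve_backing(OMAP_BO_MEM_EXT, true));
	return 0;
}
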
@@ -1374,67 +1374,80 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 	size_t size;
 	int ret;
 
+	/* Validate the flags and compute the memory and cache flags. */
 	if (flags & OMAP_BO_TILED) {
 		if (!priv->usergart) {
 			dev_err(dev->dev, "Tiled buffers require DMM\n");
 			return NULL;
 		}
 
-		/* tiled buffers are always shmem paged backed.. when they are
-		 * scanned out, they are remapped into DMM/TILER
+		/*
+		 * Tiled buffers are always shmem paged backed. When they are
+		 * scanned out, they are remapped into DMM/TILER.
 		 */
 		flags &= ~OMAP_BO_SCANOUT;
+		flags |= OMAP_BO_MEM_SHMEM;
 
-		/* currently don't allow cached buffers.. there is some caching
-		 * stuff that needs to be handled better
+		/*
+		 * Currently don't allow cached buffers. There is some caching
+		 * stuff that needs to be handled better.
 		 */
 		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
 		flags |= tiler_get_cpu_cache_flags();
-
-		/* align dimensions to slot boundaries... */
-		tiler_align(gem2fmt(flags),
-				&gsize.tiled.width, &gsize.tiled.height);
-
-		/* ...and calculate size based on aligned dimensions */
-		size = tiler_size(gem2fmt(flags),
-				gsize.tiled.width, gsize.tiled.height);
-	} else {
-		size = PAGE_ALIGN(gsize.bytes);
+	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+		/*
+		 * Use contiguous memory if we don't have DMM to remap
+		 * discontiguous buffers.
+		 */
+		flags |= OMAP_BO_MEM_DMA_API;
+	} else if (!(flags & OMAP_BO_MEM_EXT)) {
+		/*
+		 * All other buffers not backed with external memory are
+		 * shmem-backed.
+		 */
+		flags |= OMAP_BO_MEM_SHMEM;
 	}
 
+	/* Allocate the initialize the OMAP GEM object. */
 	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
 	if (!omap_obj)
 		return NULL;
 
 	obj = &omap_obj->base;
+	omap_obj->flags = flags;
 
-	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
-		/* attempt to allocate contiguous memory if we don't
-		 * have DMM for remappign discontiguous buffers
+	if (flags & OMAP_BO_TILED) {
+		/*
+		 * For tiled buffers align dimensions to slot boundaries and
+		 * calculate size based on aligned dimensions.
 		 */
-		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
-				&omap_obj->paddr, GFP_KERNEL);
-		if (!omap_obj->vaddr) {
-			kfree(omap_obj);
-			return NULL;
-		}
+		tiler_align(gem2fmt(flags), &gsize.tiled.width,
+			    &gsize.tiled.height);
 
-		flags |= OMAP_BO_MEM_DMA_API;
+		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
+				  gsize.tiled.height);
+
+		omap_obj->width = gsize.tiled.width;
+		omap_obj->height = gsize.tiled.height;
+	} else {
+		size = PAGE_ALIGN(gsize.bytes);
 	}
 
 	spin_lock(&priv->list_lock);
 	list_add(&omap_obj->mm_list, &priv->obj_list);
 	spin_unlock(&priv->list_lock);
 
-	omap_obj->flags = flags;
-
-	if (flags & OMAP_BO_TILED) {
-		omap_obj->width = gsize.tiled.width;
-		omap_obj->height = gsize.tiled.height;
+	/* Allocate memory if needed. */
+	if (flags & OMAP_BO_MEM_DMA_API) {
+		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+							 &omap_obj->paddr,
+							 GFP_KERNEL);
+		if (!omap_obj->vaddr)
+			goto fail;
 	}
 
-	if (flags & (OMAP_BO_MEM_DMA_API | OMAP_BO_MEM_EXT)) {
+	/* Initialize the GEM object. */
+	if (!(flags & OMAP_BO_MEM_SHMEM)) {
 		drm_gem_private_object_init(dev, obj, size);
 	} else {
 		ret = drm_gem_object_init(dev, obj, size);
@@ -1443,8 +1456,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 
 		mapping = file_inode(obj->filp)->i_mapping;
 		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
-
-		omap_obj->flags |= OMAP_BO_MEM_SHMEM;
 	}
 
 	return obj;
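
For context, a minimal caller sketch follows (not part of this patch; the wrapper function, buffer size and flag choice are hypothetical, and it assumes the omapdrm internal header that declares omap_gem_new() and union omap_gem_size). It shows how a non-tiled, non-scanout allocation travels the refactored path: the object is marked OMAP_BO_MEM_SHMEM, the DMA allocation step is skipped, and initialization goes through drm_gem_object_init().

/* Hypothetical caller, for illustration only. Assumes the omapdrm
 * internal header ("omap_drv.h") for omap_gem_new(), the OMAP_BO_*
 * flags and union omap_gem_size. */
#include "omap_drv.h"

static struct drm_gem_object *example_alloc(struct drm_device *dev)
{
	/* Arbitrary example size: one 1280x720 XRGB8888 surface. */
	union omap_gem_size gsize = {
		.bytes = PAGE_ALIGN(1280 * 720 * 4),
	};

	/*
	 * Write-combined, neither tiled nor scanout: the refactored code
	 * resolves the backing to OMAP_BO_MEM_SHMEM and initializes the
	 * object through drm_gem_object_init().
	 */
	return omap_gem_new(dev, gsize, OMAP_BO_WC);
}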