Commit c11b8989 authored by Dave Airlie

Merge tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next

omapdrm changes for v4.5

* enable DRIVER_ATOMIC
* improved TILER performance
* cleanups preparing for DMAbuf import
* fbdev emulation is now optional
* minor fixes

* tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux:
  drm/omap: remove obsolete manager assignment
  drm/omap: set DRIVER_ATOMIC for omapdrm
  drm/omap: remove unused plugin defines
  drm/omap: Use bitmaps for TILER placement
  drm: omapdrm: gem: Remove check for impossible condition
  drm: omapdrm: gem: Simplify error handling when creating GEM object
  drm: omapdrm: gem: Don't free mmap offset twice
  drm: omapdrm: gem: Fix GEM object destroy in error path
  drm: omapdrm: gem: Free the correct memory object
  drm: omapdrm: gem: Mask out private flags passed from userspace
  drm: omapdrm: gem: Move global usergart variable to omap_drm_private
  drm: omapdrm: gem: Group functions by purpose
  drm: omapdrm: gem: Remove forward declarations
  drm: omapdrm: gem: Remove unused function prototypes
  drm: omapdrm: Make fbdev emulation optional
  drm: omapdrm: Fix plane state free in plane reset handler
  drm: omapdrm: move omap_plane_reset()
  drm/omap: Use platform_register/unregister_drivers()
  drm: omapdrm: tiler: Remove unneded module alias for tiler
parents 54255e81 9c8e566e
@@ -12,10 +12,11 @@ omapdrm-y := omap_drv.o \
 	omap_encoder.o \
 	omap_connector.o \
 	omap_fb.o \
-	omap_fbdev.o \
 	omap_gem.o \
 	omap_gem_dmabuf.o \
 	omap_dmm_tiler.o \
 	tcm-sita.o

+omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
+
 obj-$(CONFIG_DRM_OMAP) += omapdrm.o
@@ -51,6 +51,7 @@ static int mm_show(struct seq_file *m, void *arg)
 	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 static int fb_show(struct seq_file *m, void *arg)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -73,12 +74,15 @@ static int fb_show(struct seq_file *m, void *arg)

 	return 0;
 }
+#endif

 /* list of debugfs files that are applicable to all devices */
 static struct drm_info_list omap_debugfs_list[] = {
 	{"gem", gem_show, 0},
 	{"mm", mm_show, 0},
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	{"fb", fb_show, 0},
+#endif
 };

 /* list of debugfs files that are specific to devices with dmm/tiler */
...
@@ -363,6 +363,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	u32 min_align = 128;
 	int ret;
 	unsigned long flags;
+	size_t slot_bytes;

 	BUG_ON(!validfmt(fmt));

@@ -371,13 +372,15 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

 	/* convert alignment to slots */
-	min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
-	align = ALIGN(align, min_align);
-	align /= geom[fmt].slot_w * geom[fmt].cpp;
+	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
+	min_align = max(min_align, slot_bytes);
+	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
+	align /= slot_bytes;

 	block->fmt = fmt;

-	ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
+			     &block->area);
 	if (ret) {
 		kfree(block);
 		return ERR_PTR(-ENOMEM);
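For orientation, here is the slot arithmetic above worked through once as a
standalone sketch (align_to_slots() is a hypothetical helper, not a driver
function; the 64-byte slot width and 1 byte per pixel are the TILFMT_8BIT
geometry, used purely as example numbers): a 4096-byte alignment request
becomes 64 slots, and align == 0 is raised to the 128-byte minimum, i.e. 2
slots.

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Mirrors the conversion done in tiler_reserve_2d() above. */
    static unsigned int align_to_slots(unsigned int align,
                                       unsigned int slot_w, unsigned int cpp)
    {
        unsigned int slot_bytes = slot_w * cpp;   /* bytes per slot row */
        unsigned int min_align = slot_bytes > 128 ? slot_bytes : 128;

        /* small requests are raised to min_align, larger ones rounded up */
        align = (align > min_align) ? ALIGN(align, min_align) : min_align;
        return align / slot_bytes;                /* alignment in slots */
    }

    int main(void)
    {
        printf("%u\n", align_to_slots(4096, 64, 1));  /* prints 64 */
        printf("%u\n", align_to_slots(0, 64, 1));     /* prints 2 */
        return 0;
    }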
@@ -739,8 +742,7 @@ static int omap_dmm_probe(struct platform_device *dev)
 	   programming during refill operations */
 	for (i = 0; i < omap_dmm->num_lut; i++) {
 		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
-						omap_dmm->container_height,
-						NULL);
+						omap_dmm->container_height);

 		if (!omap_dmm->tcm[i]) {
 			dev_err(&dev->dev, "failed to allocate container\n");
@@ -1030,4 +1032,3 @@ struct platform_driver omap_dmm_driver = {
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
 MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
-MODULE_ALIAS("platform:" DMM_DRIVER_NAME);

@@ -547,14 +547,19 @@ static int ioctl_set_param(struct drm_device *dev, void *data,
 	return 0;
 }

+#define OMAP_BO_USER_MASK	0x00ffffff	/* flags settable by userspace */
+
 static int ioctl_gem_new(struct drm_device *dev, void *data,
 		struct drm_file *file_priv)
 {
 	struct drm_omap_gem_new *args = data;
+	u32 flags = args->flags & OMAP_BO_USER_MASK;
+
 	VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
-			args->size.bytes, args->flags);
-	return omap_gem_new_handle(dev, file_priv, args->size,
-			args->flags, &args->handle);
+			args->size.bytes, flags);
+
+	return omap_gem_new_handle(dev, file_priv, args->size, flags,
+				   &args->handle);
 }

 static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
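A quick illustration of what the new mask buys (flag values taken from the
driver's headers; the program itself is just a sketch): OMAP_BO_DMA and
friends live in the upper 8 bits of the flags word, so masking with
OMAP_BO_USER_MASK strips anything driver-internal that userspace may have
set, while keeping legitimate flags such as the cache mode.

    #include <stdint.h>
    #include <stdio.h>

    #define OMAP_BO_WC          0x00000002  /* write-combine; a user flag */
    #define OMAP_BO_DMA         0x01000000  /* driver-internal (omap_gem.c) */
    #define OMAP_BO_USER_MASK   0x00ffffff  /* flags settable by userspace */

    int main(void)
    {
        uint32_t from_user = OMAP_BO_WC | OMAP_BO_DMA; /* internal bit smuggled in */
        uint32_t flags = from_user & OMAP_BO_USER_MASK;

        /* prints: 0x01000002 -> 0x00000002 */
        printf("0x%08x -> 0x%08x\n", from_user, flags);
        return 0;
    }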
@@ -692,10 +697,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
 		drm_crtc_vblank_off(priv->crtcs[i]);

 	priv->fbdev = omap_fbdev_init(dev);
-	if (!priv->fbdev) {
-		dev_warn(dev->dev, "omap_fbdev_init failed\n");
-		/* well, limp along without an fbdev.. maybe X11 will work? */
-	}

 	/* store off drm_device for use in pm ops */
 	dev_set_drvdata(dev->dev, dev);
@@ -831,7 +832,8 @@ static const struct file_operations omapdriver_fops = {
 };

 static struct drm_driver omap_drm_driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+				 DRIVER_ATOMIC,
 	.load = dev_load,
 	.unload = dev_unload,
 	.open = dev_open,
@@ -928,35 +930,23 @@ static struct platform_driver pdev = {
 	.remove = pdev_remove,
 };

+static struct platform_driver * const drivers[] = {
+	&omap_dmm_driver,
+	&pdev,
+};
+
 static int __init omap_drm_init(void)
 {
-	int r;
-
 	DBG("init");
-	r = platform_driver_register(&omap_dmm_driver);
-	if (r) {
-		pr_err("DMM driver registration failed\n");
-		return r;
-	}
-	r = platform_driver_register(&pdev);
-	if (r) {
-		pr_err("omapdrm driver registration failed\n");
-		platform_driver_unregister(&omap_dmm_driver);
-		return r;
-	}
-	return 0;
+
+	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 }

 static void __exit omap_drm_fini(void)
 {
 	DBG("fini");
-	platform_driver_unregister(&pdev);
-	platform_driver_unregister(&omap_dmm_driver);
+
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 }

 /* need late_initcall() so we load after dss_driver's are loaded */
...
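The conversion above relies on platform_register_drivers(), which registers
the array in order and, if any entry fails, unregisters the entries already
registered before returning; platform_unregister_drivers() removes them in
reverse order. That is exactly the unwind the hand-rolled code performed. A
minimal sketch of the idiom, with hypothetical driver names:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* assume foo_driver and bar_driver are fully populated elsewhere */
    static struct platform_driver foo_driver;
    static struct platform_driver bar_driver;

    static struct platform_driver * const drivers[] = {
        &foo_driver,    /* registered first */
        &bar_driver,
    };

    static int __init example_init(void)
    {
        /* a partial failure unregisters whatever was already registered */
        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
    }

    static void __exit example_exit(void)
    {
        /* unregisters in reverse order */
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");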
@@ -36,11 +36,7 @@

 #define MODULE_NAME     "omapdrm"

-/* max # of mapper-id's that can be assigned.. todo, come up with a better
- * (but still inexpensive) way to store/access per-buffer mapper private
- * data..
- */
-#define MAX_MAPPERS 2
+struct omap_drm_usergart;

 /* parameters which describe (unrotated) coordinates of scanout within a fb: */
 struct omap_drm_window {
@@ -97,6 +93,7 @@ struct omap_drm_private {
 	/* list of GEM objects: */
 	struct list_head obj_list;

+	struct omap_drm_usergart *usergart;
 	bool has_dmm;

 	/* properties: */
@@ -138,8 +135,18 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_drm_irq_uninstall(struct drm_device *dev);
 int omap_drm_irq_install(struct drm_device *dev);

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
 void omap_fbdev_free(struct drm_device *dev);
+#else
+static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+	return NULL;
+}
+static inline void omap_fbdev_free(struct drm_device *dev)
+{
+}
+#endif

 struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
 enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
...
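The header change is the standard kernel pattern for optional features: when
CONFIG_DRM_FBDEV_EMULATION is disabled, callers compile against static inline
no-ops instead of sprouting #ifdefs at every call site, and dev_load() already
copes with omap_fbdev_init() returning NULL. The generic shape of the idiom,
with hypothetical names:

    #include <stddef.h>

    struct my_dev;
    struct my_ctx;

    #ifdef CONFIG_MY_FEATURE
    struct my_ctx *my_feature_init(struct my_dev *dev);
    void my_feature_cleanup(struct my_dev *dev);
    #else
    static inline struct my_ctx *my_feature_init(struct my_dev *dev)
    {
        return NULL;    /* callers must tolerate the feature being absent */
    }
    static inline void my_feature_cleanup(struct my_dev *dev)
    {
    }
    #endif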
@@ -110,8 +110,6 @@ static int omap_encoder_update(struct drm_encoder *encoder,
 	struct omap_dss_driver *dssdrv = dssdev->driver;
 	int ret;

-	dssdev->src->manager = omap_dss_get_overlay_manager(channel);
-
 	if (dssdrv->check_timings) {
 		ret = dssdrv->check_timings(dssdev, timings);
 	} else {
...
@@ -295,6 +295,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
 	drm_fb_helper_fini(helper);
 fail:
 	kfree(fbdev);
+
+	dev_warn(dev->dev, "omap_fbdev_init failed\n");
+
+	/* well, limp along without an fbdev.. maybe X11 will work? */

 	return NULL;
 }
...
@@ -25,24 +25,15 @@
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"

-/* remove these once drm core helpers are merged */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
-		bool dirty, bool accessed);
-int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
-
 /*
  * GEM buffer object implementation.
  */

-#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
-
 /* note: we use upper 8 bits of flags for driver-internal flags: */
 #define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
 #define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
 #define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

 struct omap_gem_object {
 	struct drm_gem_object base;
@@ -119,8 +110,7 @@ struct omap_gem_object {
 	} *sync;
 };

-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-static uint64_t mmap_offset(struct drm_gem_object *obj);
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
  * not necessarily pinned in TILER all the time, and (b) when they are
@@ -134,27 +124,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
  * for later..
  */

 #define NUM_USERGART_ENTRIES 2
-struct usergart_entry {
+struct omap_drm_usergart_entry {
 	struct tiler_block *block;	/* the reserved tiler block */
 	dma_addr_t paddr;
 	struct drm_gem_object *obj;	/* the current pinned obj */
 	pgoff_t obj_pgoff;		/* page offset of obj currently
 					   mapped in */
 };
-static struct {
-	struct usergart_entry entry[NUM_USERGART_ENTRIES];
+
+struct omap_drm_usergart {
+	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 	int height;		/* height in rows */
 	int height_shift;	/* ilog2(height in rows) */
 	int slot_shift;		/* ilog2(width per slot) */
 	int stride_pfn;		/* stride in pages */
 	int last;		/* index of last used entry */
-} *usergart;
+};
+
+/* -----------------------------------------------------------------------------
+ * Helpers
+ */
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+	size_t size;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	/* Make it mmapable */
+	size = omap_gem_mmap_size(obj);
+	ret = drm_gem_create_mmap_offset_size(obj, size);
+	if (ret) {
+		dev_err(dev->dev, "could not allocate mmap offset\n");
+		return 0;
+	}
+
+	return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+	return obj->filp != NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Eviction
+ */

 static void evict_entry(struct drm_gem_object *obj,
-		enum tiler_fmt fmt, struct usergart_entry *entry)
+		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int n = usergart[fmt].height;
+	struct omap_drm_private *priv = obj->dev->dev_private;
+	int n = priv->usergart[fmt].height;
 	size_t size = PAGE_SIZE * n;
 	loff_t off = mmap_offset(obj) +
 			(entry->obj_pgoff << PAGE_SHIFT);
@@ -180,46 +212,25 @@ static void evict_entry(struct drm_gem_object *obj,
 static void evict(struct drm_gem_object *obj)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	struct omap_drm_private *priv = obj->dev->dev_private;

 	if (omap_obj->flags & OMAP_BO_TILED) {
 		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 		int i;

-		if (!usergart)
-			return;
-
 		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
-			struct usergart_entry *entry = &usergart[fmt].entry[i];
+			struct omap_drm_usergart_entry *entry =
+				&priv->usergart[fmt].entry[i];
+
 			if (entry->obj == obj)
 				evict_entry(obj, fmt, entry);
 		}
 	}
 }

-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
- */
-static inline bool is_shmem(struct drm_gem_object *obj)
-{
-	return obj->filp != NULL;
-}
-
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
- */
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	return is_shmem(obj) &&
-		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
-}
-
-static DEFINE_SPINLOCK(sync_lock);
+/* -----------------------------------------------------------------------------
+ * Page Management
+ */

 /** ensure backing pages are allocated */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)
@@ -272,6 +283,28 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	return ret;
 }

+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	if (is_shmem(obj) && !omap_obj->pages) {
+		ret = omap_gem_attach_pages(obj);
+		if (ret) {
+			dev_err(obj->dev->dev, "could not attach pages\n");
+			return ret;
+		}
+	}
+
+	/* TODO: even phys-contig.. we should have a list of pages? */
+	*pages = omap_obj->pages;
+
+	return 0;
+}
+
 /** release backing pages */
 static void omap_gem_detach_pages(struct drm_gem_object *obj)
 {
@@ -301,26 +334,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
 	return to_omap_bo(obj)->flags;
 }

-/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	size_t size;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	/* Make it mmapable */
-	size = omap_gem_mmap_size(obj);
-	ret = drm_gem_create_mmap_offset_size(obj, size);
-	if (ret) {
-		dev_err(dev->dev, "could not allocate mmap offset\n");
-		return 0;
-	}
-
-	return drm_vma_node_offset_addr(&obj->vma_node);
-}
-
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
@@ -362,6 +375,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
 	return -EINVAL;
 }

+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
 /* Normal handling for the case of faulting in non-tiled buffers */
 static int fault_1d(struct drm_gem_object *obj,
 		struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -393,7 +410,8 @@ static int fault_2d(struct drm_gem_object *obj,
 		struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	struct usergart_entry *entry;
+	struct omap_drm_private *priv = obj->dev->dev_private;
+	struct omap_drm_usergart_entry *entry;
 	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 	struct page *pages[64];  /* XXX is this too much to have on stack? */
 	unsigned long pfn;
@@ -406,8 +424,8 @@ static int fault_2d(struct drm_gem_object *obj,
 	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
 	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 	 */
-	const int n = usergart[fmt].height;
-	const int n_shift = usergart[fmt].height_shift;
+	const int n = priv->usergart[fmt].height;
+	const int n_shift = priv->usergart[fmt].height_shift;

 	/*
 	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
@@ -428,11 +446,11 @@ static int fault_2d(struct drm_gem_object *obj,
 	base_pgoff = round_down(pgoff, m << n_shift);

 	/* figure out buffer width in slots */
-	slots = omap_obj->width >> usergart[fmt].slot_shift;
+	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

 	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

-	entry = &usergart[fmt].entry[usergart[fmt].last];
+	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

 	/* evict previous buffer using this usergart entry, if any: */
 	if (entry->obj)
@@ -479,12 +497,13 @@ static int fault_2d(struct drm_gem_object *obj,

 	for (i = n; i > 0; i--) {
 		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
-		pfn += usergart[fmt].stride_pfn;
+		pfn += priv->usergart[fmt].stride_pfn;
 		vaddr += PAGE_SIZE * m;
 	}

 	/* simple round-robin: */
-	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
+				 % NUM_USERGART_ENTRIES;

 	return 0;
 }
@@ -596,6 +615,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
 	return 0;
 }

+/* -----------------------------------------------------------------------------
+ * Dumb Buffers
+ */

 /**
  * omap_gem_dumb_create	-	create a dumb buffer
@@ -653,6 +675,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 	return ret;
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 /* Set scrolling position.  This allows us to implement fast scrolling
  * for console.
  *
@@ -689,6 +712,22 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)

 	return ret;
 }
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Memory Management & DMA Sync
+ */
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	return is_shmem(obj) &&
+		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}

 /* Sync the buffer for CPU access.. note pages should already be
  * attached, ie. omap_gem_get_pages()
@@ -865,28 +904,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
 	return ret;
 }

-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-
-	if (is_shmem(obj) && !omap_obj->pages) {
-		ret = omap_gem_attach_pages(obj);
-		if (ret) {
-			dev_err(obj->dev->dev, "could not attach pages\n");
-			return ret;
-		}
-	}
-
-	/* TODO: even phys-contig.. we should have a list of pages? */
-	*pages = omap_obj->pages;
-
-	return 0;
-}
-
 /* if !remap, and we don't have pages backing, then fail, rather than
  * increasing the pin count (which we don't really do yet anyways,
  * because we don't support swapping pages back out).  And 'remap'
@@ -924,6 +941,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
 	return 0;
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 /* Get kernel virtual address for CPU access.. this more or less only
  * exists for omap_fbdev.  This should be called with struct_mutex
  * held.
@@ -942,6 +960,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
 	}
 	return omap_obj->vaddr;
 }
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */

 #ifdef CONFIG_PM
 /* re-pin objects in DMM in resume path: */
@@ -971,6 +994,10 @@ int omap_gem_resume(struct device *dev)
 }
 #endif

+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
 #ifdef CONFIG_DEBUG_FS
 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
@@ -1017,9 +1044,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
 }
 #endif

-/* Buffer Synchronization:
+/* -----------------------------------------------------------------------------
+ * Buffer Synchronization
 */

+static DEFINE_SPINLOCK(sync_lock);
+
 struct omap_gem_sync_waiter {
 	struct list_head list;
 	struct omap_gem_object *omap_obj;
@@ -1265,6 +1295,10 @@ int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
 	return ret;
 }

+/* -----------------------------------------------------------------------------
+ * Constructor & Destructor
+ */
+
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
@@ -1282,8 +1316,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 	list_del(&omap_obj->mm_list);
 	spin_unlock(&priv->list_lock);

-	drm_gem_free_mmap_offset(obj);
-
 	/* this means the object is still pinned.. which really should
 	 * not happen.  I think..
 	 */
@@ -1308,31 +1340,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)

 	drm_gem_object_release(obj);

-	kfree(obj);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
-{
-	struct drm_gem_object *obj;
-	int ret;
-
-	obj = omap_gem_new(dev, gsize, flags);
-	if (!obj)
-		return -ENOMEM;
-
-	ret = drm_gem_handle_create(file, obj, handle);
-	if (ret) {
-		drm_gem_object_release(obj);
-		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
-		return ret;
-	}
-
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(obj);
-
-	return 0;
+	kfree(omap_obj);
 }

 /* GEM buffer object constructor */
@@ -1341,15 +1349,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 {
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_gem_object *omap_obj;
-	struct drm_gem_object *obj = NULL;
+	struct drm_gem_object *obj;
 	struct address_space *mapping;
 	size_t size;
 	int ret;

 	if (flags & OMAP_BO_TILED) {
-		if (!usergart) {
+		if (!priv->usergart) {
 			dev_err(dev->dev, "Tiled buffers require DMM\n");
-			goto fail;
+			return NULL;
 		}

 		/* tiled buffers are always shmem paged backed.. when they are
@@ -1420,16 +1428,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 	return obj;

 fail:
-	if (obj)
-		omap_gem_free_object(obj);
+	omap_gem_free_object(obj);

 	return NULL;
 }

-/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = omap_gem_new(dev, gsize, flags);
+	if (!obj)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, obj, handle);
+	if (ret) {
+		omap_gem_free_object(obj);
+		return ret;
+	}
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+/* If DMM is used, we need to set some stuff up.. */
 void omap_gem_init(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_drm_usergart *usergart;
 	const enum tiler_fmt fmts[] = {
 			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
 	};
@@ -1458,10 +1492,11 @@ void omap_gem_init(struct drm_device *dev)
 		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
 		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
 		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
-			struct usergart_entry *entry = &usergart[i].entry[j];
-			struct tiler_block *block =
-					tiler_reserve_2d(fmts[i], w, h,
-							PAGE_SIZE);
+			struct omap_drm_usergart_entry *entry;
+			struct tiler_block *block;
+
+			entry = &usergart[i].entry[j];
+			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
 			if (IS_ERR(block)) {
 				dev_err(dev->dev,
 					"reserve failed: %d, %d, %ld\n",
@@ -1477,13 +1512,16 @@ void omap_gem_init(struct drm_device *dev)
 		}
 	}

+	priv->usergart = usergart;
 	priv->has_dmm = true;
 }

 void omap_gem_deinit(struct drm_device *dev)
 {
+	struct omap_drm_private *priv = dev->dev_private;
+
 	/* I believe we can rely on there being no more outstanding GEM
 	 * objects which could depend on usergart/dmm at this point.
 	 */
-	kfree(usergart);
+	kfree(priv->usergart);
 }

@@ -188,33 +188,6 @@ static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
 	.atomic_disable = omap_plane_atomic_disable,
 };

-static void omap_plane_reset(struct drm_plane *plane)
-{
-	struct omap_plane *omap_plane = to_omap_plane(plane);
-	struct omap_plane_state *omap_state;
-
-	if (plane->state && plane->state->fb)
-		drm_framebuffer_unreference(plane->state->fb);
-
-	kfree(plane->state);
-	plane->state = NULL;
-
-	omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
-	if (omap_state == NULL)
-		return;
-
-	/*
-	 * Set defaults depending on whether we are a primary or overlay
-	 * plane.
-	 */
-	omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
-			   ? 0 : omap_plane->id;
-	omap_state->base.rotation = BIT(DRM_ROTATE_0);
-
-	plane->state = &omap_state->base;
-	plane->state->plane = plane;
-}
-
 static void omap_plane_destroy(struct drm_plane *plane)
 {
 	struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -270,6 +243,32 @@ static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
 	kfree(to_omap_plane_state(state));
 }

+static void omap_plane_reset(struct drm_plane *plane)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	struct omap_plane_state *omap_state;
+
+	if (plane->state) {
+		omap_plane_atomic_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
+	if (omap_state == NULL)
+		return;
+
+	/*
+	 * Set defaults depending on whether we are a primary or overlay
+	 * plane.
+	 */
+	omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+			   ? 0 : omap_plane->id;
+	omap_state->base.rotation = BIT(DRM_ROTATE_0);
+
+	plane->state = &omap_state->base;
+	plane->state->plane = plane;
+}
+
 static int omap_plane_atomic_set_property(struct drm_plane *plane,
 					  struct drm_plane_state *state,
 					  struct drm_property *property,
...
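Moving omap_plane_reset() below omap_plane_atomic_destroy_state() lets reset
free the previous state through the driver's own destroy path instead of a
bare kfree(plane->state), so the full subclassed omap_plane_state (and the
framebuffer reference it may hold) is released consistently. A sketch of the
pattern with hypothetical my_* names, assuming my_plane_atomic_destroy_state()
unreferences state->fb and frees the subclass:

    struct my_plane_state {
        struct drm_plane_state base;    /* wraps the DRM base state */
        unsigned int zorder;            /* driver-private member */
    };

    static void my_plane_reset(struct drm_plane *plane)
    {
        struct my_plane_state *state;

        if (plane->state) {
            /* same path that destroys duplicated atomic states */
            my_plane_atomic_destroy_state(plane, plane->state);
            plane->state = NULL;
        }

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (state == NULL)
            return;

        state->zorder = 0;              /* driver defaults go here */
        plane->state = &state->base;
        plane->state->plane = plane;
    }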
@@ -5,8 +5,9 @@
 *
 * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
 *          Lajos Molnar <molnar@ti.com>
+*          Andy Gross <andy.gross@ti.com>
 *
-* Copyright (C) 2009-2010 Texas Instruments, Inc.
+* Copyright (C) 2012 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -17,684 +18,244 @@
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitmap.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include "tcm.h"
#include "tcm-sita.h" static unsigned long mask[8];
/*
#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1)) * pos position in bitmap
* w width in slots
/* Individual selection criteria for different scan areas */ * h height in slots
static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL; * map ptr to bitmap
static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE; * stride slots in a row
/*********************************************
* TCM API - Sita Implementation
*********************************************/
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
struct tcm_area *area);
static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
static void sita_deinit(struct tcm *tcm);
/*********************************************
* Main Scanner functions
*********************************************/
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *area);
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *field, struct tcm_area *area);
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *field, struct tcm_area *area);
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
struct tcm_area *field, struct tcm_area *area);
/*********************************************
* Support Infrastructure Methods
*********************************************/
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
struct tcm_area *field, s32 criteria,
struct score *best);
static void get_nearness_factor(struct tcm_area *field,
struct tcm_area *candidate,
struct nearness_factor *nf);
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
struct neighbor_stats *stat);
static void fill_area(struct tcm *tcm,
struct tcm_area *area, struct tcm_area *parent);
/*********************************************/
/*********************************************
* Utility Methods
*********************************************/
struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
{
struct tcm *tcm;
struct sita_pvt *pvt;
struct tcm_area area = {0};
s32 i;
if (width == 0 || height == 0)
return NULL;
tcm = kzalloc(sizeof(*tcm), GFP_KERNEL);
pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
if (!tcm || !pvt)
goto error;
/* Updating the pointers to SiTA implementation APIs */
tcm->height = height;
tcm->width = width;
tcm->reserve_2d = sita_reserve_2d;
tcm->reserve_1d = sita_reserve_1d;
tcm->free = sita_free;
tcm->deinit = sita_deinit;
tcm->pvt = (void *)pvt;
spin_lock_init(&(pvt->lock));
/* Creating tam map */
pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
if (!pvt->map)
goto error;
for (i = 0; i < tcm->width; i++) {
pvt->map[i] =
kmalloc(sizeof(**pvt->map) * tcm->height,
GFP_KERNEL);
if (pvt->map[i] == NULL) {
while (i--)
kfree(pvt->map[i]);
kfree(pvt->map);
goto error;
}
}
if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
pvt->div_pt.x = attr->x;
pvt->div_pt.y = attr->y;
} else {
/* Defaulting to 3:1 ratio on width for 2D area split */
/* Defaulting to 3:1 ratio on height for 2D and 1D split */
pvt->div_pt.x = (tcm->width * 3) / 4;
pvt->div_pt.y = (tcm->height * 3) / 4;
}
spin_lock(&(pvt->lock));
assign(&area, 0, 0, width - 1, height - 1);
fill_area(tcm, &area, NULL);
spin_unlock(&(pvt->lock));
return tcm;
error:
kfree(tcm);
kfree(pvt);
return NULL;
}
static void sita_deinit(struct tcm *tcm)
{
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
struct tcm_area area = {0};
s32 i;
area.p1.x = tcm->width - 1;
area.p1.y = tcm->height - 1;
spin_lock(&(pvt->lock));
fill_area(tcm, &area, NULL);
spin_unlock(&(pvt->lock));
for (i = 0; i < tcm->height; i++)
kfree(pvt->map[i]);
kfree(pvt->map);
kfree(pvt);
}
/**
* Reserve a 1D area in the container
*
* @param num_slots size of 1D area
* @param area pointer to the area that will be populated with the
* reserved area
*
* @return 0 on success, non-0 error value on failure.
*/ */
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots, static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
struct tcm_area *area) unsigned long *map, uint16_t stride)
{ {
s32 ret; int i;
struct tcm_area field = {0};
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
spin_lock(&(pvt->lock)); for (i = 0; i < h; i++, pos += stride)
bitmap_clear(map, pos, w);
/* Scanning entire container */
assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
if (!ret)
/* update map */
fill_area(tcm, area, area);
spin_unlock(&(pvt->lock));
return ret;
} }
/** /*
* Reserve a 2D area in the container * w width in slots
* * pos ptr to position
* @param w width * map ptr to bitmap
* @param h height * num_bits number of bits in bitmap
* @param area pointer to the area that will be populated with the reserved
* area
*
* @return 0 on success, non-0 error value on failure.
*/ */
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
struct tcm_area *area) size_t num_bits)
{ {
s32 ret; unsigned long search_count = 0;
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; unsigned long bit;
bool area_found = false;
/* not supporting more than 64 as alignment */ *pos = num_bits - w;
if (align > 64)
return -EINVAL;
/* we prefer 1, 32 and 64 as alignment */ while (search_count < num_bits) {
align = align <= 1 ? 1 : align <= 32 ? 32 : 64; bit = find_next_bit(map, num_bits, *pos);
spin_lock(&(pvt->lock)); if (bit - *pos >= w) {
ret = scan_areas_and_find_fit(tcm, w, h, align, area); /* found a long enough free area */
if (!ret) bitmap_set(map, *pos, w);
/* update map */ area_found = true;
fill_area(tcm, area, area); break;
}
spin_unlock(&(pvt->lock)); search_count = num_bits - bit + w;
return ret; *pos = bit - w;
}
return (area_found) ? 0 : -ENOMEM;
} }
/** /*
* Unreserve a previously allocated 2D or 1D area * w = width in slots
* @param area area to be freed * h = height in slots
* @return 0 - success * a = align in slots (mask, 2^n-1, 0 is unaligned)
* offset = offset in bytes from 4KiB
* pos = position in bitmap for buffer
* map = bitmap ptr
* num_bits = size of bitmap
* stride = bits in one row of container
*/ */
static s32 sita_free(struct tcm *tcm, struct tcm_area *area) static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
unsigned long *pos, unsigned long slot_bytes,
unsigned long *map, size_t num_bits, size_t slot_stride)
{ {
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; int i;
unsigned long index;
bool area_free;
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
unsigned long curr_bit = bit_offset;
/* reset alignment to 1 if we are matching a specific offset */
/* adjust alignment - 1 to get to the format expected in bitmaps */
a = (offset > 0) ? 0 : a - 1;
/* FIXME Return error if slots_per_band > stride */
while (curr_bit < num_bits) {
*pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w,
a);
/* skip forward if we are not at right offset */
if (bit_offset > 0 && (*pos % slots_per_band != bit_offset)) {
curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
continue;
}
spin_lock(&(pvt->lock)); /* skip forward to next row if we overlap end of row */
if ((*pos % slot_stride) + w > slot_stride) {
curr_bit = ALIGN(*pos, slot_stride) + bit_offset;
continue;
}
/* check that this is in fact an existing area */ /* TODO: Handle overlapping 4K boundaries */
WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
pvt->map[area->p1.x][area->p1.y] != area);
/* Clear the contents of the associated tiles in the map */ /* break out of look if we will go past end of container */
fill_area(tcm, area, NULL); if ((*pos + slot_stride * h) > num_bits)
break;
spin_unlock(&(pvt->lock)); /* generate mask that represents out matching pattern */
bitmap_clear(mask, 0, slot_stride);
bitmap_set(mask, (*pos % BITS_PER_LONG), w);
return 0; /* assume the area is free until we find an overlap */
} area_free = true;
/**
* Note: In general the cordinates in the scan field area relevant to the can
* sweep directions. The scan origin (e.g. top-left corner) will always be
* the p0 member of the field. Therfore, for a scan from top-left p0.x <= p1.x
* and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
* <= p0.y
*/
/** /* check subsequent rows to see if complete area is free */
* Raster scan horizontally right to left from top to bottom to find a place for for (i = 1; i < h; i++) {
* a 2D area of given size inside a scan field. index = *pos / BITS_PER_LONG + i * 8;
* if (bitmap_intersects(&map[index], mask,
* @param w width of desired area (*pos % BITS_PER_LONG) + w)) {
* @param h height of desired area area_free = false;
* @param align desired area alignment
* @param area pointer to the area that will be set to the best position
* @param field area to scan (inclusive)
*
* @return 0 on success, non-0 error value on failure.
*/
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *field, struct tcm_area *area)
{
s32 x, y;
s16 start_x, end_x, start_y, end_y, found_x = -1;
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
struct score best = {{0}, {0}, {0}, 0};
start_x = field->p0.x;
end_x = field->p1.x;
start_y = field->p0.y;
end_y = field->p1.y;
/* check scan area co-ordinates */
if (field->p0.x < field->p1.x ||
field->p1.y < field->p0.y)
return -EINVAL;
/* check if allocation would fit in scan area */
if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
return -ENOSPC;
/* adjust start_x and end_y, as allocation would not fit beyond */
start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
end_y = end_y - h + 1;
/* check if allocation would still fit in scan area */
if (start_x < end_x)
return -ENOSPC;
/* scan field top-to-bottom, right-to-left */
for (y = start_y; y <= end_y; y++) {
for (x = start_x; x >= end_x; x -= align) {
if (is_area_free(map, x, y, w, h)) {
found_x = x;
/* update best candidate */
if (update_candidate(tcm, x, y, w, h, field,
CR_R2L_T2B, &best))
goto done;
/* change upper x bound */
end_x = x + 1;
break; break;
} else if (map[x][y] && map[x][y]->is2d) {
/* step over 2D areas */
x = ALIGN(map[x][y]->p0.x - w + 1, align);
} }
} }
/* break if you find a free area shouldering the scan field */ if (area_free)
if (found_x == start_x)
break; break;
}
if (!best.a.tcm)
return -ENOSPC;
done:
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
return 0;
}
/**
* Raster scan horizontally left to right from top to bottom to find a place for
* a 2D area of given size inside a scan field.
*
* @param w width of desired area
* @param h height of desired area
* @param align desired area alignment
* @param area pointer to the area that will be set to the best position
* @param field area to scan (inclusive)
*
* @return 0 on success, non-0 error value on failure.
*/
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *field, struct tcm_area *area)
{
s32 x, y;
s16 start_x, end_x, start_y, end_y, found_x = -1;
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
struct score best = {{0}, {0}, {0}, 0};
start_x = field->p0.x;
end_x = field->p1.x;
start_y = field->p0.y;
end_y = field->p1.y;
/* check scan area co-ordinates */
if (field->p1.x < field->p0.x ||
field->p1.y < field->p0.y)
return -EINVAL;
/* check if allocation would fit in scan area */
if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
return -ENOSPC;
start_x = ALIGN(start_x, align);
/* check if allocation would still fit in scan area */
if (w > LEN(end_x, start_x))
return -ENOSPC;
/* adjust end_x and end_y, as allocation would not fit beyond */
end_x = end_x - w + 1; /* + 1 to be inclusive */
end_y = end_y - h + 1;
/* scan field top-to-bottom, left-to-right */
for (y = start_y; y <= end_y; y++) {
for (x = start_x; x <= end_x; x += align) {
if (is_area_free(map, x, y, w, h)) {
found_x = x;
/* update best candidate */
if (update_candidate(tcm, x, y, w, h, field,
CR_L2R_T2B, &best))
goto done;
/* change upper x bound */
end_x = x - 1;
break; /* go forward past this match */
} else if (map[x][y] && map[x][y]->is2d) { if (bit_offset > 0)
/* step over 2D areas */ curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
x = ALIGN_DOWN(map[x][y]->p1.x, align); else
} curr_bit = *pos + a + 1;
} }
/* break if you find a free area shouldering the scan field */ if (area_free) {
if (found_x == start_x) /* set area as in-use. iterate over rows */
break; for (i = 0, index = *pos; i < h; i++, index += slot_stride)
bitmap_set(map, index, w);
} }
if (!best.a.tcm) return (area_free) ? 0 : -ENOMEM;
return -ENOSPC;
done:
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
return 0;
} }
/** static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
* Raster scan horizontally right to left from bottom to top to find a place struct tcm_area *area)
* for a 1D area of given size inside a scan field.
*
* @param num_slots size of desired area
* @param align desired area alignment
* @param area pointer to the area that will be set to the best
* position
* @param field area to scan (inclusive)
*
* @return 0 on success, non-0 error value on failure.
*/
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
struct tcm_area *field, struct tcm_area *area)
{ {
s32 found = 0; unsigned long pos;
s16 x, y; int ret;
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
struct tcm_area *p; spin_lock(&(tcm->lock));
ret = r2l_b2t_1d(num_slots, &pos, tcm->bitmap, tcm->map_size);
/* check scan area co-ordinates */ if (!ret) {
if (field->p0.y < field->p1.y) area->p0.x = pos % tcm->width;
return -EINVAL; area->p0.y = pos / tcm->width;
area->p1.x = (pos + num_slots - 1) % tcm->width;
/** area->p1.y = (pos + num_slots - 1) / tcm->width;
* Currently we only support full width 1D scan field, which makes sense
* since 1D slot-ordering spans the full container width.
*/
if (tcm->width != field->p0.x - field->p1.x + 1)
return -EINVAL;
/* check if allocation would fit in scan area */
if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
return -ENOSPC;
x = field->p0.x;
y = field->p0.y;
/* find num_slots consecutive free slots to the left */
while (found < num_slots) {
if (y < 0)
return -ENOSPC;
/* remember bottom-right corner */
if (found == 0) {
area->p1.x = x;
area->p1.y = y;
}
/* skip busy regions */
p = pvt->map[x][y];
if (p) {
/* move to left of 2D areas, top left of 1D */
x = p->p0.x;
if (!p->is2d)
y = p->p0.y;
/* start over */
found = 0;
} else {
/* count consecutive free slots */
found++;
if (found == num_slots)
break;
}
/* move to the left */
if (x == 0)
y--;
x = (x ? : tcm->width) - 1;
} }
spin_unlock(&(tcm->lock));
/* set top-left corner */ return ret;
area->p0.x = x;
area->p0.y = y;
return 0;
} }
/** static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
* Find a place for a 2D area of given size inside a scan field based on its int16_t offset, uint16_t slot_bytes,
* alignment needs. struct tcm_area *area)
*
* @param w width of desired area
* @param h height of desired area
* @param align desired area alignment
* @param area pointer to the area that will be set to the best position
*
* @return 0 on success, non-0 error value on failure.
*/
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
struct tcm_area *area)
{ {
s32 ret = 0; unsigned long pos;
struct tcm_area field = {0}; int ret;
u16 boundary_x, boundary_y;
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; spin_lock(&(tcm->lock));
ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap,
if (align > 1) { tcm->map_size, tcm->width);
/* prefer top-left corner */
boundary_x = pvt->div_pt.x - 1; if (!ret) {
boundary_y = pvt->div_pt.y - 1; area->p0.x = pos % tcm->width;
area->p0.y = pos / tcm->width;
/* expand width and height if needed */ area->p1.x = area->p0.x + w - 1;
if (w > pvt->div_pt.x) area->p1.y = area->p0.y + h - 1;
boundary_x = tcm->width - 1;
if (h > pvt->div_pt.y)
boundary_y = tcm->height - 1;
assign(&field, 0, 0, boundary_x, boundary_y);
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
/* scan whole container if failed, but do not scan 2x */
if (ret != 0 && (boundary_x != tcm->width - 1 ||
boundary_y != tcm->height - 1)) {
/* scan the entire container if nothing found */
assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
}
} else if (align == 1) {
/* prefer top-right corner */
boundary_x = pvt->div_pt.x;
boundary_y = pvt->div_pt.y - 1;
/* expand width and height if needed */
if (w > (tcm->width - pvt->div_pt.x))
boundary_x = 0;
if (h > pvt->div_pt.y)
boundary_y = tcm->height - 1;
assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
/* scan whole container if failed, but do not scan 2x */
if (ret != 0 && (boundary_x != 0 ||
boundary_y != tcm->height - 1)) {
/* scan the entire container if nothing found */
assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
ret = scan_r2l_t2b(tcm, w, h, align, &field,
area);
}
} }
spin_unlock(&(tcm->lock));
return ret; return ret;
} }
/* check if an entire area is free */ static void sita_deinit(struct tcm *tcm)
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
{ {
u16 x = 0, y = 0; kfree(tcm);
for (y = y0; y < y0 + h; y++) {
for (x = x0; x < x0 + w; x++) {
if (map[x][y])
return false;
}
}
return true;
} }
/* fills an area with a parent tcm_area */ static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
static void fill_area(struct tcm *tcm, struct tcm_area *area,
struct tcm_area *parent)
{ {
s32 x, y; unsigned long pos;
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; uint16_t w, h;
struct tcm_area a, a_;
/* set area's tcm; otherwise, enumerator considers it invalid */
area->tcm = tcm;
tcm_for_each_slice(a, *area, a_) {
for (x = a.p0.x; x <= a.p1.x; ++x)
for (y = a.p0.y; y <= a.p1.y; ++y)
pvt->map[x][y] = parent;
pos = area->p0.x + area->p0.y * tcm->width;
if (area->is2d) {
w = area->p1.x - area->p0.x + 1;
h = area->p1.y - area->p0.y + 1;
} else {
w = area->p1.x + area->p1.y * tcm->width - pos + 1;
h = 1;
} }
spin_lock(&(tcm->lock));
free_slots(pos, w, h, tcm->bitmap, tcm->width);
spin_unlock(&(tcm->lock));
return 0;
} }
-/**
- * Compares a candidate area to the current best area, and if it is a better
- * fit, updates the best to this one.
- *
- * @param x0, y0, w, h	left, top, width, height of candidate area
- * @param field		scan field
- * @param criteria	scan criteria
- * @param best		best candidate and its scores
- *
- * @return 1 (true) if the candidate area is known to be the final best, so no
- * more searching should be performed
- */
-static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
-			    struct tcm_area *field, s32 criteria,
-			    struct score *best)
-{
-	struct score me;	/* score for area */
-
-	/*
-	 * NOTE: For horizontal bias we always give the first found, because our
-	 * scan is horizontal-raster-based and the first candidate will always
-	 * have the horizontal bias.
-	 */
-	bool first = criteria & CR_BIAS_HORIZONTAL;
-
-	assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
-
-	/* calculate score for current candidate */
-	if (!first) {
-		get_neighbor_stats(tcm, &me.a, &me.n);
-		me.neighs = me.n.edge + me.n.busy;
-		get_nearness_factor(field, &me.a, &me.f);
-	}
-
-	/* the 1st candidate is always the best */
-	if (!best->a.tcm)
-		goto better;
-
-	BUG_ON(first);
-
-	/* diagonal balance check */
-	if ((criteria & CR_DIAGONAL_BALANCE) &&
-	    best->neighs <= me.neighs &&
-	    (best->neighs < me.neighs ||
-	     /* this implies that neighs and occupied match */
-	     best->n.busy < me.n.busy ||
-	     (best->n.busy == me.n.busy &&
-	      /* check the nearness factor */
-	      best->f.x + best->f.y > me.f.x + me.f.y)))
-		goto better;
-
-	/* not better, keep going */
-	return 0;
-
-better:
-	/* save current area as best */
-	memcpy(best, &me, sizeof(me));
-	best->a.tcm = tcm;
-	return first;
-}
-
-/**
- * Calculate the nearness factor of an area in a search field. The nearness
- * factor is smaller if the area is closer to the search origin.
- */
-static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
-				struct nearness_factor *nf)
-{
-	/*
-	 * Use signed math, as field coordinates may be reversed if the
-	 * search direction is right-to-left or bottom-to-top.
-	 */
-	nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
-		(field->p1.x - field->p0.x);
-	nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
-		(field->p1.y - field->p0.y);
-}
-
-/* get neighbor statistics */
-static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
-			       struct neighbor_stats *stat)
-{
-	s16 x = 0, y = 0;
-	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
-	/* clear any existing values */
-	memset(stat, 0, sizeof(*stat));
-
-	/* process top & bottom edges */
-	for (x = area->p0.x; x <= area->p1.x; x++) {
-		if (area->p0.y == 0)
-			stat->edge++;
-		else if (pvt->map[x][area->p0.y - 1])
-			stat->busy++;
-		if (area->p1.y == tcm->height - 1)
-			stat->edge++;
-		else if (pvt->map[x][area->p1.y + 1])
-			stat->busy++;
-	}
-
-	/* process left & right edges */
-	for (y = area->p0.y; y <= area->p1.y; ++y) {
-		if (area->p0.x == 0)
-			stat->edge++;
-		else if (pvt->map[area->p0.x - 1][y])
-			stat->busy++;
-		if (area->p1.x == tcm->width - 1)
-			stat->edge++;
-		else if (pvt->map[area->p1.x + 1][y])
-			stat->busy++;
-	}
-}
+struct tcm *sita_init(u16 width, u16 height)
+{
+	struct tcm *tcm;
+	size_t map_size = BITS_TO_LONGS(width*height) * sizeof(unsigned long);
+
+	if (width == 0 || height == 0)
+		return NULL;
+
+	tcm = kzalloc(sizeof(*tcm) + map_size, GFP_KERNEL);
+	if (!tcm)
+		goto error;
+
+	/* Update the pointers to SiTA implementation APIs */
+	tcm->height = height;
+	tcm->width = width;
+	tcm->reserve_2d = sita_reserve_2d;
+	tcm->reserve_1d = sita_reserve_1d;
+	tcm->free = sita_free;
+	tcm->deinit = sita_deinit;
+
+	spin_lock_init(&tcm->lock);
+	tcm->bitmap = (unsigned long *)(tcm + 1);
+	bitmap_clear(tcm->bitmap, 0, width*height);
+
+	tcm->map_size = width*height;
+
+	return tcm;
+
+error:
+	kfree(tcm);
+	return NULL;
+}
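sita_init() sizes the bitmap with BITS_TO_LONGS() and carves it out of the same kzalloc() as the struct, via (unsigned long *)(tcm + 1), which is why sita_deinit() is a single kfree(). A userspace sketch of the same trailing-storage pattern, with hypothetical container_* names:

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct container {
            unsigned int width, height;
            unsigned long *bitmap;  /* points into the same allocation */
    };

    static struct container *container_init(unsigned int width, unsigned int height)
    {
            size_t words = BITS_TO_LONGS((size_t)width * height);
            struct container *c;

            c = calloc(1, sizeof(*c) + words * sizeof(unsigned long));
            if (!c)
                    return NULL;
            c->width = width;
            c->height = height;
            c->bitmap = (unsigned long *)(c + 1);   /* trailing storage */
            return c;
    }

    /* mirrors sita_deinit(): one free() releases struct and bitmap together */
    static void container_deinit(struct container *c)
    {
            free(c);
    }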
@@ -61,18 +61,17 @@ struct tcm {
 	unsigned int y_offset;	/* offset to use for y coordinates */
 
-	/* 'pvt' structure shall contain any tcm details (attr) along with
-	   linked list of allocated areas and mutex for mutually exclusive access
-	   to the list. It may also contain copies of width and height to notice
-	   any changes to the publicly available width and height fields. */
-	void *pvt;
+	spinlock_t lock;
+	unsigned long *bitmap;
+	size_t map_size;
 
 	/* function table */
-	s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+	s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
+			  int16_t offset, uint16_t slot_bytes,
 			  struct tcm_area *area);
 	s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
-	s32 (*free)      (struct tcm *tcm, struct tcm_area *area);
-	void (*deinit)   (struct tcm *tcm);
+	s32 (*free)(struct tcm *tcm, struct tcm_area *area);
+	void (*deinit)(struct tcm *tcm);
 };
 
 /*=============================================================================
@@ -91,7 +90,7 @@ struct tcm {
  *
  */
-struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+struct tcm *sita_init(u16 width, u16 height);
 
 /**
@@ -120,6 +119,9 @@ static inline void tcm_deinit(struct tcm *tcm)
  * all values may be supported by the container manager,
  * but it must support 0 (1), 32 and 64.
  * 0 value is equivalent to 1.
+ * @param offset	Offset requirement, in bytes. This is the offset
+ *			from a 4KiB aligned virtual address.
+ * @param slot_bytes	Width of slot in bytes
  * @param area		Pointer to where the reserved area should be stored.
  *
  * @return 0 on success. Non-0 error code on failure. Also,
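Per the new parameter text above, offset pins where a reserved block may start relative to a 4KiB boundary. In slot terms: with slot_bytes-wide slots there are 4096 / slot_bytes slots per 4KiB band, so a byte offset corresponds to a fixed slot index within each band. A small arithmetic check (illustrative values, not the kernel scanner):

    #include <assert.h>

    int main(void)
    {
            unsigned int slot_bytes = 256;                   /* example slot width */
            unsigned int slots_per_band = 4096 / slot_bytes; /* 16 slots per 4KiB */
            unsigned int offset = 512;                       /* requested byte offset */
            unsigned int slot_in_band = offset / slot_bytes; /* slot 2 of its band */

            /* any x at that in-band slot index preserves the byte offset */
            for (unsigned int x = 0; x < 64; x++)
                    if (x % slots_per_band == slot_in_band)
                            assert((x * slot_bytes) % 4096 == offset);
            return 0;
    }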
@@ -129,7 +131,8 @@ static inline void tcm_deinit(struct tcm *tcm)
  * allocation.
  */
 static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
-				 u16 align, struct tcm_area *area)
+				 u16 align, int16_t offset, uint16_t slot_bytes,
+				 struct tcm_area *area)
 {
 	/* perform rudimentary error checking */
 	s32 res = tcm == NULL ? -ENODEV :
@@ -140,7 +143,8 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
 	if (!res) {
 		area->is2d = true;
-		res = tcm->reserve_2d(tcm, height, width, align, area);
+		res = tcm->reserve_2d(tcm, height, width, align, offset,
+				      slot_bytes, area);
 		area->tcm = res ? NULL : tcm;
 	}
...
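With the widened inline wrapper, callers now pass the byte offset and slot width straight through to the container manager's reserve_2d hook. A hedged kernel-context sketch of a call site (illustrative sizes; error handling trimmed):

    struct tcm_area area;
    int ret;

    /* 64x32 slots, 64-slot alignment, no offset bias */
    ret = tcm_reserve_2d(tcm, 64, 32, 64, 0, slot_bytes, &area);
    if (ret)
            return ret;

    /* ... use the reserved area ... */

    tcm_free(&area);        /* clears the same bits via tcm->free() */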
@@ -101,9 +101,6 @@ struct drm_omap_gem_info {
 #define DRM_OMAP_GET_PARAM		0x00
 #define DRM_OMAP_SET_PARAM		0x01
-/* placeholder for plugin-api
-#define DRM_OMAP_GET_BASE		0x02
-*/
 #define DRM_OMAP_GEM_NEW		0x03
 #define DRM_OMAP_GEM_CPU_PREP		0x04
 #define DRM_OMAP_GEM_CPU_FINI		0x05
@@ -112,9 +109,6 @@ struct drm_omap_gem_info {
 #define DRM_IOCTL_OMAP_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
 #define DRM_IOCTL_OMAP_SET_PARAM	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
-/* placeholder for plugin-api
-#define DRM_IOCTL_OMAP_GET_BASE	DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
-*/
 #define DRM_IOCTL_OMAP_GEM_NEW		DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
 #define DRM_IOCTL_OMAP_GEM_CPU_PREP	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
 #define DRM_IOCTL_OMAP_GEM_CPU_FINI	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
...
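The dropped GET_BASE entries were commented-out placeholders and never part of the userspace ABI, so removing them breaks nothing; the remaining ioctls are unchanged. For reference, a hedged userspace sketch of DRM_IOCTL_OMAP_GEM_NEW (assumes libdrm's xf86drm.h and the omap_drm.h UAPI header; the flag choice is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>
    #include <drm/omap_drm.h>

    int main(void)
    {
            struct drm_omap_gem_new req = {
                    .size.tiled = { .width = 1920, .height = 1080 },
                    .flags = OMAP_BO_TILED_16,      /* 16-bpp tiled layout */
            };
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (drmIoctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req)) {
                    close(fd);
                    return 1;
            }
            printf("GEM handle %u\n", req.handle);
            close(fd);
            return 0;
    }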