Commit 345f3b90 authored by Dave Airlie

Merge branch 'exynos-drm-next' of...

Merge branch 'exynos-drm-next' of git://git.infradead.org/users/kmpark/linux-samsung into drm-core-next

* 'exynos-drm-next' of git://git.infradead.org/users/kmpark/linux-samsung:
  drm/exynos: add G2D driver
  drm/exynos: added vp scaling feature for hdmi
  drm/exynos: added source size to overlay structure
  drm/exynos: add additional display mode for hdmi
  drm/exynos: enable dvi mode for dvi monitor
  drm/exynos: fixed wrong pageflip finish event for interlace mode
  drm/exynos: add PM functions for hdmi and mixer
  drm/exynos: add dpms for hdmi
  drm/exynos: use threaded irq for hdmi hotplug
  drm/exynos: use platform_get_irq_byname for hdmi
  drm/exynos: cleanup for hdmi platform data
  drm/exynos: added a feature to get gem buffer information.
  drm/exynos: added drm prime feature.
  drm/exynos: added cache attribute support for gem.
  vgaarb: Provide dummy default device functions
parents ebe0f244 d7f1642c
......@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
help
Choose this option if you want to use the DMABUF feature for DRM.
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
......@@ -27,3 +33,9 @@ config DRM_EXYNOS_VIDI
depends on DRM_EXYNOS
help
Choose this option if you want to use Exynos VIDI for DRM.
config DRM_EXYNOS_G2D
bool "Exynos DRM G2D"
depends on DRM_EXYNOS
help
Choose this option if you want to use Exynos G2D for DRM.
......@@ -8,10 +8,12 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_ddc.o exynos_hdmiphy.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
......@@ -35,7 +35,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
unsigned int npages, page_size, i = 0;
unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
......@@ -53,13 +53,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
page_size = SECTION_SIZE;
buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
page_size = SZ_64K;
buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
page_size = PAGE_SIZE;
buf->page_size = PAGE_SIZE;
}
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
......@@ -96,9 +96,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
sg_set_page(sgl, buf->pages[i], page_size, 0);
sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
start_addr += page_size;
start_addr += buf->page_size;
sgl = sg_next(sgl);
i++;
}
......
......@@ -105,6 +105,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_y = pos->fb_y;
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
overlay->src_width = pos->src_w;
overlay->src_height = pos->src_h;
overlay->bpp = fb->bits_per_pixel;
overlay->pitch = fb->pitches[0];
overlay->pixel_format = fb->pixel_format;
......@@ -153,6 +155,8 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
pos.crtc_y = 0;
pos.crtc_w = fb->width - crtc->x;
pos.crtc_h = fb->height - crtc->y;
pos.src_w = pos.crtc_w;
pos.src_h = pos.crtc_h;
return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
}
......
......@@ -42,6 +42,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed
* - the unit is screen coordinates.
* @src_w: width of source area to be displayed from a framebuffer.
* @src_h: height of source area to be displayed from a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_w: width of hardware screen.
......@@ -50,6 +52,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
struct exynos_drm_crtc_pos {
unsigned int fb_x;
unsigned int fb_y;
unsigned int src_w;
unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_w;
......
/* exynos_drm_dmabuf.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include <linux/dma-buf.h>
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
unsigned int page_size)
{
struct sg_table *sgt = NULL;
struct scatterlist *sgl;
int i, ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
goto out;
ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
if (ret)
goto err_free_sgt;
if (page_size < PAGE_SIZE)
page_size = PAGE_SIZE;
for_each_sg(sgt->sgl, sgl, nr_pages, i)
sg_set_page(sgl, pages[i], page_size, 0);
return sgt;
err_free_sgt:
kfree(sgt);
sgt = NULL;
out:
return NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct sg_table *sgt = NULL;
unsigned int npages;
int nents;
DRM_DEBUG_PRIME("%s\n", __FILE__);
mutex_lock(&dev->struct_mutex);
buf = gem_obj->buffer;
/* there should always be pages allocated. */
if (!buf->pages) {
DRM_ERROR("pages is null.\n");
goto err_unlock;
}
npages = buf->size / buf->page_size;
sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
npages, buf->size, buf->page_size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
return sgt;
}
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
DRM_DEBUG_PRIME("%s\n", __FILE__);
/*
* An exynos_dmabuf_release() call means that the file object's
* f_count is 0, and drm_gem_object_handle_unreference() is called
* to drop the references that were taken at drm_prime_handle_to_fd().
*/
if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
exynos_gem_obj->base.export_dma_buf = NULL;
/*
* drop this gem object refcount to release allocated buffer
* and resources.
*/
drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
}
}
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
/* TODO */
return NULL;
}
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num,
void *addr)
{
/* TODO */
}
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
/* TODO */
return NULL;
}
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
static struct dma_buf_ops exynos_dmabuf_ops = {
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
.release = exynos_dmabuf_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
exynos_gem_obj->base.size, 0600);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
struct page *page;
int ret, i = 0;
DRM_DEBUG_PRIME("%s\n", __FILE__);
/* is this one of our own objects? */
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
exynos_gem_obj = dma_buf->priv;
obj = &exynos_gem_obj->base;
/* is it from our device? */
if (obj->dev == drm_dev) {
drm_gem_object_reference(obj);
return obj;
}
}
attach = dma_buf_attach(dma_buf, drm_dev->dev);
if (IS_ERR(attach))
return ERR_PTR(-EINVAL);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_buf_detach;
}
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
}
buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
if (!buffer->pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
goto err_free_buffer;
}
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_free_pages;
}
sgl = sgt->sgl;
buffer->dma_addr = sg_dma_address(sgl);
while (i < sgt->nents) {
buffer->pages[i] = sg_page(sgl);
buffer->size += sg_dma_len(sgl);
sgl = sg_next(sgl);
i++;
}
exynos_gem_obj->buffer = buffer;
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
err_free_pages:
kfree(buffer->pages);
buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
err_unmap_attach:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");
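
For context, the export/import hooks above are reached through the generic DRM PRIME ioctls once the driver advertises DRIVER_PRIME. Below is a minimal, hedged userspace sketch of sharing a GEM buffer between two DRM file descriptors; it relies only on the generic DRM_IOCTL_PRIME_HANDLE_TO_FD / DRM_IOCTL_PRIME_FD_TO_HANDLE interfaces, and the include paths are assumptions that depend on how libdrm is installed.

/*
 * Hypothetical userspace sketch: export a GEM handle as a dma-buf fd on one
 * DRM fd and import it on another.  Error handling trimmed for brevity.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() from libdrm; also pulls in drm.h */

static int export_to_dmabuf_fd(int drm_fd, uint32_t handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;

	return args.fd;		/* dma-buf fd, sharable with other devices */
}

static int import_from_dmabuf_fd(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.fd = dmabuf_fd;

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
		return -1;

	*handle = args.handle;	/* GEM handle backed by exynos_dmabuf_prime_import() */
	return 0;
}
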
/* exynos_drm_dmabuf.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_DMABUF_H_
#define _EXYNOS_DRM_DMABUF_H_
#ifdef CONFIG_DRM_EXYNOS_DMABUF
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf);
#else
#define exynos_dmabuf_prime_export NULL
#define exynos_dmabuf_prime_import NULL
#endif
#endif
......@@ -39,6 +39,8 @@
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
......@@ -147,8 +149,17 @@ static int exynos_drm_unload(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
DRM_DEBUG_DRIVER("%s\n", __FILE__);
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
drm_prime_init_file_private(&file->prime);
file->driver_priv = file_priv;
return exynos_drm_subdrv_open(dev, file);
}
......@@ -170,6 +181,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
e->base.destroy(&e->base);
}
}
drm_prime_destroy_file_private(&file->prime);
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file);
......@@ -207,10 +219,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
......@@ -225,7 +245,7 @@ static const struct file_operations exynos_drm_driver_fops = {
static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
DRIVER_MODESET | DRIVER_GEM,
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
......@@ -241,6 +261,10 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = exynos_dmabuf_prime_export,
.gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
......@@ -307,6 +331,12 @@ static int __init exynos_drm_init(void)
goto out_vidi;
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
ret = platform_driver_register(&g2d_driver);
if (ret < 0)
goto out_g2d;
#endif
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out;
......@@ -314,6 +344,11 @@ static int __init exynos_drm_init(void)
return 0;
out:
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
out_vidi:
platform_driver_unregister(&vidi_driver);
......@@ -341,6 +376,10 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
......
......@@ -77,6 +77,8 @@ struct exynos_drm_overlay_ops {
* - the unit is screen coordinates.
* @fb_width: width of a framebuffer.
* @fb_height: height of a framebuffer.
* @src_width: width of a partial image to be displayed from framebuffer.
* @src_height: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
......@@ -108,6 +110,8 @@ struct exynos_drm_overlay {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
unsigned int src_width;
unsigned int src_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
......@@ -205,6 +209,18 @@ struct exynos_drm_manager {
struct exynos_drm_display_ops *display_ops;
};
struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head gem_list;
unsigned int gem_nr;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
};
/*
* Exynos drm private structure.
*/
......@@ -287,4 +303,5 @@ extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
#endif
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "drmP.h"
#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
/* valid register range set from user: 0x0104 ~ 0x0880 */
#define G2D_VALID_START 0x0104
#define G2D_VALID_END 0x0880
/* general registers */
#define G2D_SOFT_RESET 0x0000
#define G2D_INTEN 0x0004
#define G2D_INTC_PEND 0x000C
#define G2D_DMA_SFR_BASE_ADDR 0x0080
#define G2D_DMA_COMMAND 0x0084
#define G2D_DMA_STATUS 0x008C
#define G2D_DMA_HOLD_CMD 0x0090
/* command registers */
#define G2D_BITBLT_START 0x0100
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
#define G2D_PAT_BASE_ADDR 0x0500
#define G2D_MSK_BASE_ADDR 0x0520
/* G2D_SOFT_RESET */
#define G2D_SFRCLEAR (1 << 1)
#define G2D_R (1 << 0)
/* G2D_INTEN */
#define G2D_INTEN_ACF (1 << 3)
#define G2D_INTEN_UCF (1 << 2)
#define G2D_INTEN_GCF (1 << 1)
#define G2D_INTEN_SCF (1 << 0)
/* G2D_INTC_PEND */
#define G2D_INTP_ACMD_FIN (1 << 3)
#define G2D_INTP_UCMD_FIN (1 << 2)
#define G2D_INTP_GCMD_FIN (1 << 1)
#define G2D_INTP_SCMD_FIN (1 << 0)
/* G2D_DMA_COMMAND */
#define G2D_DMA_HALT (1 << 2)
#define G2D_DMA_CONTINUE (1 << 1)
#define G2D_DMA_START (1 << 0)
/* G2D_DMA_STATUS */
#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
#define G2D_DMA_DONE (1 << 0)
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
/* G2D_DMA_HOLD_CMD */
#define G2D_USET_HOLD (1 << 2)
#define G2D_LIST_HOLD (1 << 1)
#define G2D_BITBLT_HOLD (1 << 0)
/* G2D_BITBLT_START */
#define G2D_START_CASESEL (1 << 2)
#define G2D_START_NHOLT (1 << 1)
#define G2D_START_BITBLT (1 << 0)
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM 64
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
u32 data[G2D_CMDLIST_DATA_NUM];
u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
struct drm_pending_event base;
struct drm_exynos_g2d_event event;
};
struct g2d_gem_node {
struct list_head list;
unsigned int handle;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
unsigned int gem_nr;
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
};
struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
struct completion complete;
int async;
};
struct g2d_data {
struct device *dev;
struct clk *gate_clk;
struct resource *regs_res;
void __iomem *regs;
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
struct exynos_drm_subdrv subdrv;
bool suspended;
/* cmdlist */
struct g2d_cmdlist_node *cmdlist_node;
struct list_head free_cmdlist;
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
int nr;
int ret;
g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
}
node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node),
GFP_KERNEL);
if (!node) {
dev_err(dev, "failed to allocate memory\n");
ret = -ENOMEM;
goto err;
}
for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
node[nr].cmdlist =
g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
node[nr].dma_addr =
g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
list_add_tail(&node[nr].list, &g2d->free_cmdlist);
}
return 0;
err:
dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
g2d->cmdlist_pool);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
kfree(g2d->cmdlist_node);
dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
g2d->cmdlist_pool);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node;
mutex_lock(&g2d->cmdlist_mutex);
if (list_empty(&g2d->free_cmdlist)) {
dev_err(dev, "there is no free cmdlist\n");
mutex_unlock(&g2d->cmdlist_mutex);
return NULL;
}
node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
list);
list_del_init(&node->list);
mutex_unlock(&g2d->cmdlist_mutex);
return node;
}
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
mutex_lock(&g2d->cmdlist_mutex);
list_move_tail(&node->list, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
}
static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
if (list_empty(&g2d_priv->inuse_cmdlist))
goto add_to_list;
/* this links to the base address of the new cmdlist */
lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
if (node->event)
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
struct drm_file *file,
struct g2d_cmdlist_node *node)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist *cmdlist = node->cmdlist;
dma_addr_t *addr;
int offset;
int i;
for (i = 0; i < node->gem_nr; i++) {
struct g2d_gem_node *gem_node;
gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
if (!gem_node) {
dev_err(g2d_priv->dev, "failed to allocate gem node\n");
return -ENOMEM;
}
offset = cmdlist->last - (i * 2 + 1);
gem_node->handle = cmdlist->data[offset];
addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
file);
if (IS_ERR(addr)) {
node->gem_nr = i;
kfree(gem_node);
return PTR_ERR(addr);
}
cmdlist->data[offset] = *addr;
list_add_tail(&gem_node->list, &g2d_priv->gem_list);
g2d_priv->gem_nr++;
}
return 0;
}
static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
struct drm_file *file,
unsigned int nr)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_gem_node *node, *n;
list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
if (!nr)
break;
exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
list_del_init(&node->list);
kfree(node);
nr--;
}
}
static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
pm_runtime_get_sync(g2d->dev);
clk_enable(g2d->gate_clk);
/* interrupt enable */
writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
g2d->regs + G2D_INTEN);
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
{
struct g2d_runqueue_node *runqueue_node;
if (list_empty(&g2d->runqueue))
return NULL;
runqueue_node = list_first_entry(&g2d->runqueue,
struct g2d_runqueue_node, list);
list_del_init(&runqueue_node->list);
return runqueue_node;
}
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
static void g2d_exec_runqueue(struct g2d_data *g2d)
{
g2d->runqueue_node = g2d_get_runqueue_node(g2d);
if (g2d->runqueue_node)
g2d_dma_start(g2d, g2d->runqueue_node);
}
static void g2d_runqueue_worker(struct work_struct *work)
{
struct g2d_data *g2d = container_of(work, struct g2d_data,
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
clk_disable(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
if (g2d->runqueue_node->async)
g2d_free_runqueue_node(g2d, g2d->runqueue_node);
if (g2d->suspended)
g2d->runqueue_node = NULL;
else
g2d_exec_runqueue(g2d);
mutex_unlock(&g2d->runqueue_mutex);
}
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
struct drm_device *drm_dev = g2d->subdrv.drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timeval now;
unsigned long flags;
if (list_empty(&runqueue_node->event_list))
return;
e = list_first_entry(&runqueue_node->event_list,
struct drm_exynos_pending_g2d_event, base.link);
do_gettimeofday(&now);
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
e->event.cmdlist_no = cmdlist_no;
spin_lock_irqsave(&drm_dev->event_lock, flags);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
struct g2d_data *g2d = dev_id;
u32 pending;
pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
if (pending)
writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
if (pending & G2D_INTP_GCMD_FIN) {
u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
G2D_DMA_LIST_DONE_COUNT_OFFSET;
g2d_finish_event(g2d, cmdlist_no);
writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
if (!(pending & G2D_INTP_ACMD_FIN)) {
writel_relaxed(G2D_DMA_CONTINUE,
g2d->regs + G2D_DMA_COMMAND);
}
}
if (pending & G2D_INTP_ACMD_FIN)
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
return IRQ_HANDLED;
}
static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
int nr, bool for_addr)
{
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
goto err;
if (reg_offset % 4)
goto err;
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
case G2D_SRC_PLANE2_BASE_ADDR:
case G2D_DST_BASE_ADDR:
case G2D_DST_PLANE2_BASE_ADDR:
case G2D_PAT_BASE_ADDR:
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
break;
default:
if (for_addr)
goto err;
break;
}
}
return 0;
err:
dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
return -EINVAL;
}
/* ioctl functions */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_g2d_get_ver *ver = data;
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct device *dev = g2d_priv->dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
struct g2d_cmdlist_node *node;
struct g2d_cmdlist *cmdlist;
unsigned long flags;
int size;
int ret;
if (!dev)
return -ENODEV;
g2d = dev_get_drvdata(dev);
if (!g2d)
return -EFAULT;
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
if (file->event_space < sizeof(e->event)) {
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
ret = -ENOMEM;
goto err;
}
file->event_space -= sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
dev_err(dev, "failed to allocate event\n");
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
ret = -ENOMEM;
goto err;
}
e->event.base.type = DRM_EXYNOS_G2D_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = req->user_data;
e->base.event = &e->event.base;
e->base.file_priv = file;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
node->event = e;
}
cmdlist = node->cmdlist;
cmdlist->last = 0;
/*
* If the SFR registers are not cleared, the cmdlist is affected by the
* register values of the previous cmdlist. The G2D hardware executes the
* SFR clear command and the following command at the same time, so that
* following command is ignored and execution only resumes correctly two
* commands later; a dummy command is therefore needed right after the
* SFR clear command.
*/
cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
cmdlist->data[cmdlist->last++] = 0;
if (node->event) {
cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
}
/* Check the cmdlist size: the last 2 entries are for G2D_BITBLT_START */
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd,
sizeof(*cmd) * req->cmd_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_nr * 2;
ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
node->gem_nr = req->cmd_gem_nr;
if (req->cmd_gem_nr) {
struct drm_exynos_g2d_cmd *cmd_gem;
cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd_gem,
sizeof(*cmd_gem) * req->cmd_gem_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_gem_nr * 2;
ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
if (ret < 0)
goto err_free_event;
ret = g2d_get_cmdlist_gem(drm_dev, file, node);
if (ret < 0)
goto err_unmap;
}
cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
/* head */
cmdlist->head = cmdlist->last / 2;
/* tail */
cmdlist->data[cmdlist->last] = 0;
g2d_add_cmdlist_to_inuse(g2d_priv, node);
return 0;
err_unmap:
g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
kfree(node->event);
}
err:
g2d_put_cmdlist(g2d, node);
return ret;
}
EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct device *dev = g2d_priv->dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
if (!dev)
return -ENODEV;
g2d = dev_get_drvdata(dev);
if (!g2d)
return -EFAULT;
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node) {
dev_err(dev, "failed to allocate memory\n");
return -ENOMEM;
}
run_cmdlist = &runqueue_node->run_cmdlist;
event_list = &runqueue_node->event_list;
INIT_LIST_HEAD(run_cmdlist);
INIT_LIST_HEAD(event_list);
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
list_splice_init(&g2d_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
dev_err(dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
mutex_lock(&g2d->runqueue_mutex);
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
mutex_unlock(&g2d->runqueue_mutex);
if (runqueue_node->async)
goto out;
wait_for_completion(&runqueue_node->complete);
g2d_free_runqueue_node(g2d, runqueue_node);
out:
return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv;
g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
if (!g2d_priv) {
dev_err(dev, "failed to allocate g2d private data\n");
return -ENOMEM;
}
g2d_priv->dev = dev;
file_priv->g2d_priv = g2d_priv;
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
INIT_LIST_HEAD(&g2d_priv->gem_list);
return 0;
}
static void g2d_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
if (!dev)
return;
g2d = dev_get_drvdata(dev);
if (!g2d)
return;
mutex_lock(&g2d->cmdlist_mutex);
list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
list_move_tail(&node->list, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
kfree(file_priv->g2d_priv);
}
static int __devinit g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d;
struct exynos_drm_subdrv *subdrv;
int ret;
g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
if (!g2d) {
dev_err(dev, "failed to allocate driver data\n");
return -ENOMEM;
}
g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
sizeof(struct g2d_runqueue_node), 0, 0, NULL);
if (!g2d->runqueue_slab) {
ret = -ENOMEM;
goto err_free_mem;
}
g2d->dev = dev;
g2d->g2d_workq = create_singlethread_workqueue("g2d");
if (!g2d->g2d_workq) {
dev_err(dev, "failed to create workqueue\n");
ret = -EINVAL;
goto err_destroy_slab;
}
INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
INIT_LIST_HEAD(&g2d->free_cmdlist);
INIT_LIST_HEAD(&g2d->runqueue);
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
ret = g2d_init_cmdlist(g2d);
if (ret < 0)
goto err_destroy_workqueue;
g2d->gate_clk = clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
goto err_fini_cmdlist;
}
pm_runtime_enable(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "failed to get I/O memory\n");
ret = -ENOENT;
goto err_put_clk;
}
g2d->regs_res = request_mem_region(res->start, resource_size(res),
dev_name(dev));
if (!g2d->regs_res) {
dev_err(dev, "failed to request I/O memory\n");
ret = -ENOENT;
goto err_put_clk;
}
g2d->regs = ioremap(res->start, resource_size(res));
if (!g2d->regs) {
dev_err(dev, "failed to remap I/O memory\n");
ret = -ENXIO;
goto err_release_res;
}
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_unmap_base;
}
ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
if (ret < 0) {
dev_err(dev, "irq request failed\n");
goto err_unmap_base;
}
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
ret = exynos_drm_subdrv_register(subdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_free_irq;
}
dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
return 0;
err_free_irq:
free_irq(g2d->irq, g2d);
err_unmap_base:
iounmap(g2d->regs);
err_release_res:
release_resource(g2d->regs_res);
kfree(g2d->regs_res);
err_put_clk:
pm_runtime_disable(dev);
clk_put(g2d->gate_clk);
err_fini_cmdlist:
g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
kmem_cache_destroy(g2d->runqueue_slab);
err_free_mem:
kfree(g2d);
return ret;
}
static int __devexit g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
cancel_work_sync(&g2d->runqueue_work);
exynos_drm_subdrv_unregister(&g2d->subdrv);
free_irq(g2d->irq, g2d);
while (g2d->runqueue_node) {
g2d_free_runqueue_node(g2d, g2d->runqueue_node);
g2d->runqueue_node = g2d_get_runqueue_node(g2d);
}
iounmap(g2d->regs);
release_resource(g2d->regs_res);
kfree(g2d->regs_res);
pm_runtime_disable(&pdev->dev);
clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
kfree(g2d);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
mutex_lock(&g2d->runqueue_mutex);
g2d->suspended = true;
mutex_unlock(&g2d->runqueue_mutex);
while (g2d->runqueue_node)
/* FIXME: good range? */
usleep_range(500, 1000);
flush_work_sync(&g2d->runqueue_work);
return 0;
}
static int g2d_resume(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
g2d->suspended = false;
g2d_exec_runqueue(g2d);
return 0;
}
#endif
SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = __devexit_p(g2d_remove),
.driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
},
};
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef CONFIG_DRM_EXYNOS_G2D
extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#else
static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
#endif
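
To illustrate the flow the three new ioctls are meant to drive (version query, cmdlist submission, execution), here is a hedged userspace sketch. The DRM_IOCTL_EXYNOS_G2D_* macros, the exynos_drm.h include path, and the offset/data member names of struct drm_exynos_g2d_cmd are assumptions taken from the uapi header that accompanies this series; the remaining fields mirror what the driver above reads (cmd, cmd_nr, cmd_gem, cmd_gem_nr, event_type, user_data, async).

/* Hypothetical userspace sketch of the new G2D ioctls. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <exynos_drm.h>		/* assumed uapi header location */

static int g2d_submit_example(int fd)
{
	struct drm_exynos_g2d_get_ver ver;
	struct drm_exynos_g2d_cmd cmds[1];
	struct drm_exynos_g2d_set_cmdlist cmdlist;
	struct drm_exynos_g2d_exec exec;

	memset(&ver, 0, sizeof(ver));
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver))
		return -1;			/* expect 4.1 on this hardware */

	/*
	 * One register write as an offset/value pair; the offset must fall in
	 * the 0x0104 ~ 0x0880 window checked by g2d_check_reg_offset() and,
	 * for a plain command entry, must not be a base-address register.
	 */
	cmds[0].offset = 0x0104;		/* hypothetical non-address SFR */
	cmds[0].data = 0;

	memset(&cmdlist, 0, sizeof(cmdlist));
	cmdlist.cmd = (uint64_t)(uintptr_t)cmds;
	cmdlist.cmd_nr = 1;
	cmdlist.cmd_gem = 0;
	cmdlist.cmd_gem_nr = 0;
	cmdlist.event_type = G2D_EVENT_NOT;	/* no completion event */
	cmdlist.user_data = 0;

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &cmdlist))
		return -1;

	memset(&exec, 0, sizeof(exec));
	exec.async = 0;				/* wait_for_completion() path above */

	return drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
}
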
......@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
struct vm_area_struct *vma)
{
DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
/* non-cachable by default. */
if (obj->flags & EXYNOS_BO_CACHABLE)
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
else if (obj->flags & EXYNOS_BO_WC)
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
vma->vm_page_prot =
pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
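
The cache attribute chosen here comes from the allocation flags, so userspace opts in at GEM creation time. A small hedged sketch follows, assuming DRM_IOCTL_EXYNOS_GEM_CREATE and the drm_exynos_gem_create layout from the exynos uapi header; EXYNOS_BO_CACHABLE is the flag this function tests above.

/* Hypothetical userspace sketch: allocate a cacheable GEM buffer. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <exynos_drm.h>		/* assumed uapi header location */

static int alloc_cached_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_exynos_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;
	create.flags = EXYNOS_BO_CACHABLE;	/* hits the vm_get_page_prot() branch above */

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create))
		return -1;

	*handle = create.handle;
	return 0;
}
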
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
if (!IS_NONCONTIG_BUFFER(flags)) {
......@@ -80,7 +96,7 @@ static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
return roundup(size, PAGE_SIZE);
}
static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
struct inode *inode;
......@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
}
npages = obj->size >> PAGE_SHIFT;
buf->page_size = PAGE_SIZE;
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
......@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (!exynos_gem_obj)
return;
obj = &exynos_gem_obj->base;
buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
exynos_gem_obj->buffer->pages)
if (!buf->pages)
return;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
exynos_gem_obj->buffer);
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
......@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
exynos_gem_obj = NULL;
}
static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
......@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vma->vm_flags |= (VM_IO | VM_RESERVED);
/* in case of direct mapping, always having non-cachable attribute */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = usize = vma->vm_end - vma->vm_start;
......@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
args->flags = exynos_gem_obj->flags;
args->size = exynos_gem_obj->size;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_gem_obj = to_exynos_gem_obj(obj);
buf = exynos_gem_obj->buffer;
if (obj->import_attach)
drm_prime_gem_destroy(obj, buf->sgt);
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
......@@ -724,6 +775,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -735,8 +788,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
obj = vma->vm_private_data;
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_gem_flags(exynos_gem_obj->flags);
if (ret) {
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
update_vm_cache_attr(exynos_gem_obj, vma);
return ret;
}
......@@ -40,6 +40,7 @@
* device address with IOMMU.
* @sgt: sg table to transfer page data.
* @pages: contain all pages to allocated memory region.
* @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
*/
struct exynos_drm_gem_buf {
......@@ -47,6 +48,7 @@ struct exynos_drm_gem_buf {
dma_addr_t dma_addr;
struct sg_table *sgt;
struct page **pages;
unsigned long page_size;
unsigned long size;
};
......@@ -74,9 +76,15 @@ struct exynos_drm_gem_obj {
unsigned int flags;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size);
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
......@@ -119,6 +127,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
......
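
As a rough illustration of the "get buffer information" interface declared above, a userspace caller fills in the handle and reads back the flags and size that exynos_drm_gem_get_ioctl() copies in. The DRM_IOCTL_EXYNOS_GEM_GET macro, the struct name and the include path are assumptions from the uapi header added with this patch; handle, flags and size mirror the fields the driver fills in.

/* Hypothetical userspace counterpart of exynos_drm_gem_get_ioctl(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <exynos_drm.h>		/* assumed uapi header location */

static int gem_query(int fd, uint32_t handle)
{
	struct drm_exynos_gem_info info;

	memset(&info, 0, sizeof(info));
	info.handle = handle;

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, &info))
		return -1;

	printf("gem %u: flags 0x%x, size %llu\n",
	       info.handle, info.flags, (unsigned long long)info.size);
	return 0;
}
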
......@@ -37,6 +37,8 @@ struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
bool enabled[MIXER_WIN_NR];
};
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
......@@ -189,23 +191,34 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
DRM_DEBUG_KMS("%s\n", __FILE__);
switch (mode) {
case DRM_MODE_DPMS_ON:
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (hdmi_ops && hdmi_ops->disable)
hdmi_ops->disable(ctx->hdmi_ctx->ctx);
break;
default:
DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
break;
if (mixer_ops && mixer_ops->dpms)
mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
if (hdmi_ops && hdmi_ops->dpms)
hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
}
static void drm_hdmi_apply(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
for (i = 0; i < MIXER_WIN_NR; i++) {
if (!ctx->enabled[i])
continue;
if (mixer_ops && mixer_ops->win_commit)
mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
}
if (hdmi_ops && hdmi_ops->commit)
hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
......@@ -228,21 +241,37 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
if (mixer_ops && mixer_ops->win_commit)
mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
ctx->enabled[win] = true;
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
if (mixer_ops && mixer_ops->win_disable)
mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
ctx->enabled[win] = false;
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
......@@ -335,25 +364,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
static int hdmi_runtime_suspend(struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
return 0;
}
static const struct dev_pm_ops hdmi_pm_ops = {
.runtime_suspend = hdmi_runtime_suspend,
.runtime_resume = hdmi_runtime_resume,
};
static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
......@@ -372,6 +382,5 @@ struct platform_driver exynos_drm_common_hdmi_driver = {
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
},
};
......@@ -26,6 +26,9 @@
#ifndef _EXYNOS_DRM_HDMI_H_
#define _EXYNOS_DRM_HDMI_H_
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
/*
* exynos hdmi common context structure.
*
......@@ -54,13 +57,14 @@ struct exynos_hdmi_ops {
void (*get_max_resol)(void *ctx, unsigned int *width,
unsigned int *height);
void (*commit)(void *ctx);
void (*disable)(void *ctx);
void (*dpms)(void *ctx, int mode);
};
struct exynos_mixer_ops {
/* manager */
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
......
......@@ -41,8 +41,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
container_of(plane, struct exynos_plane, base);
struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
struct exynos_drm_crtc_pos pos;
unsigned int x = src_x >> 16;
unsigned int y = src_y >> 16;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
......@@ -53,10 +51,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pos.crtc_w = crtc_w;
pos.crtc_h = crtc_h;
pos.fb_x = x;
pos.fb_y = y;
/* source values are in 16.16 fixed point */
pos.fb_x = src_x >> 16;
pos.fb_y = src_y >> 16;
pos.src_w = src_w >> 16;
pos.src_h = src_h >> 16;
/* TODO: scale feature */
ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
if (ret < 0)
return ret;
......
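
The src_* values handed to the plane update come from the DRM core in 16.16 fixed point, so the >> 16 shifts above simply discard the fractional part. A quick illustration (the variable names exist only for this example):

/* 16.16 fixed point: the integer pixel count lives in the upper 16 bits. */
uint32_t src_w_fixed  = 1280u << 16;		/* 0x05000000 */
uint32_t src_w_pixels = src_w_fixed >> 16;	/* 1280, what ends up in pos.src_w */
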
......@@ -57,18 +57,16 @@ struct hdmi_resources {
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
struct fb_videomode *default_timing;
unsigned int is_v13:1;
unsigned int default_win;
unsigned int default_bpp;
bool hpd_handle;
bool enabled;
bool hpd;
bool powered;
bool is_v13;
bool dvi_mode;
struct mutex hdmi_mutex;
struct resource *regs_res;
void __iomem *regs;
unsigned int irq;
struct workqueue_struct *wq;
struct work_struct hotplug_work;
unsigned int external_irq;
unsigned int internal_irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
......@@ -78,6 +76,9 @@ struct hdmi_context {
struct hdmi_resources res;
void *parent_ctx;
void (*cfg_hpd)(bool external);
int (*get_hpd)(void);
};
/* HDMI Version 1.3 */
......@@ -361,6 +362,13 @@ static const u8 hdmiphy_conf27_027[32] = {
0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
};
static const u8 hdmiphy_conf74_176[32] = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
};
static const u8 hdmiphy_conf74_25[32] = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
......@@ -750,6 +758,63 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
},
};
static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
.core = {
.h_blank = {0x18, 0x01},
.v2_blank = {0x65, 0x04},
.v1_blank = {0x2d, 0x00},
.v_line = {0x65, 0x04},
.h_line = {0x98, 0x08},
.hsync_pol = {0x00},
.vsync_pol = {0x00},
.int_pro_mode = {0x00},
.v_blank_f0 = {0xff, 0xff},
.v_blank_f1 = {0xff, 0xff},
.h_sync_start = {0x56, 0x00},
.h_sync_end = {0x82, 0x00},
.v_sync_line_bef_2 = {0x09, 0x00},
.v_sync_line_bef_1 = {0x04, 0x00},
.v_sync_line_aft_2 = {0xff, 0xff},
.v_sync_line_aft_1 = {0xff, 0xff},
.v_sync_line_aft_pxl_2 = {0xff, 0xff},
.v_sync_line_aft_pxl_1 = {0xff, 0xff},
.v_blank_f2 = {0xff, 0xff},
.v_blank_f3 = {0xff, 0xff},
.v_blank_f4 = {0xff, 0xff},
.v_blank_f5 = {0xff, 0xff},
.v_sync_line_aft_3 = {0xff, 0xff},
.v_sync_line_aft_4 = {0xff, 0xff},
.v_sync_line_aft_5 = {0xff, 0xff},
.v_sync_line_aft_6 = {0xff, 0xff},
.v_sync_line_aft_pxl_3 = {0xff, 0xff},
.v_sync_line_aft_pxl_4 = {0xff, 0xff},
.v_sync_line_aft_pxl_5 = {0xff, 0xff},
.v_sync_line_aft_pxl_6 = {0xff, 0xff},
.vact_space_1 = {0xff, 0xff},
.vact_space_2 = {0xff, 0xff},
.vact_space_3 = {0xff, 0xff},
.vact_space_4 = {0xff, 0xff},
.vact_space_5 = {0xff, 0xff},
.vact_space_6 = {0xff, 0xff},
/* other don't care */
},
.tg = {
0x00, /* cmd */
0x98, 0x08, /* h_fsz */
0x18, 0x01, 0x80, 0x07, /* hact */
0x65, 0x04, /* v_fsz */
0x01, 0x00, 0x33, 0x02, /* vsync */
0x2d, 0x00, 0x38, 0x04, /* vact */
0x33, 0x02, /* field_chg */
0x48, 0x02, /* vact_st2 */
0x00, 0x00, /* vact_st3 */
0x00, 0x00, /* vact_st4 */
0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
0x01, 0x00, 0x33, 0x02, /* field top/bot */
0x00, /* 3d FP */
},
};
static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
.core = {
.h_blank = {0xd0, 0x02},
......@@ -864,6 +929,7 @@ static const struct hdmi_conf hdmi_confs[] = {
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
{ 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
......@@ -1194,12 +1260,8 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
if (val)
return true;
return false;
return hdata->hpd;
}
static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
......@@ -1215,10 +1277,12 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
if (raw_edid) {
hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
* EDID_LENGTH, len));
DRM_DEBUG_KMS("width[%d] x height[%d]\n",
raw_edid->width_cm, raw_edid->height_cm);
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
} else {
return -ENODEV;
}
......@@ -1289,28 +1353,6 @@ static int hdmi_check_timing(void *ctx, void *timing)
return hdmi_v14_check_timing(check_timing);
}
static int hdmi_display_power_on(void *ctx, int mode)
{
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
switch (mode) {
case DRM_MODE_DPMS_ON:
DRM_DEBUG_KMS("hdmi [on]\n");
break;
case DRM_MODE_DPMS_STANDBY:
break;
case DRM_MODE_DPMS_SUSPEND:
break;
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("hdmi [off]\n");
break;
default:
break;
}
return 0;
}
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
......@@ -1463,10 +1505,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
{
u32 mod;
mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (mod & HDMI_DVI_MODE_EN)
if (hdata->dvi_mode)
return;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
......@@ -1478,9 +1517,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
{
u32 reg;
/* disable hpd handle for drm */
hdata->hpd_handle = false;
if (hdata->is_v13)
reg = HDMI_V13_CORE_RSTOUT;
else
......@@ -1491,16 +1527,10 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
mdelay(10);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
/* enable hpd handle for drm */
hdata->hpd_handle = true;
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
/* disable hpd handle for drm */
hdata->hpd_handle = false;
/* enable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
......@@ -1514,6 +1544,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
if (hdata->dvi_mode) {
/* choose DVI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
hdmi_reg_writeb(hdata, HDMI_CON_2,
HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
}
if (hdata->is_v13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
......@@ -1535,9 +1573,6 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
/* enable hpd handle for drm */
hdata->hpd_handle = true;
}
static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
......@@ -1890,8 +1925,11 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
mutex_lock(&hdata->hdmi_mutex);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
mutex_unlock(&hdata->hdmi_mutex);
hdmi_audio_init(hdata);
/* setting core registers */
......@@ -1971,20 +2009,86 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_conf_apply(hdata);
}
static void hdmi_poweron(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&hdata->hdmi_mutex);
if (hdata->powered) {
mutex_unlock(&hdata->hdmi_mutex);
return;
}
hdata->powered = true;
if (hdata->cfg_hpd)
hdata->cfg_hpd(true);
mutex_unlock(&hdata->hdmi_mutex);
pm_runtime_get_sync(hdata->dev);
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered)
goto out;
mutex_unlock(&hdata->hdmi_mutex);
/*
* The TV power domain needs any condition of hdmiphy to turn off and
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
pm_runtime_put_sync(hdata->dev);
hdata->enabled = true;
mutex_lock(&hdata->hdmi_mutex);
if (hdata->cfg_hpd)
hdata->cfg_hpd(false);
hdata->powered = false;
out:
mutex_unlock(&hdata->hdmi_mutex);
}
static void hdmi_disable(void *ctx)
static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (hdata->enabled) {
hdmi_audio_control(hdata, false);
hdmiphy_conf_reset(hdata);
hdmi_conf_reset(hdata);
switch (mode) {
case DRM_MODE_DPMS_ON:
hdmi_poweron(hdata);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
hdmi_poweroff(hdata);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
break;
}
}
......@@ -1993,30 +2097,35 @@ static struct exynos_hdmi_ops hdmi_ops = {
.is_connected = hdmi_is_connected,
.get_edid = hdmi_get_edid,
.check_timing = hdmi_check_timing,
.power_on = hdmi_display_power_on,
/* manager */
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
.disable = hdmi_disable,
.dpms = hdmi_dpms,
};
/*
* Handle hotplug events outside the interrupt handler proper.
*/
static void hdmi_hotplug_func(struct work_struct *work)
static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata =
container_of(work, struct hdmi_context, hotplug_work);
struct exynos_drm_hdmi_context *ctx =
(struct exynos_drm_hdmi_context *)hdata->parent_ctx;
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
if (!hdata->get_hpd)
goto out;
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = hdata->get_hpd();
mutex_unlock(&hdata->hdmi_mutex);
drm_helper_hpd_irq_event(ctx->drm_dev);
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
out:
return IRQ_HANDLED;
}
static irqreturn_t hdmi_irq_handler(int irq, void *arg)
static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
......@@ -2025,19 +2134,28 @@ static irqreturn_t hdmi_irq_handler(int irq, void *arg)
intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
/* clearing flags for HPD plug/unplug */
if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
DRM_DEBUG_KMS("unplugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_UNPLUG);
}
if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
DRM_DEBUG_KMS("plugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_PLUG);
}
if (ctx->drm_dev && hdata->hpd_handle)
queue_work(hdata->wq, &hdata->hotplug_work);
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
if (hdata->powered && hdata->hpd) {
mutex_unlock(&hdata->hdmi_mutex);
goto out;
}
mutex_unlock(&hdata->hdmi_mutex);
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
out:
return IRQ_HANDLED;
}
......@@ -2131,68 +2249,6 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
return 0;
}
static void hdmi_resource_poweron(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
/* turn HDMI power on */
regulator_bulk_enable(res->regul_count, res->regul_bulk);
/* power-on hdmi physical interface */
clk_enable(res->hdmiphy);
/* turn clocks on */
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
hdmiphy_conf_reset(hdata);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
hdmi_audio_init(hdata);
}
static void hdmi_resource_poweroff(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
/* turn clocks off */
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
/* power-off hdmiphy */
clk_disable(res->hdmiphy);
/* turn HDMI power off */
regulator_bulk_disable(res->regul_count, res->regul_bulk);
}
static int hdmi_runtime_suspend(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
DRM_DEBUG_KMS("%s\n", __func__);
hdmi_resource_poweroff(ctx->ctx);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
DRM_DEBUG_KMS("%s\n", __func__);
hdmi_resource_poweron(ctx->ctx);
return 0;
}
static const struct dev_pm_ops hdmi_pm_ops = {
.runtime_suspend = hdmi_runtime_suspend,
.runtime_resume = hdmi_runtime_resume,
};
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
......@@ -2237,15 +2293,16 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return -ENOMEM;
}
mutex_init(&hdata->hdmi_mutex);
drm_hdmi_ctx->ctx = (void *)hdata;
hdata->parent_ctx = (void *)drm_hdmi_ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
hdata->is_v13 = pdata->is_v13;
hdata->default_win = pdata->default_win;
hdata->default_timing = &pdata->timing;
hdata->default_bpp = pdata->bpp;
hdata->cfg_hpd = pdata->cfg_hpd;
hdata->get_hpd = pdata->get_hpd;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
......@@ -2294,41 +2351,49 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
DRM_ERROR("get interrupt resource failed.\n");
ret = -ENXIO;
hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
if (hdata->external_irq < 0) {
DRM_ERROR("failed to get platform irq\n");
ret = hdata->external_irq;
goto err_hdmiphy;
}
/* create workqueue and hotplug work */
hdata->wq = alloc_workqueue("exynos-drm-hdmi",
WQ_UNBOUND | WQ_NON_REENTRANT, 1);
if (hdata->wq == NULL) {
DRM_ERROR("Failed to create workqueue.\n");
ret = -ENOMEM;
hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
if (hdata->internal_irq < 0) {
DRM_ERROR("failed to get platform internal irq\n");
ret = hdata->internal_irq;
goto err_hdmiphy;
}
INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
/* register hpd interrupt */
ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
drm_hdmi_ctx);
ret = request_threaded_irq(hdata->external_irq, NULL,
hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi_external", drm_hdmi_ctx);
if (ret) {
DRM_ERROR("request interrupt failed.\n");
goto err_workqueue;
DRM_ERROR("failed to register hdmi internal interrupt\n");
goto err_hdmiphy;
}
if (hdata->cfg_hpd)
hdata->cfg_hpd(false);
ret = request_threaded_irq(hdata->internal_irq, NULL,
hdmi_internal_irq_thread, IRQF_ONESHOT,
"hdmi_internal", drm_hdmi_ctx);
if (ret) {
DRM_ERROR("failed to register hdmi internal interrupt\n");
goto err_free_irq;
}
hdata->irq = res->start;
/* register specific callbacks to common hdmi. */
exynos_hdmi_ops_register(&hdmi_ops);
hdmi_resource_poweron(hdata);
pm_runtime_enable(dev);
return 0;
err_workqueue:
destroy_workqueue(hdata->wq);
err_free_irq:
free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
......@@ -2348,18 +2413,15 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
static int __devexit hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_resource_poweroff(hdata);
pm_runtime_disable(dev);
disable_irq(hdata->irq);
free_irq(hdata->irq, hdata);
cancel_work_sync(&hdata->hotplug_work);
destroy_workqueue(hdata->wq);
free_irq(hdata->internal_irq, hdata);
hdmi_resources_cleanup(hdata);
......@@ -2378,12 +2440,43 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int hdmi_suspend(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
disable_irq(hdata->internal_irq);
disable_irq(hdata->external_irq);
hdata->hpd = false;
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
hdmi_poweroff(hdata);
return 0;
}
static int hdmi_resume(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
enable_irq(hdata->external_irq);
enable_irq(hdata->internal_irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.driver = {
.name = "exynos4-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
},
};
......@@ -37,9 +37,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
......@@ -57,13 +54,14 @@ struct hdmi_win_data {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
unsigned int src_width;
unsigned int src_height;
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
};
struct mixer_resources {
struct device *dev;
int irq;
void __iomem *mixer_regs;
void __iomem *vp_regs;
......@@ -76,10 +74,13 @@ struct mixer_resources {
};
struct mixer_context {
unsigned int irq;
struct device *dev;
int pipe;
bool interlace;
bool powered;
u32 int_en;
struct mutex mixer_mutex;
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
};
......@@ -352,10 +353,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
unsigned int full_width, full_height, width, height;
unsigned int x_ratio, y_ratio;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
unsigned int mode_width, mode_height;
unsigned int buf_num;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
......@@ -382,21 +380,9 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
return;
}
full_width = win_data->fb_width;
full_height = win_data->fb_height;
width = win_data->crtc_width;
height = win_data->crtc_height;
mode_width = win_data->mode_width;
mode_height = win_data->mode_height;
/* scaling feature: (src << 16) / dst */
x_ratio = (width << 16) / width;
y_ratio = (height << 16) / height;
src_x_offset = win_data->fb_x;
src_y_offset = win_data->fb_y;
dst_x_offset = win_data->crtc_x;
dst_y_offset = win_data->crtc_y;
x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
if (buf_num == 2) {
luma_addr[0] = win_data->dma_addr;
......@@ -404,7 +390,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
+ (full_width * full_height);
+ (win_data->fb_width * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
......@@ -413,8 +399,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + full_width;
chroma_addr[1] = chroma_addr[0] + full_width;
luma_addr[1] = luma_addr[0] + win_data->fb_width;
chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
}
} else {
ctx->interlace = false;
......@@ -435,26 +421,26 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
VP_IMG_VSIZE(full_height));
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
VP_IMG_VSIZE(full_height / 2));
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
VP_IMG_VSIZE(win_data->fb_height / 2));
vp_reg_write(res, VP_SRC_WIDTH, width);
vp_reg_write(res, VP_SRC_HEIGHT, height);
vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(src_x_offset));
vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
VP_SRC_H_POSITION_VAL(win_data->fb_x));
vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
vp_reg_write(res, VP_DST_WIDTH, width);
vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
if (ctx->interlace) {
vp_reg_write(res, VP_DST_HEIGHT, height / 2);
vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
} else {
vp_reg_write(res, VP_DST_HEIGHT, height);
vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
}
vp_reg_write(res, VP_H_RATIO, x_ratio);
......@@ -468,8 +454,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
mixer_cfg_scan(ctx, mode_height);
mixer_cfg_rgb_fmt(ctx, mode_height);
mixer_cfg_scan(ctx, win_data->mode_height);
mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
......@@ -484,10 +470,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
unsigned int full_width, width, height;
unsigned int x_ratio, y_ratio;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
unsigned int mode_width, mode_height;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
......@@ -510,26 +494,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
dma_addr = win_data->dma_addr;
full_width = win_data->fb_width;
width = win_data->crtc_width;
height = win_data->crtc_height;
mode_width = win_data->mode_width;
mode_height = win_data->mode_height;
/* 2x scaling feature */
x_ratio = 0;
y_ratio = 0;
src_x_offset = win_data->fb_x;
src_y_offset = win_data->fb_y;
dst_x_offset = win_data->crtc_x;
dst_y_offset = win_data->crtc_y;
/* converting dma address base and source offset */
dma_addr = dma_addr
+ (src_x_offset * win_data->bpp >> 3)
+ (src_y_offset * full_width * win_data->bpp >> 3);
dma_addr = win_data->dma_addr
+ (win_data->fb_x * win_data->bpp >> 3)
+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
src_x_offset = 0;
src_y_offset = 0;
......@@ -546,10 +521,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
val = MXR_GRP_WH_WIDTH(width);
val |= MXR_GRP_WH_HEIGHT(height);
val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
......@@ -567,8 +542,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
mixer_cfg_scan(ctx, mode_height);
mixer_cfg_rgb_fmt(ctx, mode_height);
mixer_cfg_scan(ctx, win_data->mode_height);
mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
......@@ -591,6 +566,116 @@ static void vp_win_reset(struct mixer_context *ctx)
WARN(tries == 0, "failed to reset Video Processor\n");
}
static void mixer_win_reset(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
u32 val; /* value stored to register */
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(ctx, false);
mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
/* set output in RGB888 mode */
mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
/* 16 beat burst in DMA */
mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
MXR_STATUS_BURST_MASK);
/* setting default layer priority: layer1 > layer0 > video
* because typical usage scenario would be
* layer1 - OSD
* layer0 - framebuffer
* video - video overlay
*/
val = MXR_LAYER_CFG_GRP1_VAL(3);
val |= MXR_LAYER_CFG_GRP0_VAL(2);
val |= MXR_LAYER_CFG_VP_VAL(1);
mixer_reg_write(res, MXR_LAYER_CFG, val);
/* setting background color */
mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
/* setting graphical layers */
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
/* the same configuration for both layers */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
/* disable all layers */
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
mixer_vsync_set_update(ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
}
static void mixer_poweron(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&ctx->mixer_mutex);
if (ctx->powered) {
mutex_unlock(&ctx->mixer_mutex);
return;
}
ctx->powered = true;
mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
clk_enable(res->mixer);
clk_enable(res->vp);
clk_enable(res->sclk_mixer);
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
}
static void mixer_poweroff(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&ctx->mixer_mutex);
if (!ctx->powered)
goto out;
mutex_unlock(&ctx->mixer_mutex);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
clk_disable(res->mixer);
clk_disable(res->vp);
clk_disable(res->sclk_mixer);
pm_runtime_put_sync(ctx->dev);
mutex_lock(&ctx->mixer_mutex);
ctx->powered = false;
out:
mutex_unlock(&ctx->mixer_mutex);
}
static int mixer_enable_vblank(void *ctx, int pipe)
{
struct mixer_context *mixer_ctx = ctx;
......@@ -618,6 +703,27 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
static void mixer_dpms(void *ctx, int mode)
{
struct mixer_context *mixer_ctx = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
switch (mode) {
case DRM_MODE_DPMS_ON:
mixer_poweron(mixer_ctx);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
mixer_poweroff(mixer_ctx);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
break;
}
}
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
......@@ -643,7 +749,7 @@ static void mixer_win_mode_set(void *ctx,
win = MIXER_DEFAULT_WIN;
if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
......@@ -665,6 +771,8 @@ static void mixer_win_mode_set(void *ctx,
win_data->fb_y = overlay->fb_y;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->src_width = overlay->src_width;
win_data->src_height = overlay->src_height;
win_data->mode_width = overlay->mode_width;
win_data->mode_height = overlay->mode_height;
......@@ -672,44 +780,26 @@ static void mixer_win_mode_set(void *ctx,
win_data->scan_flags = overlay->scan_flag;
}
static void mixer_win_commit(void *ctx, int zpos)
static void mixer_win_commit(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
if (win == DEFAULT_ZPOS)
win = MIXER_DEFAULT_WIN;
if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
return;
}
if (win > 1)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
}
static void mixer_win_disable(void *ctx, int zpos)
static void mixer_win_disable(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
if (win == DEFAULT_ZPOS)
win = MIXER_DEFAULT_WIN;
if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
return;
}
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
......@@ -723,6 +813,7 @@ static struct exynos_mixer_ops mixer_ops = {
/* manager */
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.dpms = mixer_dpms,
/* overlay */
.win_mode_set = mixer_win_mode_set,
......@@ -773,7 +864,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
struct mixer_resources *res = &ctx->mixer_res;
u32 val, val_base;
u32 val, base, shadow;
spin_lock(&res->reg_slock);
......@@ -784,12 +875,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
if (val & MXR_INT_STATUS_VSYNC) {
/* interlace scan need to check shadow register */
if (ctx->interlace) {
val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
if (ctx->win_data[0].dma_addr != val_base)
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
goto out;
val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
if (ctx->win_data[1].dma_addr != val_base)
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
if (base != shadow)
goto out;
}
......@@ -811,117 +904,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static void mixer_win_reset(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
u32 val; /* value stored to register */
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(ctx, false);
mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
/* set output in RGB888 mode */
mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
/* 16 beat burst in DMA */
mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
MXR_STATUS_BURST_MASK);
/* setting default layer priority: layer1 > layer0 > video
* because typical usage scenario would be
* layer1 - OSD
* layer0 - framebuffer
* video - video overlay
*/
val = MXR_LAYER_CFG_GRP1_VAL(3);
val |= MXR_LAYER_CFG_GRP0_VAL(2);
val |= MXR_LAYER_CFG_VP_VAL(1);
mixer_reg_write(res, MXR_LAYER_CFG, val);
/* setting background color */
mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
/* setting graphical layers */
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
/* the same configuration for both layers */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
/* disable all layers */
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
mixer_vsync_set_update(ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
}
static void mixer_resource_poweron(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
clk_enable(res->mixer);
clk_enable(res->vp);
clk_enable(res->sclk_mixer);
mixer_win_reset(ctx);
}
static void mixer_resource_poweroff(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
clk_disable(res->mixer);
clk_disable(res->vp);
clk_disable(res->sclk_mixer);
}
static int mixer_runtime_resume(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
DRM_DEBUG_KMS("resume - start\n");
mixer_resource_poweron(ctx->ctx);
return 0;
}
static int mixer_runtime_suspend(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
DRM_DEBUG_KMS("suspend - start\n");
mixer_resource_poweroff(ctx->ctx);
return 0;
}
static const struct dev_pm_ops mixer_pm_ops = {
.runtime_suspend = mixer_runtime_suspend,
.runtime_resume = mixer_runtime_resume,
};
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
......@@ -931,7 +913,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct resource *res;
int ret;
mixer_res->dev = dev;
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = clk_get(dev, "mixer");
......@@ -1027,7 +1008,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
clk_put(mixer_res->vp);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
mixer_res->dev = NULL;
return ret;
}
......@@ -1035,7 +1015,6 @@ static void mixer_resources_cleanup(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
disable_irq(res->irq);
free_irq(res->irq, ctx);
iounmap(res->vp_regs);
......@@ -1064,6 +1043,9 @@ static int __devinit mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
mutex_init(&ctx->mixer_mutex);
ctx->dev = &pdev->dev;
drm_hdmi_ctx->ctx = (void *)ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
......@@ -1076,7 +1058,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
/* register specific callback point to common hdmi. */
exynos_mixer_ops_register(&mixer_ops);
mixer_resource_poweron(ctx);
pm_runtime_enable(dev);
return 0;
......@@ -1095,12 +1077,27 @@ static int mixer_remove(struct platform_device *pdev)
dev_info(dev, "remove successful\n");
mixer_resource_poweroff(ctx);
pm_runtime_disable(&pdev->dev);
mixer_resources_cleanup(ctx);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mixer_suspend(struct device *dev)
{
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
mixer_poweroff(ctx);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
struct platform_driver mixer_driver = {
.driver = {
.name = "s5p-mixer",
......
......@@ -138,14 +138,16 @@
#define HDMI_ASP_MASK (1 << 2)
#define HDMI_EN (1 << 0)
/* HDMI_CON_2 */
#define HDMI_VID_PREAMBLE_DIS (1 << 5)
#define HDMI_GUARD_BAND_DIS (1 << 1)
/* HDMI_PHY_STATUS */
#define HDMI_PHY_STATUS_READY (1 << 0)
/* HDMI_MODE_SEL */
#define HDMI_MODE_HDMI_EN (1 << 1)
#define HDMI_MODE_DVI_EN (1 << 0)
#define HDMI_DVI_MODE_EN (1)
#define HDMI_DVI_MODE_DIS (0)
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
......
......@@ -29,6 +29,8 @@
#ifndef _EXYNOS_DRM_H_
#define _EXYNOS_DRM_H_
#include "drm.h"
/**
* User-desired buffer creation information structure.
*
......@@ -74,6 +76,21 @@ struct drm_exynos_gem_mmap {
uint64_t mapped;
};
/**
* A structure for gem object information.
*
* @handle: a handle to the gem object created.
* @flags: flag value including memory type and cache attribute;
*	this value is set by the driver.
* @size: size of the memory region allocated through gem;
*	this value is set by the driver.
*/
struct drm_exynos_gem_info {
unsigned int handle;
unsigned int flags;
uint64_t size;
};
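For illustration only, a minimal userspace sketch of querying this information through the new ioctl (defined further below as DRM_IOCTL_EXYNOS_GEM_GET); it assumes an already-open exynos DRM file descriptor and libdrm's drmIoctl() helper, and the handle argument is a placeholder for a previously created gem object.

#include <string.h>
#include <xf86drm.h>		/* drmIoctl() from libdrm */
#include "exynos_drm.h"		/* this header */

/* Ask the driver for the flags and size of an existing gem object. */
static int exynos_gem_get_info(int fd, unsigned int handle,
			       struct drm_exynos_gem_info *info)
{
	memset(info, 0, sizeof(*info));
	info->handle = handle;

	/* on success the driver fills in info->flags and info->size */
	return drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, info);
}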
/**
* A structure for user connection request of virtual display.
*
......@@ -95,18 +112,64 @@ struct drm_exynos_plane_set_zpos {
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
/* Physically Contiguous memory and used as default. */
EXYNOS_BO_CONTIG = 0 << 0,
/* Physically Non-Contiguous memory. */
EXYNOS_BO_NONCONTIG = 1 << 0,
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG
/* non-cachable mapping and used as default. */
EXYNOS_BO_NONCACHABLE = 0 << 1,
/* cachable mapping. */
EXYNOS_BO_CACHABLE = 1 << 1,
/* write-combine mapping. */
EXYNOS_BO_WC = 1 << 2,
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
EXYNOS_BO_WC
};
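As a hedged illustration of how these flags combine, a sketch of allocating a non-contiguous, cacheable buffer through the existing create ioctl (struct drm_exynos_gem_create and DRM_IOCTL_EXYNOS_GEM_CREATE come from this header; the 1 MiB size is arbitrary):

#include <string.h>
#include <xf86drm.h>
#include "exynos_drm.h"

static int exynos_gem_create_cached(int fd, unsigned int *handle)
{
	struct drm_exynos_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size = 1024 * 1024;	/* arbitrary 1 MiB */
	req.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE;

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
		return -1;

	*handle = req.handle;	/* handle of the new gem object */
	return 0;
}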
struct drm_exynos_g2d_get_ver {
__u32 major;
__u32 minor;
};
struct drm_exynos_g2d_cmd {
__u32 offset;
__u32 data;
};
enum drm_exynos_g2d_event_type {
G2D_EVENT_NOT,
G2D_EVENT_NONSTOP,
G2D_EVENT_STOP, /* not yet */
};
struct drm_exynos_g2d_set_cmdlist {
__u64 cmd;
__u64 cmd_gem;
__u32 cmd_nr;
__u32 cmd_gem_nr;
/* for g2d event */
__u64 event_type;
__u64 user_data;
};
struct drm_exynos_g2d_exec {
__u64 async;
};
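A hedged sketch of how userspace might fill these structures before submitting them; G2D_DUMMY_REG and G2D_DUMMY_VAL are placeholders rather than real G2D register definitions, and the ioctl calls themselves follow further below.

#include <string.h>
#include "exynos_drm.h"

#define G2D_DUMMY_REG	0x0000	/* hypothetical register offset */
#define G2D_DUMMY_VAL	0x0	/* hypothetical register value */

static void g2d_fill_cmdlist(struct drm_exynos_g2d_set_cmdlist *cl,
			     struct drm_exynos_g2d_cmd *cmd)
{
	/* one plain register write */
	cmd->offset = G2D_DUMMY_REG;
	cmd->data = G2D_DUMMY_VAL;

	memset(cl, 0, sizeof(*cl));
	cl->cmd = (__u64)(unsigned long)cmd;	/* array of register writes */
	cl->cmd_nr = 1;
	cl->cmd_gem = 0;			/* no gem-relative commands here */
	cl->cmd_gem_nr = 0;
	cl->event_type = G2D_EVENT_NONSTOP;	/* request a completion event */
	cl->user_data = 0;
}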
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
/* G2D */
#define DRM_EXYNOS_G2D_GET_VER 0x20
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
#define DRM_EXYNOS_G2D_EXEC 0x22
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
......@@ -116,12 +179,34 @@ enum e_drm_exynos_gem_mem_type {
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
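For illustration, a minimal sketch of exercising the new G2D ioctls, assuming fd is an open exynos DRM file descriptor and cl was prepared as in the cmdlist sketch above:

#include <stdio.h>
#include <xf86drm.h>
#include "exynos_drm.h"

static int g2d_submit(int fd, struct drm_exynos_g2d_set_cmdlist *cl)
{
	struct drm_exynos_g2d_get_ver ver;
	struct drm_exynos_g2d_exec exec = { .async = 0 };

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver))
		return -1;
	printf("G2D hardware %u.%u\n", ver.major, ver.minor);

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, cl))
		return -1;

	/* run the queued command lists */
	return drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
}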
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
struct drm_exynos_g2d_event {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 cmdlist_no;
__u32 reserved;
};
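Reading this event follows the usual DRM event pattern on the device file descriptor; a hedged sketch (the 1 KiB buffer size is arbitrary):

#include <unistd.h>
#include <xf86drm.h>
#include "exynos_drm.h"

static void g2d_read_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t i = 0;

	while (len > 0 && i + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_EXYNOS_G2D_EVENT) {
			struct drm_exynos_g2d_event *g2d =
				(struct drm_exynos_g2d_event *)e;

			/* g2d->cmdlist_no finished at tv_sec/tv_usec */
			(void)g2d;
		}
		i += e->length;
	}
}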
#ifdef __KERNEL__
/**
......@@ -169,16 +254,14 @@ struct exynos_drm_common_hdmi_pd {
/**
* Platform Specific Structure for DRM based HDMI core.
*
* @timing: default video mode for initializing
* @default_win: default window layer number to be used for UI.
* @bpp: default bit per pixel.
* @is_v13: set if the hdmi hardware is version 1.3.
* @cfg_hpd: function pointer to configure hdmi hotplug detection pin
* @get_hpd: function pointer to get value of hdmi hotplug detection pin
*/
struct exynos_drm_hdmi_pdata {
struct fb_videomode timing;
unsigned int default_win;
unsigned int bpp;
unsigned int is_v13:1;
bool is_v13;
void (*cfg_hpd)(bool external);
int (*get_hpd)(void);
};
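For reference, a hedged sketch of how board code might provide this platform data; the GPIO number and the board_hdmi_* helper names are illustrative only.

#include <linux/gpio.h>
#include <drm/exynos_drm.h>

#define BOARD_HDMI_HPD_GPIO	0	/* hypothetical hotplug-detect GPIO */

static void board_hdmi_cfg_hpd(bool external)
{
	/* board-specific HPD pin muxing/routing would go here */
}

static int board_hdmi_get_hpd(void)
{
	return gpio_get_value(BOARD_HDMI_HPD_GPIO);
}

static struct exynos_drm_hdmi_pdata board_hdmi_pdata = {
	.is_v13		= false,
	.cfg_hpd	= board_hdmi_cfg_hpd,
	.get_hpd	= board_hdmi_get_hpd,
};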
#endif /* __KERNEL__ */
......