Commit 00f09afd authored by Dave Airlie

Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

This patch set adds iommu support and a userptr feature for g2d, plus
minor fixups and code cleanups.

The iommu feature depends on the DMA mapping framework patches below,
which allow allocating a fully physically contiguous memory region.

- add sending of AVI and AUI info frames.
  . this adds code for composing AVI and AUI info frames
    and sending them every VSYNC for HDMI certification.
- bug fixes to the previous pull request.
- add some code cleanups

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (32 commits)
  drm/exynos: sending AVI and AUI info frames
  drm/exynos: Use devm_clk_get in exynos_drm_fimd.c
  drm/exynos: Use devm_* APIs in exynos_hdmi.c
  drm/exynos: Use devm_clk_get in exynos_mixer.c
  drm/exynos: Fix potential NULL pointer dereference
  drm/exynos: Use devm_clk_get in exynos_drm_g2d.c
  drm/exynos: use sgt instead of pages for framebuffer address
  drm: exynos: fix for loosing display mode header during mode adjustment
  drm/exynos: fix memory leak to EDID block
  drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer
  drm/exynos: add exynos drm specific fb_mmap function
  drm/exynos: make sure that overlay data are updated
  drm/exynos: add vm_ops to specific gem mmaper
  drm/exynos: add userptr feature for g2d module
  drm/exynos: remove unnecessary sg_alloc_table call
  drm: exynos: fix for mapping of dma buffers
  drm/exynos: remove EXYNOS_BO_NONCONTIG type checking.
  drm/exynos: add iommu support for g2d
  drm/exynos: add iommu support for hdmi driver
  drm/exynos: add iommu support to fimd driver
  ...
parents 7136470d a144c2e9
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default DMA-mapping subsystem is allowed to assemble the buffer
+allocated by dma_alloc_attrs() function from individual pages if it can
+be mapped as contiguous chunk into device dma address space. By
+specifying this attribute the allocated buffer is forced to be contiguous
+also in physical memory.
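
For illustration, a minimal sketch of how a driver could request such an
allocation; the helper name and its arguments are placeholder assumptions,
only the attribute API itself is from this patch:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	/* illustrative helper, not part of the patch */
	static void *alloc_contig_buffer(struct device *dev, size_t size,
					 dma_addr_t *dma_handle)
	{
		struct dma_attrs attrs;

		init_dma_attrs(&attrs);
		/* force the backing pages to be physically contiguous */
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

		return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
				       &attrs);
	}

The buffer would be freed with dma_free_attrs() using the same attrs.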
@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	if (!pages)
 		return NULL;
 
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	{
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
 	while (count) {
 		int j, order = __fls(count);
@@ -1083,14 +1101,21 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i;
-	for (i = 0; i < count; i++)
-		if (pages[i])
-			__free_pages(pages[i], 0);
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
 	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 	return NULL;
 }
@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
...
@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
 
+	spin_lock(&dev->event_lock);
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
 		if (e->pipe != crtc)
 			continue;
@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 		drm_vblank_put(dev, e->pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
+	spin_unlock(&dev->event_lock);
 
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
...
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
...
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU)	+= exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF)	+= exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
...
@@ -33,73 +33,42 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	int ret = 0;
+	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
-	}
+	init_dma_attrs(&buf->dma_attrs);
 
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
-	}
+	if (flags & EXYNOS_BO_NONCONTIG)
+		attr = DMA_ATTR_WRITE_COMBINE;
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
-	}
+	dma_set_attr(attr, &buf->dma_attrs);
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
+	buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
 	if (!buf->kvaddr) {
 		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
+		return -ENOMEM;
 	}
 
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
 		ret = -ENOMEM;
-		goto err2;
+		goto err_free_attrs;
 	}
 
-	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
-	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
-		sgl = sg_next(sgl);
-		i++;
+	ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
+			buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
+		goto err_free_sgt;
 	}
 
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
@@ -108,14 +77,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
-	buf->dma_addr = (dma_addr_t)NULL;
-err1:
-	sg_free_table(buf->sgt);
+
+err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	buf->dma_addr = (dma_addr_t)NULL;
 
 	return ret;
 }
@@ -125,16 +94,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
@@ -150,11 +109,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
...
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
...
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 			goto out;
 		}
 
+		spin_lock_irq(&dev->event_lock);
 		list_add_tail(&event->base.link,
 				&dev_priv->pageflip_event_list);
+		spin_unlock_irq(&dev->event_lock);
 
 		crtc->fb = fb;
 		ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
 						    NULL);
 		if (ret) {
 			crtc->fb = old_fb;
+
+			spin_lock_irq(&dev->event_lock);
 			drm_vblank_put(dev, exynos_crtc->pipe);
 			list_del(&event->base.link);
+			spin_unlock_irq(&dev->event_lock);
+
 			goto out;
 		}
...
@@ -30,26 +30,22 @@
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
+		struct exynos_drm_gem_buf *buf)
 {
 	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	int ret;
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
 		goto out;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-	if (ret)
+	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
+			buf->dma_addr, buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
 		goto err_free_sgt;
-
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
-
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+	}
 
 	return sgt;
@@ -68,32 +64,30 @@ static struct sg_table *
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
 	int nents;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
-
 	buf = gem_obj->buffer;
-
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
-		goto err_unlock;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return sgt;
 	}
 
-	npages = buf->size / buf->page_size;
+	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+	sgt = exynos_get_sgt(dev, buf);
+	if (!sgt)
 		goto err_unlock;
-	}
 
 	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = NULL;
+		goto err_unlock;
+	}
 
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -105,6 +99,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir)
 {
 	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
 	sg_free_table(sgt);
 	kfree(sgt);
 	sgt = NULL;
@@ -196,7 +191,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +227,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}
 
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
@@ -277,9 +260,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
...
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create mapping to manage iommu table and set a pointer to iommu
+	 * mapping structure to iommu_mapping of private data.
+	 * also this iommu_mapping can be used to check if iommu is supported
+	 * or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +139,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
 err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
...
@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
 };
 
 struct drm_exynos_file_private {
@@ -241,6 +240,13 @@ struct drm_exynos_file_private {
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *	with iommu, device address space starts from this address
+ *	otherwise default one.
+ * @da_space_size: size of device address space.
+ *	if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +261,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
...
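
These fields feed drm_create_iommu_mapping() in the new exynos_drm_iommu.c
further down; a hedged sketch of the intended interaction, with the default
values taken from exynos_drm_iommu.h (the wrapper function itself is
illustrative, not part of the patch):

	/* illustrative, not part of the patch: any field left at zero
	 * falls back to the EXYNOS_DEV_ADDR_* defaults. */
	static void example_set_da_space(struct exynos_drm_private *priv)
	{
		priv->da_start = 0x20000000;		/* EXYNOS_DEV_ADDR_START */
		priv->da_space_size = 0x40000000;	/* EXYNOS_DEV_ADDR_SIZE */
		priv->da_space_order = 0x4;		/* EXYNOS_DEV_ADDR_ORDER */
	}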
@@ -226,8 +226,47 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	 * already updated or not by exynos_drm_encoder_dpms function.
 	 */
 	exynos_encoder->updated = true;
+
+	/*
+	 * In case of setcrtc, there is no way to update encoder's dpms
+	 * so update it here.
+	 */
+	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_overlay_ops *overlay_ops;
+	struct exynos_drm_manager *manager;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+
+		/* if exynos was disabled, just ignore it. */
+		if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
+			continue;
+
+		manager = exynos_encoder->manager;
+		overlay_ops = manager->overlay_ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 * real hardware.
+		 */
+		if (overlay_ops->wait_for_vblank)
+			overlay_ops->wait_for_vblank(manager->dev);
+	}
 }
 
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -499,14 +538,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, not support physically non-continuous memory
+	 * for framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);
 
 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,14 +160,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
@@ -143,7 +186,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
 	return &exynos_fb->fb;
 }
@@ -214,6 +256,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
@@ -222,6 +267,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			exynos_drm_fb_destroy(fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
...
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
 	struct exynos_drm_gem_obj	*exynos_gem_obj;
 };
 
+static int exynos_drm_fb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
+		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_mmap        = exynos_drm_fb_mmap,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
@@ -87,7 +117,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+	fbi->fix.smem_start = (unsigned long)
+			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
...
@@ -25,6 +25,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
 
 /*
  * FIMD is stand for Fully Interactive Mobile Display and
@@ -61,11 +62,11 @@ struct fimd_driver_data {
 	unsigned int timing_base;
 };
 
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.timing_base = 0x0,
 };
 
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
 	.timing_base = 0x20000,
 };
@@ -623,7 +624,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -633,8 +633,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -642,22 +640,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -709,6 +692,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	 */
 	drm_dev->vblank_disable_allowed = 1;
 
+	/* attach this sub driver to iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_attach_device(drm_dev, dev);
+
 	return 0;
 }
@@ -716,7 +703,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	/* TODO. */
+	/* detach this sub driver from iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_detach_device(drm_dev, dev);
 }
 
 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -857,18 +846,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->bus_clk = clk_get(dev, "fimd");
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
 	if (IS_ERR(ctx->bus_clk)) {
 		dev_err(dev, "failed to get bus clock\n");
-		ret = PTR_ERR(ctx->bus_clk);
-		goto err_clk_get;
+		return PTR_ERR(ctx->bus_clk);
 	}
 
-	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
 	if (IS_ERR(ctx->lcd_clk)) {
 		dev_err(dev, "failed to get lcd clock\n");
-		ret = PTR_ERR(ctx->lcd_clk);
-		goto err_bus_clk;
+		return PTR_ERR(ctx->lcd_clk);
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +863,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {
 		dev_err(dev, "failed to map registers\n");
-		ret = -ENXIO;
-		goto err_clk;
+		return -ENXIO;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return -ENXIO;
 	}
 
 	ctx->irq = res->start;
@@ -892,7 +878,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 			0, "drm_fimd", ctx);
 	if (ret) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return ret;
 	}
 
 	ctx->vidcon0 = pdata->vidcon0;
@@ -926,17 +912,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	exynos_drm_subdrv_register(subdrv);
 
 	return 0;
-
-err_clk:
-	clk_disable(ctx->lcd_clk);
-	clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-	clk_disable(ctx->bus_clk);
-	clk_put(ctx->bus_clk);
-
-err_clk_get:
-	return ret;
 }
 
 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +935,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 out:
 	pm_runtime_disable(dev);
 
-	clk_put(ctx->lcd_clk);
-	clk_put(ctx->bus_clk);
-
 	return 0;
 }
...
@@ -35,21 +35,25 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
 * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ *	VM_PFNMAP or not.
 */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
+	unsigned long		userptr;
 	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	unsigned int		write;
 	struct sg_table		*sgt;
-	struct page		**pages;
-	unsigned long		page_size;
 	unsigned long		size;
+	bool			pfnmap;
 };
 
 /*
@@ -65,6 +69,7 @@ struct exynos_drm_gem_buf {
  *	or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
+ * @vma: a pointer to vm_area.
 * @flags: indicate memory type to allocated buffer and cache attribute.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +79,7 @@ struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
 	unsigned long			size;
+	struct vm_area_struct		*vma;
 	unsigned int			flags;
 };
@@ -104,9 +110,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /*
 * put dma address from gem handle and this function could be used for
@@ -115,7 +121,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +134,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
@@ -163,4 +173,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+					struct sg_table *sgt,
+					enum dma_data_direction dir);
+
 #endif
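
For context, a hedged sketch of how a caller (such as the g2d userptr path,
whose diff is not shown here) might combine these helpers; the wrapper, its
locking, and its error handling are illustrative assumptions, only the
exynos_gem_* helpers come from this header:

	/* illustrative wrapper, not part of the patch;
	 * callers would hold current->mm->mmap_sem. */
	static int example_map_userptr(struct drm_device *drm_dev,
				       unsigned long userptr,
				       unsigned int npages,
				       struct page **pages,
				       struct sg_table *sgt)
	{
		struct vm_area_struct *vma;
		int ret;

		vma = find_vma(current->mm, userptr);
		if (!vma)
			return -EFAULT;

		/* pin the pages backing the user address range */
		ret = exynos_gem_get_pages_from_userptr(userptr, npages,
							pages, vma);
		if (ret < 0)
			return ret;

		/* describe the pinned pages with an sg table ... */
		ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
				(unsigned long)npages << PAGE_SHIFT,
				GFP_KERNEL);
		if (ret < 0)
			return ret;

		/* ... and map them into the device address space */
		return exynos_gem_map_sgt_with_dma(drm_dev, sgt,
						   DMA_BIDIRECTIONAL);
	}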
@@ -346,9 +346,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 	ctx->hdmi_ctx->drm_dev = drm_dev;
 	ctx->mixer_ctx->drm_dev = drm_dev;
 
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
 	return 0;
 }
 
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct drm_hdmi_context *ctx;
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -368,6 +382,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 	subdrv->dev = dev;
 	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
+	subdrv->remove = hdmi_subdrv_remove;
 
 	platform_set_drvdata(pdev, subdrv);
...
@@ -62,6 +62,7 @@ struct exynos_hdmi_ops {
 
 struct exynos_mixer_ops {
 	/* manager */
+	int (*iommu_on)(void *ctx, bool enable);
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
 	void (*dpms)(void *ctx, int mode);
...
/* exynos_drm_iommu.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
/*
* drm_create_iommu_mapping - create a mapping structure
*
* @drm_dev: DRM device
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
if (!priv->da_start)
priv->da_start = EXYNOS_DEV_ADDR_START;
if (!priv->da_space_size)
priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
if (!priv->da_space_order)
priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
priv->da_space_size,
priv->da_space_order);
if (!mapping)
return -ENOMEM;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(dev, 0xffffffffu);
dev->archdata.mapping = mapping;
return 0;
}
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
*
* if mapping->kref becomes 0 then all things related to iommu mapping
* will be released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct device *dev = drm_dev->dev;
arm_iommu_release_mapping(dev->archdata.mapping);
}
/*
 * drm_iommu_attach_device - attach device to iommu mapping
*
* @drm_dev: DRM device
* @subdrv_dev: device to be attach
*
* This function should be called by sub drivers to attach it to iommu
* mapping.
*/
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
int ret;
if (!dev->archdata.mapping) {
DRM_ERROR("iommu_mapping is null.\n");
return -EFAULT;
}
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
if (ret < 0) {
DRM_DEBUG_KMS("failed iommu attach.\n");
return ret;
}
/*
* Set dma_ops to drm_device just one time.
*
* The dma mapping api needs device object and the api is used
 * to allocate physical memory and map it with iommu table.
* If iommu attach succeeded, the sub driver would have dma_ops
* for iommu and also all sub drivers have same dma_ops.
*/
if (!dev->archdata.dma_ops)
dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
return 0;
}
/*
 * drm_iommu_detach_device - detach device address space mapping from device
*
* @drm_dev: DRM device
* @subdrv_dev: device to be detached
*
* This function should be called by sub drivers to detach it from iommu
* mapping
*/
void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
if (!mapping || !mapping->domain)
return;
iommu_detach_device(mapping->domain, subdrv_dev);
drm_release_iommu_mapping(drm_dev);
}
/* exynos_drm_iommu.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_IOMMU_H_
#define _EXYNOS_DRM_IOMMU_H_
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
#define EXYNOS_DEV_ADDR_ORDER 0x4
#ifdef CONFIG_DRM_EXYNOS_IOMMU
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev);
void drm_iommu_detach_device(struct drm_device *dev_dev,
struct device *subdrv_dev);
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct device *dev = drm_dev->dev;
return dev->archdata.mapping ? true : false;
#else
return false;
#endif
}
#else
struct dma_iommu_mapping;
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
return 0;
}
static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
}
static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
return 0;
}
static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
}
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
return false;
}
#endif
#endif
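
A short usage sketch, mirroring the fimd changes above, of how a sub driver
consumes this interface at probe time (the function name is illustrative,
not part of the patch):

	/* illustrative sub driver probe hook, modelled on fimd_subdrv_probe() */
	static int example_subdrv_probe(struct drm_device *drm_dev,
					struct device *dev)
	{
		/* attach only when an iommu mapping was created at load time */
		if (is_drm_iommu_supported(drm_dev))
			return drm_iommu_attach_device(drm_dev, dev);

		return 0;
	}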
@@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		return ret;
 
 	plane->crtc = crtc;
-	plane->fb = crtc->fb;
 
 	exynos_plane_commit(plane);
 	exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
...
@@ -382,7 +382,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -392,8 +391,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -401,22 +398,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
...
exynos_mixer.c
@@ -36,6 +36,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
 
 #define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
@@ -80,6 +81,7 @@ enum mixer_version_id {
 struct mixer_context {
 	struct device		*dev;
+	struct drm_device	*drm_dev;
 	int			pipe;
 	bool			interlace;
 	bool			powered;
@@ -90,6 +92,7 @@ struct mixer_context {
 	struct mixer_resources	mixer_res;
 	struct hdmi_win_data	win_data[MIXER_WIN_NR];
 	enum mixer_version_id	mxr_ver;
+	void			*parent_ctx;
 };
 
 struct mixer_drv_data {
@@ -665,6 +668,24 @@ static void mixer_win_reset(struct mixer_context *ctx)
 	spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
+static int mixer_iommu_on(void *ctx, bool enable)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct mixer_context *mdata = ctx;
+	struct drm_device *drm_dev;
+
+	drm_hdmi_ctx = mdata->parent_ctx;
+	drm_dev = drm_hdmi_ctx->drm_dev;
+
+	if (is_drm_iommu_supported(drm_dev)) {
+		if (enable)
+			return drm_iommu_attach_device(drm_dev, mdata->dev);
+
+		drm_iommu_detach_device(drm_dev, mdata->dev);
+	}
+
+	return 0;
+}
+
 static void mixer_poweron(struct mixer_context *ctx)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
@@ -866,6 +887,7 @@ static void mixer_win_disable(void *ctx, int win)
 static struct exynos_mixer_ops mixer_ops = {
 	/* manager */
+	.iommu_on		= mixer_iommu_on,
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
 	.dpms			= mixer_dpms,
@@ -884,7 +906,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -894,7 +915,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -902,16 +922,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
-	if (is_checked)
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
@@ -971,57 +984,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 	spin_lock_init(&mixer_res->reg_slock);
 
-	mixer_res->mixer = clk_get(dev, "mixer");
+	mixer_res->mixer = devm_clk_get(dev, "mixer");
 	if (IS_ERR_OR_NULL(mixer_res->mixer)) {
 		dev_err(dev, "failed to get clock 'mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
-	mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
 		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
 							resource_size(res));
 	if (mixer_res->mixer_regs == NULL) {
 		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res == NULL) {
 		dev_err(dev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
 							0, "drm_mixer", ctx);
 	if (ret) {
 		dev_err(dev, "request interrupt failed.\n");
-		goto fail;
+		return ret;
 	}
 	mixer_res->irq = res->start;
 
 	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
-		clk_put(mixer_res->sclk_hdmi);
-	if (!IS_ERR_OR_NULL(mixer_res->mixer))
-		clk_put(mixer_res->mixer);
-	return ret;
 }
 
 static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1032,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	struct device *dev = &pdev->dev;
 	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
 	struct resource *res;
-	int ret;
 
-	mixer_res->vp = clk_get(dev, "vp");
+	mixer_res->vp = devm_clk_get(dev, "vp");
 	if (IS_ERR_OR_NULL(mixer_res->vp)) {
 		dev_err(dev, "failed to get clock 'vp'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
-	mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
 		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
-	mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
 		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
 	if (mixer_res->sclk_hdmi)
@@ -1058,28 +1055,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (res == NULL) {
 		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
 							resource_size(res));
 	if (mixer_res->vp_regs == NULL) {
 		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
-		clk_put(mixer_res->sclk_dac);
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
-		clk_put(mixer_res->sclk_mixer);
-	if (!IS_ERR_OR_NULL(mixer_res->vp))
-		clk_put(mixer_res->vp);
-	return ret;
 }
 
 static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,6 +1135,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
 	}
 
 	ctx->dev = &pdev->dev;
+	ctx->parent_ctx = (void *)drm_hdmi_ctx;
 	drm_hdmi_ctx->ctx = (void *)ctx;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->mxr_ver = drv->version;
...
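
The pattern in both resource-init functions above is the same: clk_get()/clk_put() plus a goto-fail unwind chain collapses into devm_clk_get() plus direct returns, because devm-managed resources are released automatically when probe fails or the device is unbound. A minimal sketch of that pattern, with placeholder names and not code from this tree:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hedged sketch: a devm_* resource needs no explicit release, so every
 * error path can simply return. */
static int my_res_init(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "mixer");

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock 'mixer'\n");
		return PTR_ERR(clk);	/* no clk_put(), no fail: label */
	}

	return 0;
}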
regs-hdmi.h
@@ -298,14 +298,14 @@
 #define HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714)
 #define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718)
 #define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n-1))
 
 #define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800)
 #define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810)
 #define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814)
 #define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818)
 #define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n-1))
 
 #define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900)
 #define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C)
@@ -338,6 +338,19 @@
 #define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60)
 #define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64)
 
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT	(0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC	(1 << 1)
+#define AVI_ACTIVE_FORMAT_VALID		(1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID	(1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN		(0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT	(0 << 0)
+
 /* HDCP related registers */
 #define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n))
 #define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n))
...
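
The switch to 4 * (n-1) lines the register array up with the infoframe numbering: AVI payload bytes count from PB1, while the register block starts at offset 0x0720, so a 1-based n needs the -1. A quick sanity check, treating HDMI_CORE_BASE as a plain offset purely for illustration:

/* Hedged arithmetic check; HDMI_CORE_BASE is assumed to be the identity
 * here, which is an illustration rather than the real register map. */
#define HDMI_CORE_BASE(x)	(x)
#define HDMI_AVI_BYTE(n)	HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))

/* HDMI_AVI_BYTE(1)  -> 0x0720  (first AVI payload byte, PB1)
 * HDMI_AVI_BYTE(13) -> 0x0750  (last AVI payload byte, PB13)
 * with the old 4 * (n) form, PB1 would have landed at 0x0724,
 * one register too high. */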
dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
 	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
+	DMA_ATTR_FORCE_CONTIGUOUS,
 	DMA_ATTR_MAX,
 };
...
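
DMA_ATTR_FORCE_CONTIGUOUS travels through the usual dma_attrs plumbing. A minimal allocation sketch against the struct dma_attrs-era API used in this tree; my_dev is a placeholder:

#include <linux/dma-mapping.h>

/* Hedged sketch: request a buffer that is contiguous in physical memory
 * even when an IOMMU could otherwise stitch it from scattered pages. */
static void *alloc_contig_buffer(struct device *my_dev, size_t size,
				 dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
	return dma_alloc_attrs(my_dev, size, handle, GFP_KERNEL, &attrs);
}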
exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
 	__u64					cmd;
-	__u64					cmd_gem;
+	__u64					cmd_buf;
 	__u32					cmd_nr;
-	__u32					cmd_gem_nr;
+	__u32					cmd_buf_nr;
 
 	/* for g2d event */
 	__u64					event_type;
...
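
For context, a heavily hedged userspace sketch of the new userptr path. Only G2D_BUF_USERPTR, struct drm_exynos_g2d_userptr, and the renamed cmd_buf fields come from the UAPI above; the register offset, the 32-bit pointer cast, and the exact flagging convention are assumptions, and the authoritative contract lives in exynos_drm_g2d.c:

#include <drm/exynos_drm.h>

#define MY_G2D_DST_BASE_ADDR	0x0404	/* placeholder register offset */

/* Hypothetical: describe a plain user-memory buffer instead of a GEM
 * handle (32-bit userspace assumed so the pointer fits in .data). */
static void fill_userptr_cmd(struct drm_exynos_g2d_userptr *up,
			     struct drm_exynos_g2d_cmd *cmd,
			     void *buf, unsigned long len)
{
	up->userptr = (unsigned long)buf;
	up->size = len;

	cmd->offset = MY_G2D_DST_BASE_ADDR;
	/* bit 31 marks .data as pointing at the userptr descriptor */
	cmd->data = (__u32)(unsigned long)up | G2D_BUF_USERPTR;
}

/* The cmd would then be referenced from
 * drm_exynos_g2d_set_cmdlist::cmd_buf with cmd_buf_nr = 1 and submitted
 * through the G2D set_cmdlist ioctl. */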