Commit 42532512 authored by Dave Airlie

Merge branch 'exynos-drm-next' of...

Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

   Sorry for the late pull request. It includes some enhancements
   for Exynos DRM, new feature support, cleanups and fixups as
   listed below:

   - Consider low-power transmission for the DRM MIPI DSI module,
     and also add non-continuous clock mode support to the Exynos
     MIPI DSI driver (see the sketch after this list).
   - Add Exynos3250 SoC support.
   - Enhance and clean up the IPP framework and the FIMC driver.
   - Update to use component match support and fix up the
     de-initialization order.
   - Remove the direct mmap interface and the related Exynos-specific
     code, and use the generic DRM mmap interface instead.
     The Exynos-specific interface will also be removed from the
     userspace library, libdrm, soon.
   - Use universal planes, which allows replacing the fake primary
     plane with a real one.
   - Some code cleanups and fixups.
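
   As a rough peripheral-side illustration of the first item: a DSI
   panel driver opts into low-power command transmission and
   non-continuous clock mode through its mode_flags. The sketch below
   is hypothetical (the probe function and its values are only
   illustrative); the flags, MIPI_DSI_FMT_RGB888 and mipi_dsi_attach()
   come from the DSI core header.

       #include <drm/drm_mipi_dsi.h>

       /* hypothetical panel probe: request LP-mode commands and allow
        * the host to gate the HS clock between HS transmissions */
       static int example_panel_probe(struct mipi_dsi_device *dsi)
       {
               dsi->lanes = 4;
               dsi->format = MIPI_DSI_FMT_RGB888;
               dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
                                 MIPI_DSI_CLOCK_NON_CONTINUOUS;

               return mipi_dsi_attach(dsi);
       }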

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (40 commits)
  drm/exynos: switch to universal plane API
  drm/exynos: use drm generic mmap interface
  drm/exynos: remove DRM_EXYNOS_GEM_MAP_OFFSET ioctl
  drm/exynos: factor out initial setting of each driver
  drm/exynos/hdmi: unregister connector on removal
  drm/exynos/dp: unregister connector on removal
  drm/exynos/dpi: unregister connector and panel on removal
  drm/exynos/dsi: unregister connector on removal
  drm/exynos/fb: free exynos framebuffer on error
  drm/exynos/fbdev: fix fbdev gem object cleanup
  drm/exynos: fix drm driver de-initialization order
  drm/exynos/ipp: traverse ipp drivers list safely
  drm/exynos: update to use component match support
  drm/exynos/ipp: add file checks for ioctls
  drm/exynos/ipp: remove file argument from node related functions
  drm/exynos/fimc: fix source buffer registers
  drm/exynos/fimc: simplify buffer queuing
  drm/exynos/fimc: do not enable fimc twice
  drm/exynos/fimc: avoid clearing overflow bits
  drm/exynos/ipp: remove events during command cleaning
  ...
parents 6b654af5 72ed6ccd
......@@ -2,6 +2,7 @@ Exynos MIPI DSI Master
Required properties:
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
- reg: physical base address and length of the registers set for the device
......
......@@ -9,6 +9,7 @@ Required properties:
"samsung,s3c2443-fimd"; /* for S3C24XX SoCs */
"samsung,s3c6400-fimd"; /* for S3C64XX SoCs */
"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
"samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
......
......@@ -132,6 +132,12 @@ pmu_system_controller: system-controller@10020000 {
reg = <0x10020000 0x4000>;
};
mipi_phy: video-phy@10020710 {
compatible = "samsung,s5pv210-mipi-video-phy";
reg = <0x10020710 8>;
#phy-cells = <1>;
};
pd_cam: cam-power-domain@10023C00 {
compatible = "samsung,exynos4210-pd";
reg = <0x10023C00 0x20>;
......@@ -216,6 +222,33 @@ pinctrl_0: pinctrl@11400000 {
interrupts = <0 240 0>;
};
fimd: fimd@11c00000 {
compatible = "samsung,exynos3250-fimd";
reg = <0x11c00000 0x30000>;
interrupt-names = "fifo", "vsync", "lcd_sys";
interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
clock-names = "sclk_fimd", "fimd";
samsung,power-domain = <&pd_lcd0>;
samsung,sysreg = <&sys_reg>;
status = "disabled";
};
dsi_0: dsi@11C80000 {
compatible = "samsung,exynos3250-mipi-dsi";
reg = <0x11C80000 0x10000>;
interrupts = <0 83 0>;
samsung,phy-type = <0>;
samsung,power-domain = <&pd_lcd0>;
phys = <&mipi_phy 1>;
phy-names = "dsim";
clocks = <&cmu CLK_DSIM0>, <&cmu CLK_SCLK_MIPI0>;
clock-names = "bus_clk", "pll_clk";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
mshc_0: mshc@12510000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12510000 0x1000>;
......
......@@ -231,6 +231,9 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
break;
}
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg.flags = MIPI_DSI_MSG_USE_LPM;
return ops->transfer(dsi->host, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
......@@ -260,6 +263,9 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
if (!ops || !ops->transfer)
return -ENOSYS;
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg.flags = MIPI_DSI_MSG_USE_LPM;
return ops->transfer(dsi->host, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_read);
......
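With the two hunks above, any DCS transfer issued through the core helpers
inherits low-power mode from the peripheral's mode_flags. A hedged call-site
sketch (the function name is hypothetical; 0x11 is the standard
MIPI_DCS_EXIT_SLEEP_MODE opcode; the mipi_dsi_dcs_write() signature is the
one shown in the hunk above):

#include <drm/drm_mipi_dsi.h>

/* sent in LP mode once MIPI_DSI_MODE_LPM is set in dsi->mode_flags */
static int example_panel_exit_sleep(struct mipi_dsi_device *dsi)
{
	static const u8 exit_sleep = 0x11;
	ssize_t ret = mipi_dsi_dcs_write(dsi, &exit_sleep, sizeof(exit_sleep));

	return ret < 0 ? ret : 0;
}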
......@@ -937,6 +937,8 @@ static enum drm_connector_status exynos_dp_detect(
static void exynos_dp_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static struct drm_connector_funcs exynos_dp_connector_funcs = {
......@@ -1358,8 +1360,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
exynos_dp_connector_destroy(&dp->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dp->connector);
}
static const struct component_ops exynos_dp_ops = {
......
......@@ -32,7 +32,6 @@ enum exynos_crtc_mode {
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
* @drm_plane: pointer of private plane object for this crtc
* @manager: the manager associated with this crtc
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
......@@ -46,7 +45,6 @@ enum exynos_crtc_mode {
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct drm_plane *plane;
struct exynos_drm_manager *manager;
unsigned int pipe;
unsigned int dpms;
......@@ -94,12 +92,12 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
exynos_plane_commit(exynos_crtc->plane);
exynos_plane_commit(crtc->primary);
if (manager->ops->commit)
manager->ops->commit(manager);
exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
}
static bool
......@@ -123,10 +121,9 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
struct drm_plane *plane = exynos_crtc->plane;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
......@@ -134,29 +131,21 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
crtc_w = crtc->primary->fb->width - x;
crtc_h = crtc->primary->fb->height - y;
crtc_w = fb->width - x;
crtc_h = fb->height - y;
if (manager->ops->mode_set)
manager->ops->mode_set(manager, &crtc->mode);
ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
if (ret)
return ret;
plane->crtc = crtc;
plane->fb = crtc->primary->fb;
drm_framebuffer_reference(plane->fb);
return 0;
return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
}
static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_plane *plane = exynos_crtc->plane;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
......@@ -167,11 +156,11 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
return -EPERM;
}
crtc_w = crtc->primary->fb->width - x;
crtc_h = crtc->primary->fb->height - y;
crtc_w = fb->width - x;
crtc_h = fb->height - y;
ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
if (ret)
return ret;
......@@ -304,8 +293,7 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
exynos_drm_crtc_commit(crtc);
break;
case CRTC_MODE_BLANK:
exynos_plane_dpms(exynos_crtc->plane,
DRM_MODE_DPMS_OFF);
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
break;
default:
break;
......@@ -351,8 +339,10 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
{
struct exynos_drm_crtc *exynos_crtc;
struct drm_plane *plane;
struct exynos_drm_private *private = manager->drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
......@@ -364,11 +354,11 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->manager = manager;
exynos_crtc->pipe = manager->pipe;
exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
1 << manager->pipe, true);
if (!exynos_crtc->plane) {
kfree(exynos_crtc);
return -ENOMEM;
plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto err_plane;
}
manager->crtc = &exynos_crtc->drm_crtc;
......@@ -376,12 +366,22 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
private->crtc[manager->pipe] = crtc;
drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs);
if (ret < 0)
goto err_crtc;
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
exynos_drm_crtc_attach_mode_property(crtc);
return 0;
err_crtc:
plane->funcs->destroy(plane);
err_plane:
kfree(exynos_crtc);
return ret;
}
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
......
......@@ -342,8 +342,12 @@ int exynos_dpi_remove(struct device *dev)
struct exynos_dpi *ctx = exynos_dpi_display.ctx;
exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
exynos_dpi_connector_destroy(&ctx->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);
if (ctx->panel)
drm_panel_detach(ctx->panel);
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
......
......@@ -15,7 +15,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <linux/anon_inodes.h>
#include <linux/component.h>
#include <drm/exynos_drm.h>
......@@ -86,8 +85,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
struct drm_plane *plane;
unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
plane = exynos_plane_init(dev, possible_crtcs,
DRM_PLANE_TYPE_OVERLAY);
if (IS_ERR(plane))
goto err_mode_config_cleanup;
}
......@@ -116,6 +116,23 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
/* force connectors detection */
drm_helper_hpd_irq_event(dev);
/*
* Enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we don't use the drm irq handler but
* a driver-specific one instead, because the drm framework
* supports only one irq handler.
*/
dev->irq_enabled = true;
/*
* With vblank_disable_allowed = true, the vblank interrupt will be
* disabled by the drm timer once the current process gives up
* ownership of the vblank event (after drm_vblank_put() is called).
*/
dev->vblank_disable_allowed = true;
return 0;
err_unbind_all:
......@@ -136,23 +153,19 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_device_subdrv_remove(dev);
exynos_drm_fbdev_fini(dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
component_unbind_all(dev->dev, dev);
drm_vblank_cleanup(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
component_unbind_all(dev->dev, dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
.mmap = exynos_drm_gem_mmap_buffer,
};
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
......@@ -191,7 +204,6 @@ static int exynos_drm_resume(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
struct file *anon_filp;
int ret;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
......@@ -204,21 +216,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
if (ret)
goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
ret = PTR_ERR(anon_filp);
goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
err_subdrv_close:
exynos_drm_subdrv_close(dev, file);
err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
......@@ -234,7 +233,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
struct drm_exynos_file_private *file_priv;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
......@@ -260,10 +258,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
spin_unlock_irqrestore(&dev->event_lock, flags);
file_priv = file->driver_priv;
if (file_priv->anon_filp)
fput(file_priv->anon_filp);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
......@@ -282,11 +276,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
......@@ -486,21 +475,20 @@ void exynos_drm_component_del(struct device *dev,
mutex_unlock(&drm_component_lock);
}
static int compare_of(struct device *dev, void *data)
static int compare_dev(struct device *dev, void *data)
{
return dev == (struct device *)data;
}
static int exynos_drm_add_components(struct device *dev, struct master *m)
static struct component_match *exynos_drm_match_add(struct device *dev)
{
struct component_match *match = NULL;
struct component_dev *cdev;
unsigned int attach_cnt = 0;
mutex_lock(&drm_component_lock);
list_for_each_entry(cdev, &drm_component_list, list) {
int ret;
/*
* Add components to master only in case that crtc and
* encoder/connector device objects exist.
......@@ -515,16 +503,10 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
/*
* fimd and dpi modules have same device object so add
* only crtc device object in this case.
*
* TODO. if dpi module follows driver-model driver then
* below codes can be removed.
*/
if (cdev->crtc_dev == cdev->conn_dev) {
ret = component_master_add_child(m, compare_of,
component_match_add(dev, &match, compare_dev,
cdev->crtc_dev);
if (ret < 0)
return ret;
goto out_lock;
}
......@@ -534,11 +516,8 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
* connector/encoder need pipe number of crtc when they
* are created.
*/
ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
ret |= component_master_add_child(m, compare_of,
cdev->conn_dev);
if (ret < 0)
return ret;
component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
component_match_add(dev, &match, compare_dev, cdev->conn_dev);
out_lock:
mutex_lock(&drm_component_lock);
......@@ -546,7 +525,7 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
mutex_unlock(&drm_component_lock);
return attach_cnt ? 0 : -ENODEV;
return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
}
static int exynos_drm_bind(struct device *dev)
......@@ -560,13 +539,13 @@ static void exynos_drm_unbind(struct device *dev)
}
static const struct component_master_ops exynos_drm_ops = {
.add_components = exynos_drm_add_components,
.bind = exynos_drm_bind,
.unbind = exynos_drm_unbind,
};
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
struct component_match *match;
int ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
......@@ -633,13 +612,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
goto err_unregister_ipp_drv;
#endif
ret = component_master_add(&pdev->dev, &exynos_drm_ops);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match)) {
ret = PTR_ERR(match);
goto err_unregister_resources;
}
ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
match);
if (ret < 0)
DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n");
goto err_unregister_resources;
return 0;
return ret;
err_unregister_resources:
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
err_unregister_ipp_drv:
platform_driver_unregister(&ipp_driver);
err_unregister_gsc_drv:
......
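The probe rework above replaces the old add_components() callback with a
component_match built up front and handed to
component_master_add_with_match(). A minimal, generic sketch of that pattern
(the sketch_* names are hypothetical; only the component framework calls are
real kernel API):

#include <linux/component.h>
#include <linux/platform_device.h>

static int sketch_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}

static int sketch_register_master(struct platform_device *pdev,
				  const struct component_master_ops *ops,
				  struct device *crtc_dev,
				  struct device *conn_dev)
{
	struct component_match *match = NULL;

	/* collect every sub-device that must bind before the master */
	component_match_add(&pdev->dev, &match, sketch_compare_dev, crtc_dev);
	component_match_add(&pdev->dev, &match, sketch_compare_dev, conn_dev);

	/* probing is deferred until all matched components register */
	return component_master_add_with_match(&pdev->dev, ops, match);
}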
......@@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
struct device *ipp_dev;
struct file *anon_filp;
};
/*
......
......@@ -114,6 +114,8 @@
#define DSIM_SYNC_INFORM (1 << 27)
#define DSIM_EOT_DISABLE (1 << 28)
#define DSIM_MFLUSH_VS (1 << 29)
/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
#define DSIM_CLKLANE_STOP (1 << 30)
/* DSIM_ESCMODE */
#define DSIM_TX_TRIGGER_RST (1 << 4)
......@@ -262,6 +264,7 @@ struct exynos_dsi_driver_data {
unsigned int plltmr_reg;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
};
struct exynos_dsi {
......@@ -301,9 +304,16 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
.has_clklane_stop = 1,
};
static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
.has_clklane_stop = 1,
};
static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
......@@ -311,6 +321,8 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
};
static struct of_device_id exynos_dsi_of_match[] = {
{ .compatible = "samsung,exynos3250-mipi-dsi",
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
......@@ -421,7 +433,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
if (!fout) {
dev_err(dsi->dev,
"failed to find PLL PMS for requested frequency\n");
return -EFAULT;
return 0;
}
dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
......@@ -453,7 +465,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return -EFAULT;
return 0;
}
reg = readl(dsi->reg_base + DSIM_STATUS_REG);
} while ((reg & DSIM_PLL_STABLE) == 0);
......@@ -569,6 +581,7 @@ static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
static int exynos_dsi_init_link(struct exynos_dsi *dsi)
{
struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
int timeout;
u32 reg;
u32 lanes_mask;
......@@ -650,6 +663,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
reg |= DSIM_LANE_EN(lanes_mask);
writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
/*
* Use non-continuous clock mode if the peripheral requests it and
* the host controller supports it.
*
* In non-continuous clock mode, the host controller will turn off
* the HS clock between high-speed transmissions to reduce
* power consumption.
*/
if (driver_data->has_clklane_stop &&
dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
reg |= DSIM_CLKLANE_STOP;
writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
}
/* Check clock and data lane state are stop state */
timeout = 100;
do {
......@@ -1414,6 +1441,9 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
connector->dev = NULL;
}
static struct drm_connector_funcs exynos_dsi_connector_funcs = {
......@@ -1634,10 +1664,10 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
mipi_dsi_host_unregister(&dsi->dsi_host);
exynos_dsi_connector_destroy(&dsi->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dsi->connector);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {
......
......@@ -165,6 +165,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
kfree(exynos_fb);
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
......
......@@ -123,6 +123,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi->screen_base = buffer->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
return 0;
}
......@@ -353,9 +354,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
if (fbdev->exynos_gem_obj)
exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
exynos_drm_fbdev_destroy(dev, private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;
......
......@@ -336,9 +336,6 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
......@@ -718,24 +715,24 @@ static int fimc_src_set_addr(struct device *dev,
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_SRC];
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIIYSA(buf_id));
EXYNOS_CIIYSA0);
if (config->fmt == DRM_FORMAT_YVU420) {
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICBSA(buf_id));
EXYNOS_CIICBSA0);
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICRSA(buf_id));
EXYNOS_CIICRSA0);
} else {
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICBSA(buf_id));
EXYNOS_CIICBSA0);
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICRSA(buf_id));
EXYNOS_CIICRSA0);
}
break;
case IPP_BUF_DEQUEUE:
fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
break;
default:
/* bypass */
......@@ -1122,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
return 0;
}
static int fimc_dst_get_buf_count(struct fimc_context *ctx)
{
u32 cfg, buf_num;
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
buf_num = hweight32(cfg);
DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
return buf_num;
}
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
enum drm_exynos_ipp_buf_type buf_type)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
bool enable;
u32 cfg;
u32 mask = 0x00000001 << buf_id;
int ret = 0;
unsigned long flags;
u32 buf_num;
u32 cfg;
DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
spin_lock_irqsave(&ctx->lock, flags);
/* mask register set */
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
switch (buf_type) {
case IPP_BUF_ENQUEUE:
enable = true;
break;
case IPP_BUF_DEQUEUE:
enable = false;
break;
default:
dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
ret = -EINVAL;
goto err_unlock;
}
if (buf_type == IPP_BUF_ENQUEUE)
cfg |= (1 << buf_id);
else
cfg &= ~(1 << buf_id);
/* sequence id */
cfg &= ~mask;
cfg |= (enable << buf_id);
fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
/* interrupt enable */
if (buf_type == IPP_BUF_ENQUEUE &&
fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
buf_num = hweight32(cfg);
/* interrupt disable */
if (buf_type == IPP_BUF_DEQUEUE &&
fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
fimc_mask_irq(ctx, false);
err_unlock:
spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
static int fimc_dst_set_addr(struct device *dev,
......@@ -1240,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
break;
}
return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
return 0;
}
static struct exynos_drm_ipp_ops fimc_dst_ops = {
......@@ -1291,14 +1257,11 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
DRM_ERROR("failed to dequeue.\n");
return IRQ_HANDLED;
}
fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
return IRQ_HANDLED;
}
......@@ -1590,11 +1553,8 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
if (cmd == IPP_CMD_M2M) {
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
if (cmd == IPP_CMD_M2M)
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
}
return 0;
}
......
......@@ -104,6 +104,14 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.has_limited_fmt = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
.has_vidoutcon = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
.lcdblk_offset = 0x210,
......@@ -168,6 +176,8 @@ struct fimd_context {
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
{ .compatible = "samsung,exynos3250-fimd",
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
......@@ -204,7 +214,6 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void fimd_clear_channel(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
......@@ -214,17 +223,31 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + SHADOWCON);
if (val & SHADOWCON_CHx_ENABLE(win)) {
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
/* wincon */
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
/* unprotect windows */
if (ctx->driver_data->has_shadowcon) {
val = readl(ctx->regs + SHADOWCON);
val &= ~SHADOWCON_CHx_ENABLE(win);
writel(val, ctx->regs + SHADOWCON);
}
ch_enabled = 1;
}
}
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
if (ch_enabled) {
unsigned int state = ctx->suspended;
ctx->suspended = 0;
fimd_wait_for_vblank(mgr);
ctx->suspended = state;
}
}
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
......@@ -237,23 +260,6 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = true;
/*
* with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) {
/*
......@@ -1051,7 +1057,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct fimd_context *ctx = fimd_manager.ctx;
struct drm_crtc *crtc = mgr->crtc;
fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
......@@ -1059,8 +1064,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
exynos_dpi_remove(dev);
fimd_mgr_remove(mgr);
crtc->funcs->destroy(crtc);
}
static const struct component_ops fimd_component_ops = {
......
......@@ -318,40 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
args->handle, (unsigned long)args->offset);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
&args->offset);
}
int exynos_drm_gem_mmap_buffer(struct file *filp,
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
struct exynos_drm_gem_buf *buffer;
unsigned long vm_size;
int ret;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
update_vm_cache_attr(exynos_gem_obj, vma);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
......@@ -373,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
return ret;
}
/*
* take a reference to this mapping of the object. And this reference
* is unreferenced by the corresponding vm_close call.
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(drm_dev, vma);
return 0;
}
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_file_private *exynos_file_priv;
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
struct file *anon_filp;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
exynos_file_priv = file_priv->driver_priv;
anon_filp = exynos_file_priv->anon_filp;
anon_filp->private_data = obj;
addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR_VALUE(addr)) {
mutex_unlock(&dev->struct_mutex);
return (int)addr;
}
mutex_unlock(&dev->struct_mutex);
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
return 0;
}
......@@ -710,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_gem_flags(exynos_gem_obj->flags);
if (ret) {
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
if (ret)
goto err_close_vm;
update_vm_cache_attr(exynos_gem_obj, vma);
ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
if (ret)
goto err_close_vm;
return ret;
err_close_vm:
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
......@@ -111,20 +111,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
* mmap the physically continuous memory that a gem object contains
* to user space.
*/
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma);
/* map user space allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
......
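For userspace, the removed EXYNOS_GEM_MMAP path is replaced by the generic
DRM mmap flow: obtain the fake mmap offset for a GEM handle and mmap() the
DRM fd at that offset. A hedged sketch (illustrative only; it assumes the
driver reports the offset through DRM_IOCTL_MODE_MAP_DUMB, and the header
install path may differ per distribution):

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *sketch_map_gem(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb req = { .handle = handle };

	/* ask the driver for the fake mmap offset of this GEM object */
	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &req) < 0)
		return MAP_FAILED;

	/* map it through the DRM device node, not a private anon file */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, req.offset);
}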
......@@ -1326,8 +1326,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
buf_id[EXYNOS_DRM_OPS_SRC];
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
}
return IRQ_HANDLED;
......
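Several hunks in this series (fimc, gsc, rotator, ipp) stop casting the
event/command work pointers to struct work_struct and instead pass the
embedded work member, recovering the container with container_of() in the
handler. A minimal sketch of that idiom (the sketch_* names are hypothetical;
INIT_WORK(), queue_work() and container_of() are the kernel APIs):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sketch_event_work {
	struct work_struct work;	/* pass &ew->work, never a cast */
	int buf_id;
};

static void sketch_event_fn(struct work_struct *work)
{
	struct sketch_event_work *ew =
		container_of(work, struct sketch_event_work, work);

	pr_info("completed buf_id %d\n", ew->buf_id);
}

/* queueing side:
 *	INIT_WORK(&ew->work, sketch_event_fn);
 *	queue_work(system_wq, &ew->work);
 */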
......@@ -75,7 +75,6 @@ struct drm_exynos_ipp_mem_node {
u32 prop_id;
u32 buf_id;
struct drm_exynos_ipp_buf_info buf_info;
struct drm_file *filp;
};
/*
......@@ -319,44 +318,6 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
sz->hsize, sz->vsize, config->flip, config->degree);
}
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
u32 prop_id = property->prop_id;
DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
ippdrv = ipp_find_drv_by_handle(prop_id);
if (IS_ERR(ippdrv)) {
DRM_ERROR("failed to get ipp driver.\n");
return -EINVAL;
}
/*
* Find command node using command list in ippdrv.
* when we find this command no using prop_id.
* return property information set in this command node.
*/
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
if ((c_node->property.prop_id == prop_id) &&
(c_node->state == IPP_STATE_STOP)) {
mutex_unlock(&ippdrv->cmd_lock);
DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
property->cmd, (int)ippdrv);
c_node->property = *property;
return 0;
}
}
mutex_unlock(&ippdrv->cmd_lock);
DRM_ERROR("failed to search property.\n");
return -EINVAL;
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
struct drm_exynos_ipp_cmd_work *cmd_work;
......@@ -392,6 +353,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_exynos_ipp_property *property = data;
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
u32 prop_id;
int ret, i;
if (!ctx) {
......@@ -404,6 +366,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
prop_id = property->prop_id;
/*
* This is log print for user application property.
* user application set various property.
......@@ -412,14 +376,24 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
ipp_print_property(property, i);
/*
* set property ioctl generated new prop_id.
* but in this case already asigned prop_id using old set property.
* e.g PAUSE state. this case supports find current prop_id and use it
* instead of allocation.
* In case prop_id is not zero try to set existing property.
*/
if (property->prop_id) {
DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
return ipp_find_and_set_property(property);
if (prop_id) {
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
if (!c_node || c_node->filp != file) {
DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
return -EINVAL;
}
if (c_node->state != IPP_STATE_STOP) {
DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
return -EINVAL;
}
c_node->property = *property;
return 0;
}
/* find ipp driver using ipp id */
......@@ -445,9 +419,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
property->prop_id, property->cmd, (int)ippdrv);
/* stored property information and ippdrv in private data */
c_node->dev = dev;
c_node->property = *property;
c_node->state = IPP_STATE_IDLE;
c_node->filp = file;
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
......@@ -499,105 +473,37 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
return ret;
}
static void ipp_clean_cmd_node(struct ipp_context *ctx,
struct drm_exynos_ipp_cmd_node *c_node)
{
/* delete list */
list_del(&c_node->list);
ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
c_node->property.prop_id);
/* destroy mutex */
mutex_destroy(&c_node->lock);
mutex_destroy(&c_node->mem_lock);
mutex_destroy(&c_node->event_lock);
/* free command node */
kfree(c_node->start_work);
kfree(c_node->stop_work);
kfree(c_node->event_work);
kfree(c_node);
}
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
switch (c_node->property.cmd) {
case IPP_CMD_WB:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
case IPP_CMD_OUTPUT:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
case IPP_CMD_M2M:
default:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
!list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
}
}
static struct drm_exynos_ipp_mem_node
*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
struct list_head *head;
int count = 0;
DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
/* source/destination memory list */
head = &c_node->mem_list[qbuf->ops_id];
/* find memory node from memory list */
list_for_each_entry(m_node, head, list) {
DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
/* compare buffer id */
if (m_node->buf_id == qbuf->buf_id)
return m_node;
}
return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
{
struct exynos_drm_ipp_ops *ops = NULL;
int ret = 0;
int i;
DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
if (!m_node) {
DRM_ERROR("invalid queue node.\n");
DRM_ERROR("invalid dequeue node.\n");
return -EFAULT;
}
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* get operations callback */
ops = ippdrv->ops[m_node->ops_id];
if (!ops) {
DRM_ERROR("not support ops.\n");
return -EFAULT;
/* put gem buffer */
for_each_ipp_planar(i) {
unsigned long handle = m_node->buf_info.handles[i];
if (handle)
exynos_drm_gem_put_dma_addr(drm_dev, handle,
c_node->filp);
}
/* set address and enable irq */
if (ops->set_addr) {
ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
m_node->buf_id, IPP_BUF_ENQUEUE);
if (ret) {
DRM_ERROR("failed to set addr.\n");
return ret;
}
}
list_del(&m_node->list);
kfree(m_node);
return ret;
return 0;
}
static struct drm_exynos_ipp_mem_node
*ipp_get_mem_node(struct drm_device *drm_dev,
struct drm_file *file,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
......@@ -615,6 +521,7 @@ static struct drm_exynos_ipp_mem_node
m_node->ops_id = qbuf->ops_id;
m_node->prop_id = qbuf->prop_id;
m_node->buf_id = qbuf->buf_id;
INIT_LIST_HEAD(&m_node->list);
DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
......@@ -627,10 +534,11 @@ static struct drm_exynos_ipp_mem_node
dma_addr_t *addr;
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], file);
qbuf->handle[i], c_node->filp);
if (IS_ERR(addr)) {
DRM_ERROR("failed to get addr.\n");
goto err_clear;
ipp_put_mem_node(drm_dev, c_node, m_node);
return ERR_PTR(-EFAULT);
}
buf_info->handles[i] = qbuf->handle[i];
......@@ -640,46 +548,30 @@ static struct drm_exynos_ipp_mem_node
}
}
m_node->filp = file;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
return m_node;
err_clear:
kfree(m_node);
return ERR_PTR(-EFAULT);
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
int i;
DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
struct list_head *head = &c_node->mem_list[ops];
if (!m_node) {
DRM_ERROR("invalid dequeue node.\n");
return -EFAULT;
}
mutex_lock(&c_node->mem_lock);
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
list_for_each_entry_safe(m_node, tm_node, head, list) {
int ret;
/* put gem buffer */
for_each_ipp_planar(i) {
unsigned long handle = m_node->buf_info.handles[i];
if (handle)
exynos_drm_gem_put_dma_addr(drm_dev, handle,
m_node->filp);
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret)
DRM_ERROR("failed to put m_node.\n");
}
/* delete list in queue */
list_del(&m_node->list);
kfree(m_node);
return 0;
mutex_unlock(&c_node->mem_lock);
}
static void ipp_free_event(struct drm_pending_event *event)
......@@ -688,7 +580,6 @@ static void ipp_free_event(struct drm_pending_event *event)
}
static int ipp_get_event(struct drm_device *drm_dev,
struct drm_file *file,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
......@@ -700,7 +591,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
c_node->filp->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
return -ENOMEM;
}
......@@ -712,7 +603,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
e->event.prop_id = qbuf->prop_id;
e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
e->base.event = &e->event.base;
e->base.file_priv = file;
e->base.file_priv = c_node->filp;
e->base.destroy = ipp_free_event;
mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
......@@ -757,6 +648,115 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
return;
}
static void ipp_clean_cmd_node(struct ipp_context *ctx,
struct drm_exynos_ipp_cmd_node *c_node)
{
int i;
/* cancel works */
cancel_work_sync(&c_node->start_work->work);
cancel_work_sync(&c_node->stop_work->work);
cancel_work_sync(&c_node->event_work->work);
/* put event */
ipp_put_event(c_node, NULL);
for_each_ipp_ops(i)
ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
/* delete list */
list_del(&c_node->list);
ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
c_node->property.prop_id);
/* destroy mutex */
mutex_destroy(&c_node->lock);
mutex_destroy(&c_node->mem_lock);
mutex_destroy(&c_node->event_lock);
/* free command node */
kfree(c_node->start_work);
kfree(c_node->stop_work);
kfree(c_node->event_work);
kfree(c_node);
}
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
switch (c_node->property.cmd) {
case IPP_CMD_WB:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
case IPP_CMD_OUTPUT:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
case IPP_CMD_M2M:
default:
return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
!list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
}
}
static struct drm_exynos_ipp_mem_node
*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
struct list_head *head;
int count = 0;
DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
/* source/destination memory list */
head = &c_node->mem_list[qbuf->ops_id];
/* find memory node from memory list */
list_for_each_entry(m_node, head, list) {
DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
/* compare buffer id */
if (m_node->buf_id == qbuf->buf_id)
return m_node;
}
return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
{
struct exynos_drm_ipp_ops *ops = NULL;
int ret = 0;
DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
if (!m_node) {
DRM_ERROR("invalid queue node.\n");
return -EFAULT;
}
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* get operations callback */
ops = ippdrv->ops[m_node->ops_id];
if (!ops) {
DRM_ERROR("not support ops.\n");
return -EFAULT;
}
/* set address and enable irq */
if (ops->set_addr) {
ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
m_node->buf_id, IPP_BUF_ENQUEUE);
if (ret) {
DRM_ERROR("failed to set addr.\n");
return ret;
}
}
return ret;
}
static void ipp_handle_cmd_work(struct device *dev,
struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_work *cmd_work,
......@@ -766,7 +766,7 @@ static void ipp_handle_cmd_work(struct device *dev,
cmd_work->ippdrv = ippdrv;
cmd_work->c_node = c_node;
queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
queue_work(ctx->cmd_workq, &cmd_work->work);
}
static int ipp_queue_buf_with_run(struct device *dev,
......@@ -872,7 +872,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
if (!c_node) {
if (!c_node || c_node->filp != file) {
DRM_ERROR("failed to get command node.\n");
return -ENODEV;
}
......@@ -881,7 +881,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
switch (qbuf->buf_type) {
case IPP_BUF_ENQUEUE:
/* get memory node */
m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
if (IS_ERR(m_node)) {
DRM_ERROR("failed to get m_node.\n");
return PTR_ERR(m_node);
......@@ -894,7 +894,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
*/
if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
/* get event for destination buffer */
ret = ipp_get_event(drm_dev, file, c_node, qbuf);
ret = ipp_get_event(drm_dev, c_node, qbuf);
if (ret) {
DRM_ERROR("failed to get event.\n");
goto err_clean_node;
......@@ -1007,7 +1007,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
if (!c_node) {
if (!c_node || c_node->filp != file) {
DRM_ERROR("invalid command node list.\n");
return -ENODEV;
}
......@@ -1257,80 +1257,39 @@ static int ipp_stop_property(struct drm_device *drm_dev,
struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_node *c_node)
{
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
struct drm_exynos_ipp_property *property = &c_node->property;
struct list_head *head;
int ret = 0, i;
int i;
DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
/* put event */
ipp_put_event(c_node, NULL);
mutex_lock(&c_node->mem_lock);
/* stop operations */
if (ippdrv->stop)
ippdrv->stop(ippdrv->dev, property->cmd);
/* check command */
switch (property->cmd) {
case IPP_CMD_M2M:
for_each_ipp_ops(i) {
/* source/destination memory list */
head = &c_node->mem_list[i];
list_for_each_entry_safe(m_node, tm_node,
head, list) {
ret = ipp_put_mem_node(drm_dev, c_node,
m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
}
for_each_ipp_ops(i)
ipp_clean_mem_nodes(drm_dev, c_node, i);
break;
case IPP_CMD_WB:
/* destination memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
break;
case IPP_CMD_OUTPUT:
/* source memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
break;
default:
DRM_ERROR("invalid operations.\n");
ret = -EINVAL;
goto err_clear;
return -EINVAL;
}
err_clear:
mutex_unlock(&c_node->mem_lock);
/* stop operations */
if (ippdrv->stop)
ippdrv->stop(ippdrv->dev, property->cmd);
return ret;
return 0;
}
void ipp_sched_cmd(struct work_struct *work)
{
struct drm_exynos_ipp_cmd_work *cmd_work =
(struct drm_exynos_ipp_cmd_work *)work;
container_of(work, struct drm_exynos_ipp_cmd_work, work);
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
struct drm_exynos_ipp_property *property;
......@@ -1543,7 +1502,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
void ipp_sched_event(struct work_struct *work)
{
struct drm_exynos_ipp_event_work *event_work =
(struct drm_exynos_ipp_event_work *)work;
container_of(work, struct drm_exynos_ipp_event_work, work);
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
int ret;
......@@ -1646,11 +1605,11 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
struct exynos_drm_ippdrv *ippdrv;
struct exynos_drm_ippdrv *ippdrv, *t;
struct ipp_context *ctx = get_ipp_context(dev);
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
......@@ -1677,14 +1636,11 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
......@@ -1692,7 +1648,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
count++, (int)ippdrv);
if (c_node->dev == file_priv->ipp_dev) {
if (c_node->filp == file) {
/*
* userland goto unnormal state. process killed.
* and close the file.
......@@ -1808,63 +1764,12 @@ static int ipp_remove(struct platform_device *pdev)
return 0;
}
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
DRM_DEBUG_KMS("enable[%d]\n", enable);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
if (pm_runtime_suspended(dev))
return 0;
return ipp_power_ctrl(ctx, false);
}
static int ipp_resume(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
if (!pm_runtime_suspended(dev))
return ipp_power_ctrl(ctx, true);
return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
return ipp_power_ctrl(ctx, false);
}
static int ipp_runtime_resume(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
return ipp_power_ctrl(ctx, true);
}
#endif
static const struct dev_pm_ops ipp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
struct platform_driver ipp_driver = {
.probe = ipp_probe,
.remove = ipp_remove,
.driver = {
.name = "exynos-drm-ipp",
.owner = THIS_MODULE,
.pm = &ipp_pm_ops,
},
};
......@@ -48,7 +48,6 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
* @dev: IPP device.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
......@@ -62,9 +61,9 @@ struct drm_exynos_ipp_cmd_work {
* @stop_work: stop command work structure.
* @event_work: event work structure.
* @state: state of command node.
* @filp: associated file pointer.
*/
struct drm_exynos_ipp_cmd_node {
struct device *dev;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
......@@ -78,6 +77,7 @@ struct drm_exynos_ipp_cmd_node {
struct drm_exynos_ipp_cmd_work *stop_work;
struct drm_exynos_ipp_event_work *event_work;
enum drm_exynos_ipp_state state;
struct drm_file *filp;
};
/*
......
......@@ -139,6 +139,8 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
overlay->crtc_x, overlay->crtc_y,
overlay->crtc_width, overlay->crtc_height);
plane->crtc = crtc;
exynos_drm_crtc_plane_mode_set(crtc, overlay);
return 0;
......@@ -187,8 +189,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret < 0)
return ret;
plane->crtc = crtc;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
......@@ -254,25 +254,26 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
}
struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, bool priv)
unsigned long possible_crtcs,
enum drm_plane_type type)
{
struct exynos_plane *exynos_plane;
int err;
exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
if (!exynos_plane)
return NULL;
return ERR_PTR(-ENOMEM);
err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats, ARRAY_SIZE(formats),
priv);
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats,
ARRAY_SIZE(formats), type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
kfree(exynos_plane);
return NULL;
return ERR_PTR(err);
}
if (priv)
if (type == DRM_PLANE_TYPE_PRIMARY)
exynos_plane->overlay.zpos = DEFAULT_ZPOS;
else
exynos_plane_attach_zpos_property(&exynos_plane->base);
......
......@@ -17,4 +17,5 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
void exynos_plane_commit(struct drm_plane *plane);
void exynos_plane_dpms(struct drm_plane *plane, int mode);
struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, bool priv);
unsigned long possible_crtcs,
enum drm_plane_type type);
......@@ -156,8 +156,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
} else {
DRM_ERROR("the SFR is set illegally\n");
}
......
......@@ -303,23 +303,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
* - with irq_enabled = 1, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = 1;
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = 1;
return 0;
}
......@@ -648,7 +631,6 @@ static int vidi_remove(struct platform_device *pdev)
struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
struct vidi_context *ctx = mgr->ctx;
struct drm_encoder *encoder = ctx->encoder;
struct drm_crtc *crtc = mgr->crtc;
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
......@@ -657,7 +639,6 @@ static int vidi_remove(struct platform_device *pdev)
return -EINVAL;
}
crtc->funcs->destroy(crtc);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);
......
......@@ -1040,6 +1040,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static struct drm_connector_funcs hdmi_connector_funcs = {
......@@ -2314,8 +2316,8 @@ static void hdmi_unbind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = display->encoder;
struct hdmi_context *hdata = display->ctx;
hdmi_connector_destroy(&hdata->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&hdata->connector);
}
static const struct component_ops hdmi_component_ops = {
......
......@@ -1302,15 +1302,12 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct drm_crtc *crtc = mgr->crtc;
dev_info(dev, "remove successful\n");
mixer_mgr_remove(mgr);
pm_runtime_disable(dev);
crtc->funcs->destroy(crtc);
}
static const struct component_ops mixer_component_ops = {
......
......@@ -96,6 +96,8 @@ void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
......
......@@ -32,38 +32,6 @@ struct drm_exynos_gem_create {
unsigned int handle;
};
/**
* A structure for getting buffer offset.
*
* @handle: a pointer to gem object created.
* @pad: just padding to be 64-bit aligned.
* @offset: relatived offset value of the memory region allocated.
* - this value should be set by user.
*/
struct drm_exynos_gem_map_off {
unsigned int handle;
unsigned int pad;
uint64_t offset;
};
/**
* A structure for mapping buffer.
*
* @handle: a handle to gem object created.
* @pad: just padding to be 64-bit aligned.
* @size: memory size to be mapped.
* @mapped: having user virtual address mmaped.
* - this variable would be filled by exynos gem module
* of kernel side with user virtual address which is allocated
* by do_mmap().
*/
struct drm_exynos_gem_mmap {
unsigned int handle;
unsigned int pad;
uint64_t size;
uint64_t mapped;
};
/**
* A structure to gem information.
*
......@@ -316,8 +284,6 @@ struct drm_exynos_ipp_cmd_ctrl {
};
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
......@@ -336,12 +302,6 @@ struct drm_exynos_ipp_cmd_ctrl {
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
......