Commit b20b515a authored by Dave Airlie

Merge tag 'omapdrm-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next

omapdrm fixes and cleanups for 4.20

- fix memory barrier bug in DMM driver
- fix interrupt management in DMM driver
- partial workaround for errata i878
- fix use of freed memory
- some cleanups
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/36bdd576-2b5c-d69e-24af-05572652b08e@ti.com
parents 29b90203 2e65c7a6
@@ -1140,18 +1140,6 @@ static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
 	REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
 }
 
-static bool format_is_yuv(u32 fourcc)
-{
-	switch (fourcc) {
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_NV12:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
 					   enum omap_plane_id plane,
 					   enum omap_dss_rotation_type rotation)
@@ -1910,11 +1898,14 @@ static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
 	int scale_x = out_width != orig_width;
 	int scale_y = out_height != orig_height;
 	bool chroma_upscale = plane != OMAP_DSS_WB;
+	const struct drm_format_info *info;
+
+	info = drm_format_info(fourcc);
 
 	if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
 		return;
-	if (!format_is_yuv(fourcc)) {
+	if (!info->is_yuv) {
 		/* reset chroma resampling for RGB formats */
 		if (plane != OMAP_DSS_WB)
 			REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
@@ -2624,7 +2615,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	unsigned int offset0, offset1;
 	s32 row_inc;
 	s32 pix_inc;
-	u16 frame_width, frame_height;
+	u16 frame_width;
 	unsigned int field_offset = 0;
 	u16 in_height = height;
 	u16 in_width = width;
@@ -2632,6 +2623,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
 	unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
 	unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
+	const struct drm_format_info *info;
+
+	info = drm_format_info(fourcc);
 
 	/* when setting up WB, dispc_plane_pclk_rate() returns 0 */
 	if (plane == OMAP_DSS_WB)
@@ -2640,7 +2634,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
 		return -EINVAL;
 
-	if (format_is_yuv(fourcc) && (in_width & 1)) {
+	if (info->is_yuv && (in_width & 1)) {
 		DSSERR("input width %d is not even for YUV format\n", in_width);
 		return -EINVAL;
 	}
@@ -2680,7 +2674,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	DSSDBG("predecimation %d x %x, new input size %d x %d\n",
 	       x_predecim, y_predecim, in_width, in_height);
 
-	if (format_is_yuv(fourcc) && (in_width & 1)) {
+	if (info->is_yuv && (in_width & 1)) {
 		DSSDBG("predecimated input width is not even for YUV format\n");
 		DSSDBG("adjusting input width %d -> %d\n",
 		       in_width, in_width & ~1);
@@ -2688,7 +2682,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 		in_width &= ~1;
 	}
 
-	if (format_is_yuv(fourcc))
+	if (info->is_yuv)
 		cconv = 1;
 
 	if (ilace && !fieldmode) {
@@ -2714,13 +2708,10 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
 	row_inc = 0;
 	pix_inc = 0;
 
-	if (plane == OMAP_DSS_WB) {
+	if (plane == OMAP_DSS_WB)
 		frame_width = out_width;
-		frame_height = out_height;
-	} else {
+	else
 		frame_width = in_width;
-		frame_height = height;
-	}
 
 	calc_offset(screen_width, frame_width,
 		    fourcc, fieldmode, field_offset,
......
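For reference: the dispc.c hunks above replace the driver's hand-rolled format_is_yuv() table with the DRM core's format database, so any YUV fourcc known to the core is covered automatically. A minimal sketch of the idiom (the wrapper function here is hypothetical, not from the patch):

#include <drm/drm_fourcc.h>

/* Hypothetical helper: ask the DRM core whether a fourcc is a YUV format. */
static bool fourcc_is_yuv(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);

	/* drm_format_info() returns NULL for an unrecognized fourcc */
	return info && info->is_yuv;
}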
@@ -947,7 +947,7 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
 			       &dss_debug_fops);
 	if (IS_ERR(d)) {
 		kfree(entry);
-		return ERR_PTR(PTR_ERR(d));
+		return ERR_CAST(d);
 	}
 
 	entry->dentry = d;
......
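For reference: ERR_PTR(PTR_ERR(d)) unwraps an error pointer to a long only to rewrap it under a different pointer type; ERR_CAST() expresses that conversion directly. A minimal sketch of the idiom (the function name is illustrative, not the driver's API):

#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical wrapper: turn an error-valued dentry pointer into an
 * error-valued entry pointer without the ERR_PTR(PTR_ERR()) round trip. */
static struct dss_debugfs_entry *entry_for_dentry(struct dss_debugfs_entry *entry,
						  struct dentry *d)
{
	if (IS_ERR(d)) {
		kfree(entry);
		return ERR_CAST(d);	/* equivalent to ERR_PTR(PTR_ERR(d)) */
	}

	entry->dentry = d;
	return entry;
}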
@@ -164,10 +164,11 @@ static void omap_connector_destroy(struct drm_connector *connector)
 
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
-	kfree(omap_connector);
 
 	omapdss_device_put(omap_connector->output);
 	omapdss_device_put(omap_connector->display);
+
+	kfree(omap_connector);
 }
 
 #define MAX_EDID	512
......
@@ -159,6 +159,7 @@ struct dmm_platform_data {
 struct dmm {
 	struct device *dev;
+	dma_addr_t phys_base;
 	void __iomem *base;
 	int irq;
@@ -189,6 +190,12 @@ struct dmm {
 	struct list_head alloc_head;
 
 	const struct dmm_platform_data *plat_data;
+
+	bool dmm_workaround;
+	spinlock_t wa_lock;
+	u32 *wa_dma_data;
+	dma_addr_t wa_dma_handle;
+	struct dma_chan *wa_dma_chan;
 };
 
 #endif
@@ -18,6 +18,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -79,14 +80,138 @@ static const u32 reg[][4] = {
 			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
 };
 
+static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
+{
+	struct dma_device *dma_dev = dmm->wa_dma_chan->device;
+	struct dma_async_tx_descriptor *tx;
+	enum dma_status status;
+	dma_cookie_t cookie;
+
+	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+	if (!tx) {
+		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
+		return -EIO;
+	}
+
+	cookie = tx->tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
+		return -EIO;
+	}
+
+	dma_async_issue_pending(dmm->wa_dma_chan);
+	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
+	if (status != DMA_COMPLETE)
+		dev_err(dmm->dev, "i878 wa DMA copy failure\n");
+
+	dmaengine_terminate_all(dmm->wa_dma_chan);
+	return 0;
+}
+
+static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
+{
+	dma_addr_t src, dst;
+	int r;
+
+	src = dmm->phys_base + reg;
+	dst = dmm->wa_dma_handle;
+
+	r = dmm_dma_copy(dmm, src, dst);
+	if (r) {
+		dev_err(dmm->dev, "sDMA read transfer timeout\n");
+		return readl(dmm->base + reg);
+	}
+
+	/*
+	 * As per i878 workaround, the DMA is used to access the DMM registers.
+	 * Make sure that the readl is not moved by the compiler or the CPU
+	 * earlier than the DMA finished writing the value to memory.
+	 */
+	rmb();
+
+	return readl(dmm->wa_dma_data);
+}
+
+static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
+{
+	dma_addr_t src, dst;
+	int r;
+
+	writel(val, dmm->wa_dma_data);
+	/*
+	 * As per i878 workaround, the DMA is used to access the DMM registers.
+	 * Make sure that the writel is not moved by the compiler or the CPU, so
+	 * the data will be in place before we start the DMA to do the actual
+	 * register write.
+	 */
+	wmb();
+
+	src = dmm->wa_dma_handle;
+	dst = dmm->phys_base + reg;
+
+	r = dmm_dma_copy(dmm, src, dst);
+	if (r) {
+		dev_err(dmm->dev, "sDMA write transfer timeout\n");
+		writel(val, dmm->base + reg);
+	}
+}
+
+static u32 dmm_read(struct dmm *dmm, u32 reg)
+{
+	if (dmm->dmm_workaround) {
+		u32 v;
+		unsigned long flags;
+
+		spin_lock_irqsave(&dmm->wa_lock, flags);
+		v = dmm_read_wa(dmm, reg);
+		spin_unlock_irqrestore(&dmm->wa_lock, flags);
+
+		return v;
+	} else {
+		return readl(dmm->base + reg);
+	}
+}
+
+static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
+{
+	if (dmm->dmm_workaround) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dmm->wa_lock, flags);
+		dmm_write_wa(dmm, val, reg);
+		spin_unlock_irqrestore(&dmm->wa_lock, flags);
+	} else {
+		writel(val, dmm->base + reg);
+	}
+}
+
+static int dmm_workaround_init(struct dmm *dmm)
+{
+	dma_cap_mask_t mask;
+
+	spin_lock_init(&dmm->wa_lock);
+
+	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
+					      &dmm->wa_dma_handle, GFP_KERNEL);
+	if (!dmm->wa_dma_data)
+		return -ENOMEM;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
+	if (!dmm->wa_dma_chan) {
+		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void dmm_workaround_uninit(struct dmm *dmm)
+{
+	dma_release_channel(dmm->wa_dma_chan);
+
+	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
+}
+
 /* simple allocator to grab next 16 byte aligned memory from txn */
@@ -285,6 +410,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 	}
 
 	txn->last_pat->next_pa = 0;
+	/* ensure that the written descriptors are visible to DMM */
+	wmb();
+
+	/*
+	 * NOTE: the wmb() above should be enough, but there seems to be a bug
+	 * in OMAP's memory barrier implementation, which in some rare cases may
+	 * cause the writes not to be observable after wmb().
+	 */
+
+	/* read back to ensure the data is in RAM */
+	readl(&txn->last_pat->next_pa);
 
 	/* write to PAT_DESCR to clear out any pending transaction */
 	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
@@ -603,6 +739,10 @@ static int omap_dmm_remove(struct platform_device *dev)
 	unsigned long flags;
 
 	if (omap_dmm) {
+		/* Disable all enabled interrupts */
+		dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_CLR);
+		free_irq(omap_dmm->irq, omap_dmm);
+
 		/* free all area regions */
 		spin_lock_irqsave(&list_lock, flags);
 		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
@@ -625,8 +765,8 @@ static int omap_dmm_remove(struct platform_device *dev)
 		if (omap_dmm->dummy_page)
 			__free_page(omap_dmm->dummy_page);
 
-		if (omap_dmm->irq > 0)
-			free_irq(omap_dmm->irq, omap_dmm);
+		if (omap_dmm->dmm_workaround)
+			dmm_workaround_uninit(omap_dmm);
 
 		iounmap(omap_dmm->base);
 		kfree(omap_dmm);
@@ -673,6 +813,7 @@ static int omap_dmm_probe(struct platform_device *dev)
 		goto fail;
 	}
 
+	omap_dmm->phys_base = mem->start;
 	omap_dmm->base = ioremap(mem->start, SZ_2K);
 
 	if (!omap_dmm->base) {
@@ -688,6 +829,22 @@ static int omap_dmm_probe(struct platform_device *dev)
 
 	omap_dmm->dev = &dev->dev;
 
+	if (of_machine_is_compatible("ti,dra7")) {
+		/*
+		 * DRA7 Errata i878 says that MPU should not be used to access
+		 * RAM and DMM at the same time. As it's not possible to prevent
+		 * MPU accessing RAM, we need to access DMM via a proxy.
+		 */
+		if (!dmm_workaround_init(omap_dmm)) {
+			omap_dmm->dmm_workaround = true;
+			dev_info(&dev->dev,
+				 "workaround for errata i878 in use\n");
+		} else {
+			dev_warn(&dev->dev,
+				 "failed to initialize work-around for i878\n");
+		}
+	}
+
 	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
 	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
 	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -714,24 +871,6 @@ static int omap_dmm_probe(struct platform_device *dev)
 	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
 	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
 
-	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
-			  "omap_dmm_irq_handler", omap_dmm);
-
-	if (ret) {
-		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
-			omap_dmm->irq, ret);
-		omap_dmm->irq = -1;
-		goto fail;
-	}
-
-	/* Enable all interrupts for each refill engine except
-	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
-	 * about because we want to be able to refill live scanout
-	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
-	 * we just generally don't care about.
-	 */
-	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
-
 	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
 	if (!omap_dmm->dummy_page) {
 		dev_err(&dev->dev, "could not allocate dummy page\n");
@@ -823,6 +962,24 @@ static int omap_dmm_probe(struct platform_device *dev)
 		.p1.y = omap_dmm->container_height - 1,
 	};
 
+	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+			  "omap_dmm_irq_handler", omap_dmm);
+
+	if (ret) {
+		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+			omap_dmm->irq, ret);
+		omap_dmm->irq = -1;
+		goto fail;
+	}
+
+	/* Enable all interrupts for each refill engine except
+	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+	 * about because we want to be able to refill live scanout
+	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+	 * we just generally don't care about.
+	 */
+	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
+
 	/* initialize all LUTs to dummy page entries */
 	for (i = 0; i < omap_dmm->num_lut; i++) {
 		area.tcm = omap_dmm->tcm[i];
......
@@ -439,7 +439,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
 	args->size = omap_gem_mmap_size(obj);
 	args->offset = omap_gem_mmap_offset(obj);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
@@ -614,7 +614,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 	omap_disconnect_pipelines(ddev);
 err_crtc_uninit:
 	omap_crtc_pre_uninit(priv);
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 	return ret;
 }
@@ -643,7 +643,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
 	omap_disconnect_pipelines(ddev);
 	omap_crtc_pre_uninit(priv);
 
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 }
 
 static int pdev_probe(struct platform_device *pdev)
......
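For reference: the reference-counting conversions in the surrounding hunks are mechanical renames to the kernel's kref-style get/put naming; refcounting behavior is unchanged. Old and new spellings side by side (a sketch, not from the patch):

/* old spelling                               new spelling                        */
drm_dev_unref(ddev);                       /* drm_dev_put(ddev);                  */
drm_gem_object_reference(obj);             /* drm_gem_object_get(obj);            */
drm_gem_object_unreference_unlocked(obj);  /* drm_gem_object_put_unlocked(obj);   */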
@@ -319,7 +319,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
 
 error:
 	while (--i >= 0)
-		drm_gem_object_unreference_unlocked(bos[i]);
+		drm_gem_object_put_unlocked(bos[i]);
 
 	return fb;
 }
......
@@ -150,7 +150,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 		/* note: if fb creation failed, we can't rely on fb destroy
 		 * to unref the bo:
 		 */
-		drm_gem_object_unreference_unlocked(fbdev->bo);
+		drm_gem_object_put_unlocked(fbdev->bo);
 		ret = PTR_ERR(fb);
 		goto fail;
 	}
......
@@ -638,7 +638,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 
 	*offset = omap_gem_mmap_offset(obj);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 fail:
 	return ret;
@@ -1312,7 +1312,7 @@ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return 0;
 }
......
@@ -168,7 +168,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
 			 * Importing dmabuf exported from out own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
-			drm_gem_object_reference(obj);
+			drm_gem_object_get(obj);
			return obj;
		}
	}
......
/*
* SImple Tiler Allocator (SiTA) private structures.
*
* Copyright (C) 2009-2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Ravi Ramachandra <r.ramachandra@ti.com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TCM_SITA_H
#define _TCM_SITA_H
#include "tcm.h"
/* length between two coordinates */
#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
enum criteria {
	CR_MAX_NEIGHS		= 0x01,
	CR_FIRST_FOUND		= 0x10,
	CR_BIAS_HORIZONTAL	= 0x20,
	CR_BIAS_VERTICAL	= 0x40,
	CR_DIAGONAL_BALANCE	= 0x80
};

/* nearness to the beginning of the search field from 0 to 1000 */
struct nearness_factor {
	s32 x;
	s32 y;
};

/*
 * Statistics on immediately neighboring slots. Edge is the number of
 * border segments that are also border segments of the scan field. Busy
 * refers to the number of neighbors that are occupied.
 */
struct neighbor_stats {
	u16 edge;
	u16 busy;
};

/* structure to keep the score of a potential allocation */
struct score {
	struct nearness_factor	f;
	struct neighbor_stats	n;
	struct tcm_area		a;
	u16			neighs;	/* number of busy neighbors */
};

struct sita_pvt {
	spinlock_t lock;	/* spinlock to protect access */
	struct tcm_pt div_pt;	/* divider point splitting container */
	struct tcm_area ***map;	/* pointers to the parent area for each slot */
};

/* assign coordinates to area */
static inline
void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
{
	a->p0.x = x0;
	a->p0.y = y0;
	a->p1.x = x1;
	a->p1.y = y1;
}
#endif
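For reference, a small usage sketch of the helpers above (coordinates made up for illustration): assign() fills in an area's inclusive corner points, and LEN() measures an inclusive span, so both endpoints count:

	struct tcm_area a;

	assign(&a, 16, 0, 23, 3);	/* 8x4 slot area anchored at (16, 0) */

	/* LEN() counts both endpoints: 23 - 16 + 1 == 8 */
	u16 w = LEN(a.p1.x, a.p0.x);	/* 8 */
	u16 h = LEN(a.p1.y, a.p0.y);	/* 4 */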