Commit 202dfa08 authored by Dave Airlie

Merge branch 'drm/next/du' of git://linuxtv.org/pinchartl/media into drm-next

The series interleaves DRM and V4L2 patches due to dependencies between the
R-Car DU and VSP drivers. Mauro has acked all the V4L2 patches to go through
your tree, and they don't conflict with anything queued for v4.13 in his tree.
If I need to send any conflicting patches through Mauro's tree for v4.13, I'll
make sure to base them on this branch.

* 'drm/next/du' of git://linuxtv.org/pinchartl/media:
  drm: rcar-du: Map memory through the VSP device
  v4l: vsp1: Add API to map and unmap DRM buffers through the VSP
  v4l: vsp1: Map the DL and video buffers through the proper bus master
  v4l: rcar-fcp: Add an API to retrieve the FCP device
  v4l: rcar-fcp: Don't get/put module reference
  drm: rcar-du: Register a completion callback with VSP1
  v4l: vsp1: Extend VSP1 module API to allow DRM callbacks
  v4l: vsp1: Postpone frame end handling in event of display list race
  drm: rcar-du: Arm the page flip event after queuing the page flip
parents 7249e3d6 fa5b4114
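
The core of the series is a new contract between the two drivers: the DU registers a frame-completion callback when it configures the VSP LIF, and maps its frame buffer memory through the VSP (or its FCP) rather than through the DU device. Below is a minimal sketch of the callback side, using only the vsp1_du_setup_lif() interface visible in the diff that follows; the my_crtc and my_page_flip_complete names are placeholders, not identifiers from the patches.

	#include <linux/device.h>
	#include <media/vsp1.h>

	/* Placeholder client state, not part of the series. */
	struct my_crtc {
		struct device *vsp_dev;	/* VSP1 instance driving this CRTC */
	};

	/* Called by the VSP1 driver once per vsp1_du_atomic_flush() completion. */
	static void my_page_flip_complete(void *data)
	{
		struct my_crtc *crtc = data;

		/* Send the DRM page flip event for this CRTC here. */
		(void)crtc;
	}

	static int my_enable_display(struct my_crtc *crtc, unsigned int width,
				     unsigned int height)
	{
		struct vsp1_du_lif_config cfg = {
			.width = width,
			.height = height,
			.callback = my_page_flip_complete,
			.callback_data = crtc,
		};

		return vsp1_du_setup_lif(crtc->vsp_dev, &cfg);
	}

The VSP1 driver guarantees the callback fires once and only once per vsp1_du_atomic_flush(), which is what allows the DU driver to arm the page flip event after queuing the flip instead of relying on its own vblank interrupt.
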
@@ -378,7 +378,7 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
* Page Flip
*/
static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
struct drm_pending_vblank_event *event;
struct drm_device *dev = rcrtc->crtc.dev;
@@ -581,17 +581,6 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
rcrtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
@@ -601,9 +590,20 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
rcar_du_crtc_update_planes(rcrtc);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
rcrtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_flush(rcrtc);
}
@@ -650,6 +650,7 @@ static const struct drm_crtc_funcs crtc_funcs = {
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
struct rcar_du_crtc *rcrtc = arg;
struct rcar_du_device *rcdu = rcrtc->group->dev;
irqreturn_t ret = IRQ_NONE;
u32 status;
@@ -658,7 +659,10 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
if (status & DSSR_FRM) {
drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
if (rcdu->info->gen < 3)
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
}
@@ -73,5 +73,6 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_route_output(struct drm_crtc *crtc,
enum rcar_du_output output);
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
#endif /* __RCAR_DU_CRTC_H__ */
@@ -19,7 +19,9 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>
@@ -28,6 +30,13 @@
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
static void rcar_du_vsp_complete(void *private)
{
struct rcar_du_crtc *crtc = private;
rcar_du_crtc_finish_page_flip(crtc);
}
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
@@ -35,6 +44,8 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
.callback = rcar_du_vsp_complete,
.callback_data = crtc,
};
struct rcar_du_plane_state state = {
.state = {
@@ -170,12 +181,9 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.dst.width = state->state.crtc_w;
cfg.dst.height = state->state.crtc_h;
for (i = 0; i < state->format->planes; ++i) {
struct drm_gem_cma_object *gem;
gem = drm_fb_cma_get_gem_obj(fb, i);
cfg.mem[i] = gem->paddr + fb->offsets[i];
}
for (i = 0; i < state->format->planes; ++i)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
if (formats_kms[i] == state->format->fourcc) {
@@ -187,6 +195,67 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg);
}
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i;
int ret;
if (!state->fb)
return 0;
for (i = 0; i < rstate->format->planes; ++i) {
struct drm_gem_cma_object *gem =
drm_fb_cma_get_gem_obj(state->fb, i);
struct sg_table *sgt = &rstate->sg_tables[i];
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
gem->base.size);
if (ret)
goto fail;
ret = vsp1_du_map_sg(vsp->vsp, sgt);
if (!ret) {
sg_free_table(sgt);
ret = -ENOMEM;
goto fail;
}
}
return 0;
fail:
while (i--) {
struct sg_table *sgt = &rstate->sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
return ret;
}
static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
unsigned int i;
if (!state->fb)
return;
for (i = 0; i < rstate->format->planes; ++i) {
struct sg_table *sgt = &rstate->sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -227,6 +296,8 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
.prepare_fb = rcar_du_vsp_plane_prepare_fb,
.cleanup_fb = rcar_du_vsp_plane_cleanup_fb,
.atomic_check = rcar_du_vsp_plane_atomic_check,
.atomic_update = rcar_du_vsp_plane_atomic_update,
};
@@ -43,6 +43,7 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
* struct rcar_du_vsp_plane_state - Driver-specific plane state
* @state: base DRM plane state
* @format: information about the pixel format used by the plane
* @sg_tables: scatter-gather tables for the frame buffer memory
* @alpha: value of the plane alpha property
* @zpos: value of the plane zpos property
*/
@@ -50,6 +51,7 @@ struct rcar_du_vsp_plane_state {
struct drm_plane_state state;
const struct rcar_du_format_info *format;
struct sg_table sg_tables[3];
unsigned int alpha;
unsigned int zpos;
@@ -53,14 +53,7 @@ struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
if (fcp->dev->of_node != np)
continue;
/*
* Make sure the module won't be unloaded behind our back. This
* is a poor man's safety net, the module should really not be
* unloaded while FCP users can be active.
*/
if (!try_module_get(fcp->dev->driver->owner))
fcp = NULL;
get_device(fcp->dev);
goto done;
}
@@ -81,10 +74,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_get);
void rcar_fcp_put(struct rcar_fcp_device *fcp)
{
if (fcp)
module_put(fcp->dev->driver->owner);
put_device(fcp->dev);
}
EXPORT_SYMBOL_GPL(rcar_fcp_put);
struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp)
{
return fcp->dev;
}
EXPORT_SYMBOL_GPL(rcar_fcp_get_device);
/**
* rcar_fcp_enable - Enable an FCP
* @fcp: The FCP instance
@@ -74,6 +74,7 @@ struct vsp1_device {
void __iomem *mmio;
struct rcar_fcp_device *fcp;
struct device *bus_master;
struct vsp1_bru *bru;
struct vsp1_clu *clu;
@@ -137,7 +137,7 @@ static int vsp1_dl_body_init(struct vsp1_device *vsp1,
dlb->vsp1 = vsp1;
dlb->size = size;
dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
GFP_KERNEL);
if (!dlb->entries)
return -ENOMEM;
@@ -150,7 +150,7 @@
*/
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}
/**
@@ -561,9 +561,19 @@ void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
spin_unlock(&dlm->lock);
}
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
/**
* vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
* @dlm: the display list manager
*
* Return true if the previous display list has completed at frame end, or false
* if it has been delayed by one frame because the display list commit raced
* with the frame end interrupt. The function always returns true in header mode
* as display list processing is then not continuous and races never occur.
*/
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
struct vsp1_device *vsp1 = dlm->vsp1;
bool completed = false;
spin_lock(&dlm->lock);
@@ -575,8 +585,10 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
* perform any operation as there can't be any new display list queued
* in that case.
*/
if (dlm->mode == VSP1_DL_MODE_HEADER)
if (dlm->mode == VSP1_DL_MODE_HEADER) {
completed = true;
goto done;
}
/*
* The UPD bit set indicates that the commit operation raced with the
@@ -594,6 +606,7 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
if (dlm->queued) {
dlm->active = dlm->queued;
dlm->queued = NULL;
completed = true;
}
/*
@@ -614,6 +627,8 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
done:
spin_unlock(&dlm->lock);
return completed;
}
/* Hardware Setup */
@@ -28,7 +28,7 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm);
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
void vsp1_dl_list_put(struct vsp1_dl_list *dl);
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -36,6 +37,14 @@ void vsp1_drm_display_start(struct vsp1_device *vsp1)
vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm);
}
static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
struct vsp1_drm *drm = to_vsp1_drm(pipe);
if (drm->du_complete)
drm->du_complete(drm->du_private);
}
/* -----------------------------------------------------------------------------
* DU Driver API
*/
@@ -95,6 +104,7 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
}
pipe->num_inputs = 0;
vsp1->drm->du_complete = NULL;
vsp1_dlm_reset(pipe->output->dlm);
vsp1_device_put(vsp1);
@@ -199,6 +209,13 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
if (ret < 0)
return ret;
/*
* Register a callback to allow us to notify the DRM driver of frame
* completion events.
*/
vsp1->drm->du_complete = cfg->callback;
vsp1->drm->du_private = cfg->callback_data;
ret = media_pipeline_start(&pipe->output->entity.subdev.entity,
&pipe->pipe);
if (ret < 0) {
@@ -524,6 +541,29 @@ void vsp1_du_atomic_flush(struct device *dev)
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
/*
* As all the buffers allocated by the DU driver are coherent, we can
* skip cache sync. This will need to be revisited when support for
* non-coherent buffers will be added to the DU driver.
*/
return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
/* -----------------------------------------------------------------------------
* Initialization
*/
@@ -603,6 +643,7 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
pipe->lif = &vsp1->lif->entity;
pipe->output = vsp1->wpf[0];
pipe->output->pipe = pipe;
pipe->frame_end = vsp1_du_pipeline_frame_end;
return 0;
}
@@ -23,6 +23,8 @@
* @num_inputs: number of active pipeline inputs at the beginning of an update
* @inputs: source crop rectangle, destination compose rectangle and z-order
* position for every input
* @du_complete: frame completion callback for the DU driver (optional)
* @du_private: data to be passed to the du_complete callback
*/
struct vsp1_drm {
struct vsp1_pipeline pipe;
@@ -33,8 +35,17 @@ struct vsp1_drm {
struct v4l2_rect compose;
unsigned int zpos;
} inputs[VSP1_MAX_RPF];
/* Frame synchronisation */
void (*du_complete)(void *);
void *du_private;
};
static inline struct vsp1_drm *to_vsp1_drm(struct vsp1_pipeline *pipe)
{
return container_of(pipe, struct vsp1_drm, pipe);
}
int vsp1_drm_init(struct vsp1_device *vsp1);
void vsp1_drm_cleanup(struct vsp1_device *vsp1);
int vsp1_drm_create_links(struct vsp1_device *vsp1);
@@ -764,6 +764,15 @@ static int vsp1_probe(struct platform_device *pdev)
PTR_ERR(vsp1->fcp));
return PTR_ERR(vsp1->fcp);
}
/*
* When the FCP is present, it handles all bus master accesses
* for the VSP and must thus be used in place of the VSP device
* to map DMA buffers.
*/
vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp);
} else {
vsp1->bus_master = vsp1->dev;
}
/* Configure device parameters based on the version register. */
@@ -330,10 +330,21 @@ bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
bool completed;
if (pipe == NULL)
return;
vsp1_dlm_irq_frame_end(pipe->output->dlm);
completed = vsp1_dlm_irq_frame_end(pipe->output->dlm);
if (!completed) {
/*
* If the DL commit raced with the frame end interrupt, the
* commit ends up being postponed by one frame. Return
* immediately without calling the pipeline's frame end handler
* or incrementing the sequence number.
*/
return;
}
if (pipe->hgo)
vsp1_hgo_frame_end(pipe->hgo);
@@ -1197,7 +1197,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->queue.ops = &vsp1_video_queue_qops;
video->queue.mem_ops = &vb2_dma_contig_memops;
video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
video->queue.dev = video->vsp1->dev;
video->queue.dev = video->vsp1->bus_master;
ret = vb2_queue_init(&video->queue);
if (ret < 0) {
dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
@@ -19,6 +19,7 @@ struct rcar_fcp_device;
#if IS_ENABLED(CONFIG_VIDEO_RENESAS_FCP)
struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np);
void rcar_fcp_put(struct rcar_fcp_device *fcp);
struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp);
int rcar_fcp_enable(struct rcar_fcp_device *fcp);
void rcar_fcp_disable(struct rcar_fcp_device *fcp);
#else
@@ -27,6 +28,10 @@ static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
return ERR_PTR(-ENOENT);
}
static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { }
static inline struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp)
{
return NULL;
}
static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
return 0;
@@ -13,6 +13,7 @@
#ifndef __MEDIA_VSP1_H__
#define __MEDIA_VSP1_H__
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/videodev2.h>
@@ -24,10 +25,17 @@ int vsp1_du_init(struct device *dev);
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
* @height: output frame height
* @callback: frame completion callback function (optional). When a callback
* is provided, the VSP driver guarantees that it will be called once
* and only once for each vsp1_du_atomic_flush() call.
* @callback_data: data to be passed to the frame completion callback
*/
struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
void (*callback)(void *);
void *callback_data;
};
int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
@@ -46,5 +54,7 @@ void vsp1_du_atomic_begin(struct device *dev);
int vsp1_du_atomic_update(struct device *dev, unsigned int rpf,
const struct vsp1_du_atomic_config *cfg);
void vsp1_du_atomic_flush(struct device *dev);
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
#endif /* __MEDIA_VSP1_H__ */
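
For completeness, here is a minimal sketch of how a prepare_fb/cleanup_fb pair might use the two mapping helpers declared above. It mirrors the rcar_du_vsp_plane_prepare_fb()/rcar_du_vsp_plane_cleanup_fb() hooks added earlier in the series; the my_* names and the separate du_dev/vsp_dev parameters are placeholders describing the DU-side buffer, not identifiers from the patches.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <media/vsp1.h>

	/*
	 * Export a coherent (CMA) buffer owned by 'du_dev' as an sg_table and map
	 * it through the VSP1 bus master (the FCP when one is present).
	 */
	static int my_map_fb_plane(struct device *du_dev, struct device *vsp_dev,
				   void *vaddr, dma_addr_t paddr, size_t size,
				   struct sg_table *sgt)
	{
		int ret;

		ret = dma_get_sgtable(du_dev, sgt, vaddr, paddr, size);
		if (ret)
			return ret;

		/* vsp1_du_map_sg() returns the number of mapped entries, 0 on error. */
		ret = vsp1_du_map_sg(vsp_dev, sgt);
		if (!ret) {
			sg_free_table(sgt);
			return -ENOMEM;
		}

		return 0;
	}

	static void my_unmap_fb_plane(struct device *vsp_dev, struct sg_table *sgt)
	{
		vsp1_du_unmap_sg(vsp_dev, sgt);
		sg_free_table(sgt);
	}

Mapping through the VSP matters because, when an FCP is present, the FCP handles all bus master accesses for the VSP, so DMA mappings must be established against that device (obtained via rcar_fcp_get_device()) rather than against the DU or the VSP itself.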