Commit ed851963 authored by Rob Clark

drm/msm/mdp5: atomic

Convert mdp5 over to atomic helpers.  Extend/wrap drm_plane_state to
track plane zpos and to keep track of the state needed when applying the
atomic update.  In mdp5's plane->atomic_check() we also need to check
for updates which require SMP reallocation, in order to trigger full
modeset.
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 3e2f29e4
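
Before the diff itself, a condensed sketch of the pattern the commit message describes may help. The driver wraps drm_plane_state in its own mdp5_plane_state so each plane carries a zpos value plus the mixer stage derived from it, and the CRTC's atomic_check() sorts the planes that will end up on the CRTC by zpos and hands out stages from STAGE_BASE upward. The struct, macro and comparator below mirror the hunks that follow; assign_stages() is only an illustrative wrapper (the driver does this inline in mdp5_crtc_atomic_check()), and enum mdp_mixer_stage_id / STAGE_BASE come from the MDP register headers.

#include <linux/kernel.h>
#include <linux/sort.h>
#include <drm/drm_crtc.h>   /* drm_plane_state lives here in this era of the tree */

/* Driver-private plane state: the base drm_plane_state plus the requested
 * zpos and the mixer stage resolved from it (mirrors mdp5_kms.h below). */
struct mdp5_plane_state {
        struct drm_plane_state base;
        int zpos;                       /* requested stacking order */
        enum mdp_mixer_stage_id stage;  /* assigned in crtc atomic_check() */
};
#define to_mdp5_plane_state(x) \
        container_of(x, struct mdp5_plane_state, base)

struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
        const struct plane_state *pa = a, *pb = b;

        return pa->state->zpos - pb->state->zpos;
}

/* Illustrative helper only: sort by zpos, then assign mixer stages bottom-up.
 * Private (primary) planes reset to zpos 0 and public planes to a unique
 * value > 0, so a naive userspace still gets a sane stacking order. */
static void assign_stages(struct plane_state *pstates, int cnt)
{
        int i;

        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
        for (i = 0; i < cnt; i++)
                pstates[i].state->stage = STAGE_BASE + i;
}
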
@@ -18,6 +18,7 @@
#include "mdp5_kms.h"
#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -41,25 +42,11 @@ struct mdp5_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
/* the fb that we logically (from PoV of KMS API) hold a ref
* to. Which we may not yet be scanning out (we may still
* be scanning out previous in case of page_flip while waiting
* for gpu rendering to complete:
*/
struct drm_framebuffer *fb;
/* the fb that we currently hold a scanout ref to: */
struct drm_framebuffer *scanout_fb;
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
struct mdp_irq vblank;
struct mdp_irq err;
};
@@ -113,41 +100,6 @@ static void crtc_flush_all(struct drm_crtc *crtc)
crtc_flush(crtc, flush_mask);
}
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_framebuffer *old_fb = mdp5_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp5_crtc->base.primary->fb = new_fb;
mdp5_crtc->fb = new_fb;
if (old_fb)
drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
}
/* unlike update_fb(), take a ref to the new scanout fb *before* updating
* plane, then call this. Needed to ensure we don't unref the buffer that
* is actually still being scanned out.
*
* Note that this whole thing goes away with atomic.. since we can defer
* calling into driver until rendering is done.
*/
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
if (mdp5_crtc->scanout_fb)
drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
mdp5_crtc->scanout_fb);
mdp5_crtc->scanout_fb = fb;
/* enable vblank to complete flip: */
request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
@@ -166,6 +118,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
DBG("%s: send event: %p", mdp5_crtc->name, event);
drm_send_vblank_event(dev, mdp5_crtc->id, event);
}
}
@@ -175,39 +128,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
mdp5_plane_complete_flip(plane);
}
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp5_crtc *mdp5_crtc =
container_of(cb, struct mdp5_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp5_crtc->base;
struct drm_framebuffer *fb = mdp5_crtc->fb;
if (!fb)
return;
drm_framebuffer_reference(fb);
mdp5_plane_set_scanout(crtc->primary, fb);
update_scanout(crtc, fb);
crtc_flush_all(crtc);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp5_crtc *mdp5_crtc =
container_of(work, struct mdp5_crtc, unref_fb_work);
struct drm_device *dev = mdp5_crtc->base.dev;
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
mutex_unlock(&dev->mode_config.mutex);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
kfree(mdp5_crtc);
}
@@ -257,7 +182,6 @@ static void blend_setup(struct drm_crtc *crtc)
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
enum mdp_mixer_stage_id stage;
unsigned long flags;
#define blender(stage) ((stage) - STAGE_BASE)
@@ -270,10 +194,8 @@ static void blend_setup(struct drm_crtc *crtc)
goto out;
for_each_plane_on_crtc(crtc, plane) {
struct mdp5_overlay_info *overlay;
overlay = mdp5_plane_get_overlay_info(plane);
stage = overlay->zorder;
enum mdp_mixer_stage_id stage =
to_mdp5_plane_state(plane->state)->stage;
/*
* Note: This cannot happen with current implementation but
@@ -303,18 +225,17 @@ static void blend_setup(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
unsigned long flags;
int ret;
struct drm_display_mode *mode;
mode = adjusted_mode;
if (WARN_ON(!crtc->state))
return;
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp5_crtc->name, mode->base.id, mode->name,
@@ -325,38 +246,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
/* request a free CTL, if none is already allocated for this CRTC */
if (!mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
if (!mdp5_crtc->ctl)
return -EBUSY;
}
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->primary->fb);
ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
drm_framebuffer_unreference(crtc->primary->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
}
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
update_fb(crtc, crtc->primary->fb);
update_scanout(crtc, crtc->primary->fb);
/* crtc_flush_all(crtc) will be called in _commit callback */
return 0;
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -370,79 +264,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
DBG("%s", mdp5_crtc->name);
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
crtc_flush_all(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp5_disable(get_kms(crtc));
}
static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_plane *plane = crtc->primary;
struct drm_display_mode *mode = &crtc->mode;
int ret;
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->primary->fb);
ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
drm_framebuffer_unreference(crtc->primary->fb);
return ret;
}
update_fb(crtc, crtc->primary->fb);
update_scanout(crtc, crtc->primary->fb);
crtc_flush_all(crtc);
return 0;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
struct plane_state {
struct drm_plane *plane;
struct mdp5_plane_state *state;
};
static int pstate_cmp(const void *a, const void *b)
{
struct plane_state *pa = (struct plane_state *)a;
struct plane_state *pb = (struct plane_state *)b;
return pa->state->zpos - pb->state->zpos;
}
static void mdp5_crtc_disable(struct drm_crtc *crtc)
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE3 + 1];
int cnt = 0, i;
DBG("%s", mdp5_crtc->name);
DBG("%s: check", mdp5_crtc->name);
if (mdp5_crtc->ctl) {
mdp5_ctl_release(mdp5_crtc->ctl);
mdp5_crtc->ctl = NULL;
if (mdp5_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
if (WARN_ON(!mdp5_crtc->ctl))
return -EINVAL;
}
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
for_each_pending_plane_on_crtc(state->state, crtc, plane) {
struct drm_plane_state *pstate;
if (cnt >= ARRAY_SIZE(pstates)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
pstate = state->state->plane_states[drm_plane_index(plane)];
/* plane might not have changed, in which case take
* current state:
*/
if (!pstate)
pstate = plane->state;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
cnt++;
}
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
for (i = 0; i < cnt; i++) {
pstates[i].state->stage = STAGE_BASE + i;
DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
pipe2name(mdp5_plane_pipe(pstates[i].plane)),
pstates[i].state->stage);
}
return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
DBG("%s: begin", mdp5_crtc->name);
}
static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_object *obj;
unsigned long flags;
if (mdp5_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
DBG("%s: flush", mdp5_crtc->name);
obj = msm_framebuffer_bo(new_fb, 0);
WARN_ON(mdp5_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = event;
mdp5_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, new_fb);
blend_setup(crtc);
crtc_flush_all(crtc);
request_pending(crtc, PENDING_FLIP);
return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
if (mdp5_crtc->ctl && !crtc->state->enable) {
mdp5_ctl_release(mdp5_crtc->ctl);
mdp5_crtc->ctl = NULL;
}
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -453,28 +387,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = mdp5_crtc_page_flip,
.page_flip = drm_atomic_helper_page_flip,
.set_property = mdp5_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.dpms = mdp5_crtc_dpms,
.mode_fixup = mdp5_crtc_mode_fixup,
.mode_set = mdp5_crtc_mode_set,
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp5_crtc_prepare,
.commit = mdp5_crtc_commit,
.mode_set_base = mdp5_crtc_mode_set_base,
.load_lut = mdp5_crtc_load_lut,
.disable = mdp5_crtc_disable,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -483,7 +422,6 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
}
}
@@ -560,92 +498,6 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
crtc_flush(crtc, flush_mask);
}
static int count_planes(struct drm_crtc *crtc)
{
struct drm_plane *plane;
int cnt = 0;
for_each_plane_on_crtc(crtc, plane)
cnt++;
return cnt;
}
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
struct drm_plane *plane)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
if (plane)
plane->crtc = crtc;
DBG("%s: %d planes attached", mdp5_crtc->name, count_planes(crtc));
blend_setup(crtc);
if (mdp5_crtc->enabled)
crtc_flush_all(crtc);
}
int mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct device *dev = crtc->dev->dev;
const struct mdp5_cfg_hw *hw_cfg;
bool private_plane = (plane == crtc->primary);
struct mdp5_overlay_info overlay_info;
enum mdp_mixer_stage_id stage = STAGE_BASE;
int max_nb_planes;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
max_nb_planes = hw_cfg->lm.nb_stages;
if (count_planes(crtc) >= max_nb_planes) {
dev_err(dev, "%s: max # of planes (%d) reached\n",
mdp5_crtc->name, max_nb_planes);
return -EBUSY;
}
/*
* Set default z-ordering depending on the type of plane
* private -> lower stage
* public -> topmost stage
*
* TODO: add a property to give userspace an API to change this...
* (will come in a subsequent patch)
*/
if (private_plane) {
stage = STAGE_BASE;
} else {
struct drm_plane *attached_plane;
for_each_plane_on_crtc(crtc, attached_plane) {
struct mdp5_overlay_info *overlay;
if (!attached_plane)
continue;
overlay = mdp5_plane_get_overlay_info(attached_plane);
stage = max(stage, overlay->zorder);
}
stage++;
}
overlay_info.zorder = stage;
mdp5_plane_set_overlay_info(plane, &overlay_info);
DBG("%s: %s plane %s set to stage %d by default", mdp5_crtc->name,
private_plane ? "private" : "public",
pipe2name(mdp5_plane_pipe(plane)), overlay_info.zorder);
set_attach(crtc, mdp5_plane_pipe(plane), plane);
return 0;
}
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
/* don't actually detach our primary plane: */
if (crtc->primary == plane)
return;
set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -680,11 +532,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
pipe2name(mdp5_plane_pipe(plane)), id);
drm_flip_work_init(&mdp5_crtc->unref_fb_work,
"unref fb", unref_fb_worker);
INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
@@ -67,9 +67,32 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
struct mdp5_overlay_info {
enum mdp_mixer_stage_id zorder;
struct mdp5_plane_state {
struct drm_plane_state base;
/* "virtual" zpos.. we calculate actual mixer-stage at runtime
* by sorting the attached planes by zpos and then assigning
* mixer stage lowest to highest. Private planes get default
* zpos of zero, and public planes a unique value that is
* greater than zero. This way, things work out if a naive
* userspace assigns planes to a crtc without setting zpos.
*/
int zpos;
/* the actual mixer stage, calculated in crtc->atomic_check()
* NOTE: this should move to mdp5_crtc_state, when that exists
*/
enum mdp_mixer_stage_id stage;
/* some additional transactional status to help us know in the
* apply path whether we need to update SMP allocation, and
* whether current update is still pending:
*/
bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
@@ -154,18 +177,7 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void mdp5_plane_set_overlay_info(struct drm_plane *plane,
const struct mdp5_overlay_info *overlay_info);
struct mdp5_overlay_info *mdp5_plane_get_overlay_info(struct drm_plane *plane);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
@@ -177,8 +189,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
int mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
@@ -31,8 +31,6 @@ struct mdp5_plane {
uint32_t flush_mask; /* used to commit pipe registers */
struct mdp5_overlay_info overlay_info;
uint32_t nformats;
uint32_t formats[32];
@@ -40,31 +38,24 @@ struct mdp5_plane {
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb);
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static int mdp5_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
static bool plane_enabled(struct drm_plane_state *state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
mdp5_plane->enabled = true;
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
drm_framebuffer_reference(fb);
return mdp5_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
return state->fb && state->crtc;
}
static int mdp5_plane_disable(struct drm_plane *plane)
@@ -80,46 +71,19 @@ static int mdp5_plane_disable(struct drm_plane *plane)
mdp5_smp_release(mdp5_kms->smp, pipe);
}
/* TODO detaching now will cause us not to get the last
* vblank and mdp5_smp_commit().. so other planes will
* still see smp blocks previously allocated to us as
* in-use..
*/
if (plane->crtc)
mdp5_crtc_detach(plane->crtc, plane);
return 0;
}
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct msm_drm_private *priv = plane->dev->dev_private;
if (priv->kms)
mdp5_plane_disable(plane);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
}
void mdp5_plane_set_overlay_info(struct drm_plane *plane,
const struct mdp5_overlay_info *overlay_info)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
memcpy(&mdp5_plane->overlay_info, overlay_info, sizeof(*overlay_info));
}
struct mdp5_overlay_info *mdp5_plane_get_overlay_info(
struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
return &mdp5_plane->overlay_info;
}
/* helper to install properties which are common to planes and crtcs */
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
@@ -134,70 +98,185 @@ int mdp5_plane_set_property(struct drm_plane *plane,
return -EINVAL;
}
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
mdp5_state->zpos = 0;
} else {
mdp5_state->zpos = 1 + drm_plane_index(plane);
}
plane->state = &mdp5_state->base;
}
static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (WARN_ON(!plane->state))
return NULL;
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
mdp5_state->mode_changed = false;
mdp5_state->pending = false;
return &mdp5_state->base;
}
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
kfree(to_mdp5_plane_state(state));
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = mdp5_plane_update,
.disable_plane = mdp5_plane_disable,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.set_property = mdp5_plane_set_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
};
static int get_fb_addr(struct drm_plane *plane, struct drm_framebuffer *fb,
uint32_t iova[MAX_PLANE])
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
int i;
for (i = 0; i < nplanes; i++) {
struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
}
for (; i < MAX_PLANE; i++)
iova[i] = 0;
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
static void set_scanout_locked(struct drm_plane *plane,
uint32_t pitches[MAX_PLANE], uint32_t src_addr[MAX_PLANE])
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
WARN_ON(!spin_is_locked(&mdp5_plane->pipe_lock));
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(pitches[1]));
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *old_state = plane->state;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
MDP5_PIPE_SRC_STRIDE_B_P2(pitches[2]) |
MDP5_PIPE_SRC_STRIDE_B_P3(pitches[3]));
DBG("%s: check (%d -> %d)", mdp5_plane->name,
plane_enabled(old_state), plane_enabled(state));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), src_addr[0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), src_addr[1]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), src_addr[2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), src_addr[3]);
if (plane_enabled(state) && plane_enabled(old_state)) {
/* we cannot change SMP block configuration during scanout: */
bool full_modeset = false;
if (state->fb->pixel_format != old_state->fb->pixel_format) {
DBG("%s: pixel_format change!", mdp5_plane->name);
full_modeset = true;
}
if (state->src_w != old_state->src_w) {
DBG("%s: src_w change!", mdp5_plane->name);
full_modeset = true;
}
if (to_mdp5_plane_state(old_state)->pending) {
DBG("%s: still pending!", mdp5_plane->name);
full_modeset = true;
}
if (full_modeset) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state->state, state->crtc);
crtc_state->mode_changed = true;
to_mdp5_plane_state(state)->mode_changed = true;
}
} else {
to_mdp5_plane_state(state)->mode_changed = true;
}
return 0;
}
void mdp5_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
static void mdp5_plane_atomic_update(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
uint32_t src_addr[MAX_PLANE];
unsigned long flags;
struct drm_plane_state *state = plane->state;
get_fb_addr(plane, fb, src_addr);
DBG("%s: update", mdp5_plane->name);
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
} else {
unsigned long flags;
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
set_scanout_locked(plane, fb->pitches, src_addr);
set_scanout_locked(plane, state->fb);
spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
};
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
plane->fb = fb;
}
int mdp5_plane_mode_set(struct drm_plane *plane,
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -211,7 +290,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
uint32_t src_addr[MAX_PLANE];
unsigned long flags;
int ret;
@@ -255,10 +333,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
/* TODO calc phasey_step, vdecm */
}
ret = get_fb_addr(plane, fb, src_addr);
if (ret)
return ret;
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
@@ -320,22 +394,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
set_scanout_locked(plane, fb->pitches, src_addr);
set_scanout_locked(plane, fb);
spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
/* TODO detach from old crtc (if we had more than one) */
ret = mdp5_crtc_attach(crtc, plane);
return ret;
}
void mdp5_plane_complete_flip(struct drm_plane *plane)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
DBG("%s: complete flip", mdp5_plane->name);
mdp5_smp_commit(mdp5_kms->smp, pipe);
to_mdp5_plane_state(plane->state)->pending = false;
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -379,9 +455,13 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
spin_lock_init(&mdp5_plane->pipe_lock);
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type);
if (ret)
goto fail;
drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
@@ -70,9 +70,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev);
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if ((_plane)->state->crtc == (_crtc))
static inline bool
__plane_will_be_attached_to_crtc(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc)
{
int idx = drm_plane_index(plane);
/* if plane is modified in incoming state, use the new state: */
if (state->plane_states[idx])
return state->plane_states[idx]->crtc == crtc;
/* otherwise, current state: */
return plane->state->crtc == crtc;
}
#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if (({struct drm_plane_state *_ps = (_state)->plane_states[drm_plane_index(_plane)]; \
_ps && _ps->crtc == (_crtc);}))
if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc)))
#endif /* __MSM_KMS_H__ */
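
One closing note on the SMP point in the commit message: mdp5_plane_atomic_check() above does not allow the SMP-relevant parameters of a pipe to change while it keeps scanning out; such updates are escalated to a full modeset instead. Condensed into one predicate (needs_full_modeset() is purely an illustrative name, and to_mdp5_plane_state() is the cast helper from mdp5_kms.h above), the rule is roughly:

/* Roughly the checks made in mdp5_plane_atomic_check() when the plane is
 * enabled both before and after the update: a pixel format or source width
 * change would alter the SMP block allocation, and a still-pending update
 * means the previous allocation has not been committed yet.  Either way,
 * the fast path cannot be used. */
static bool needs_full_modeset(struct drm_plane_state *old_state,
                struct drm_plane_state *new_state)
{
        return (new_state->fb->pixel_format != old_state->fb->pixel_format) ||
               (new_state->src_w != old_state->src_w) ||
               to_mdp5_plane_state(old_state)->pending;
}

When this holds, the plane's atomic_check() marks both crtc_state->mode_changed (via drm_atomic_get_crtc_state()) and its own mode_changed flag, so the commit takes the modeset path and mdp5_plane_mode_set() can reallocate SMP blocks safely.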