Commit b4cba04f authored by Wentao Xu's avatar Wentao Xu Committed by Rob Clark

drm/msm/mdp5: release SMB (shared memory blocks) in various cases

Release all blocks after the pipe is disabled, even when vsync
didn't happen in some error cases. Allow requesting SMB multiple
times before configuring to hardware, by releasing blocks not
programmed to hardware yet for shrinking case.

This fixes a potential leak of shared memory pool blocks.
Signed-off-by: Wentao Xu <wentaox@codeaurora.org>
Tested-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 99fc1bc4
...@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st ...@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{ {
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *plane_state = state->plane_states[i];
if (!plane)
continue;
mdp5_plane_complete_commit(plane, plane_state);
}
mdp5_disable(mdp5_kms); mdp5_disable(mdp5_kms);
} }
......
...@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane, ...@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj); struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane); uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
......
...@@ -31,8 +31,6 @@ struct mdp5_plane { ...@@ -31,8 +31,6 @@ struct mdp5_plane {
uint32_t nformats; uint32_t nformats;
uint32_t formats[32]; uint32_t formats[32];
bool enabled;
}; };
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
...@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state) ...@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc; return state->fb && state->crtc;
} }
static int mdp5_plane_disable(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);

	DBG("%s: disable", mdp5_plane->name);

	/*
	 * Hand the SMP blocks this pipe requested back to the shared
	 * pool, provided we actually have a kms handle to do so.
	 */
	if (mdp5_kms)
		mdp5_smp_release(mdp5_kms->smp, mdp5_plane->pipe);

	return 0;
}
static void mdp5_plane_destroy(struct drm_plane *plane) static void mdp5_plane_destroy(struct drm_plane *plane)
{ {
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
...@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, ...@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
if (!plane_enabled(state)) { if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true; to_mdp5_plane_state(state)->pending = true;
mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) { } else if (to_mdp5_plane_state(state)->mode_changed) {
int ret; int ret;
to_mdp5_plane_state(state)->pending = true; to_mdp5_plane_state(state)->pending = true;
...@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) ...@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask; return mdp5_plane->flush_mask;
} }
/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);

	/*
	 * A plane that is still scanning out keeps its SMP blocks;
	 * only a disabled plane returns them to the shared pool.
	 */
	if (plane_enabled(plane->state))
		return;

	DBG("%s: free SMP", mdp5_plane->name);
	mdp5_smp_release(mdp5_kms->smp, mdp5_plane->pipe);
}
/* initialize plane */ /* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
......
...@@ -34,22 +34,44 @@ ...@@ -34,22 +34,44 @@
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
* *
* For each block that can be dynamically allocated, it can be either * For each block that can be dynamically allocated, it can be either
* free, or pending/in-use by a client. The updates happen in three steps: * free:
* The block is free.
*
* pending:
* The block is allocated to some client and not free.
*
* configured:
* The block is allocated to some client, and assigned to that
* client in MDP5_MDP_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
*
* The updates happen in the following steps:
* *
* 1) mdp5_smp_request(): * 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of * When plane scanout is setup, calculate required number of
* blocks needed per client, and request. Blocks not inuse or * blocks needed per client, and request. Blocks neither inuse nor
* pending by any other client are added to client's pending * configured nor pending by any other client are added to client's
* set. * pending set.
* For shrinking, blocks in pending but not in configured can be freed
* directly, but those already in configured will be freed later by
* mdp5_smp_commit.
* *
* 2) mdp5_smp_configure(): * 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse) * are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run
* concurrently for the same pipe.
* *
* 3) mdp5_smp_commit(): * 3) mdp5_smp_commit():
* After next vblank, copy pending -> inuse. Optionally update * After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks * MDP5_SMP_ALLOC registers if there are newly unused blocks
* *
* 4) mdp5_smp_release():
* Must be called after the pipe is disabled and no longer uses any SMB
*
* On the next vblank after changes have been committed to hw, the * On the next vblank after changes have been committed to hw, the
* client's pending blocks become it's in-use blocks (and no-longer * client's pending blocks become it's in-use blocks (and no-longer
* in-use blocks become available to other clients). * in-use blocks become available to other clients).
...@@ -77,6 +99,9 @@ struct mdp5_smp { ...@@ -77,6 +99,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS]; struct mdp5_client_smp_state client_state[MAX_CLIENTS];
}; };
static void update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned);
static inline static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp) struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{ {
...@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp, ...@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) { for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt); int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending); clear_bit(blk, ps->pending);
/* don't clear in global smp_state until _commit() */
/* clear in global smp_state if not in configured
* otherwise until _commit()
*/
if (!test_bit(blk, ps->configured))
clear_bit(blk, smp->state);
} }
} }
...@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid ...@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */ /* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{ {
int i, nblks; int i;
unsigned long flags;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
spin_lock_irqsave(&smp->state_lock, flags);
/* clear hw assignment */
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, CID_UNUSED, &assigned);
/* free to global pool */
bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
bitmap_andnot(smp->state, smp->state, assigned, cnt);
/* clear client's infor */
bitmap_zero(ps->pending, cnt);
bitmap_zero(ps->configured, cnt);
bitmap_zero(ps->inuse, cnt);
spin_unlock_irqrestore(&smp->state_lock, flags);
}
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0); set_fifo_thresholds(smp, pipe, 0);
} }
...@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) ...@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i); u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid]; struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt); /*
* if vblank has not happened since last smp_configure
* skip the configure for now
*/
if (!bitmap_equal(ps->inuse, ps->configured, cnt))
continue;
bitmap_copy(ps->configured, ps->pending, cnt);
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned); update_smp_state(smp, cid, &assigned);
} }
} }
/* step #3: after vblank, copy pending -> inuse: */ /* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{ {
int cnt = smp->blk_cnt; int cnt = smp->blk_cnt;
...@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) ...@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other * using, which can be released and made available to other
* clients: * clients:
*/ */
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&smp->state_lock, flags); spin_lock_irqsave(&smp->state_lock, flags);
...@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) ...@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released); update_smp_state(smp, CID_UNUSED, &released);
} }
bitmap_copy(ps->inuse, ps->pending, cnt); bitmap_copy(ps->inuse, ps->configured, cnt);
} }
} }
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
struct mdp5_client_smp_state { struct mdp5_client_smp_state {
mdp5_smp_state_t inuse; mdp5_smp_state_t inuse;
mdp5_smp_state_t configured;
mdp5_smp_state_t pending; mdp5_smp_state_t pending;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment