Commit 58aa1c50 authored by Nicholas Kazlauskas, committed by Alex Deucher

drm/amd/display: Use vblank control events for PSR enable/disable

[Why]
PSR can disable the HUBP along with the OTG when PSR is active.

We'll hit a pageflip timeout when the OTG is disabled because we're no
longer updating the CRTC vblank counter and the pflip high IRQ will
not fire on the flip.

In order to fix the page flip timeout we should modify the enter/exit
conditions to match DRM requirements.

[How]
Use our deferred handlers for DRM vblank control to notify DMCU(B)
when it can enable or disable PSR based on whether vblank is disabled or
enabled respectively.

We'll need to pass along the stream with the notification now because
we want to access the CRTC state while the CRTC is locked to get the
stream state prior to the commit.

Retain a reference to the stream so it remains safe to continue to
access and release that reference once we're done with it.
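
In outline, this is the usual retain-before-queue, release-in-worker
pattern. A condensed sketch (dc_stream_retain()/dc_stream_release() are
the real DC refcount helpers; the surrounding flow is abbreviated from
the diff below):

	/* Commit side: take a reference while the CRTC is locked. */
	if (acrtc_state->stream) {
		dc_stream_retain(acrtc_state->stream);
		work->stream = acrtc_state->stream;
	}
	queue_work(dm->vblank_control_workqueue, &work->work);

	/* Worker side: drop the reference once the stream has been
	 * used. dc_stream_release() tolerates NULL, so the release
	 * can be unconditional. */
	dc_stream_release(work->stream);
	kfree(work);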

Enable/disable logic follows what we were previously doing in
update_planes.

The workqueue has to be flushed before programming streams or planes
to ensure that we exit out of idle optimizations and PSR before
these events occur if necessary.
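
The ordering constraint is a plain flush-before-program rule; sketched
below with a hypothetical stand-in for the stream/plane programming
step:

	/* Drain any queued PSR enable/disable work so PSR and idle
	 * optimizations are known to be exited before we touch the
	 * hardware. */
	flush_workqueue(dm->vblank_control_workqueue);
	program_streams_and_planes(dc_state);	/* illustrative stand-in */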

To keep the skip count logic the same (avoiding PSR enablement on
FBCON) we copy the allow condition onto the DM IRQ parameters - a
field that we can actually access from the worker.
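
Sketched roughly (the entry condition is abbreviated relative to the
actual hunks below):

	/* Commit path, under the CRTC lock: record whether PSR entry
	 * is currently acceptable. */
	if (fast_update && psr_feature_enabled)	/* abbreviated */
		acrtc->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
	else
		acrtc->dm_irq_params.allow_psr_entry = false;

	/* Vblank worker, later: only enter PSR if the commit path
	 * allowed it. */
	if (!vblank_work->enable &&
	    vblank_work->acrtc->dm_irq_params.allow_psr_entry)
		amdgpu_dm_psr_enable(vblank_work->stream);
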
Reviewed-by: Roman Li <Roman.Li@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 09a5df6c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1061,7 +1061,22 @@ static void vblank_control_worker(struct work_struct *work)
 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
 
+	/* Control PSR based on vblank requirements from OS */
+	if (vblank_work->stream && vblank_work->stream->link) {
+		if (vblank_work->enable) {
+			if (vblank_work->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_disable(vblank_work->stream);
+		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
+			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+			amdgpu_dm_psr_enable(vblank_work->stream);
+		}
+	}
+
 	mutex_unlock(&dm->dc_lock);
 
+	dc_stream_release(vblank_work->stream);
+
 	kfree(vblank_work);
 }
@@ -6018,6 +6033,11 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	work->acrtc = acrtc;
 	work->enable = enable;
 
+	if (acrtc_state->stream) {
+		dc_stream_retain(acrtc_state->stream);
+		work->stream = acrtc_state->stream;
+	}
+
 	queue_work(dm->vblank_control_workqueue, &work->work);
 #endif
@@ -8623,6 +8643,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	/* Update the planes if changed or disable if we don't have any. */
 	if ((planes_count || acrtc_state->active_planes == 0) &&
 			acrtc_state->stream) {
+		/*
+		 * If PSR or idle optimizations are enabled then flush out
+		 * any pending work before hardware programming.
+		 */
+		flush_workqueue(dm->vblank_control_workqueue);
+
 		bundle->stream_update.stream = acrtc_state->stream;
 		if (new_pcrtc_state->mode_changed) {
 			bundle->stream_update.src = acrtc_state->stream->src;
@@ -8691,16 +8717,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
-		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
-				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
-			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-					acrtc_state->stream->dm_stream_context;
+
+		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
+		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+			struct amdgpu_dm_connector *aconn =
+				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
 
 			if (aconn->psr_skip_count > 0)
 				aconn->psr_skip_count--;
-			else
-				amdgpu_dm_psr_enable(acrtc_state->stream);
+
+			/* Allow PSR when skip count is 0. */
+			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+		} else {
+			acrtc_attach->dm_irq_params.allow_psr_entry = false;
 		}
 
 		mutex_unlock(&dm->dc_lock);
@@ -8949,8 +8979,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	if (dc_state) {
 		/* if there mode set or reset, disable eDP PSR */
-		if (mode_set_reset_required)
+		if (mode_set_reset_required) {
+			flush_workqueue(dm->vblank_control_workqueue);
 			amdgpu_dm_psr_disable_all(dm);
+		}
 
 		dm_enable_per_frame_crtc_master_sync(dc_state);
 		mutex_lock(&dm->dc_lock);

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -91,12 +91,14 @@ struct dm_compressor_info {
  * @work: Kernel work data for the work event
  * @dm: amdgpu display manager device
  * @acrtc: amdgpu CRTC instance for which the event has occurred
+ * @stream: DC stream for which the event has occurred
  * @enable: true if enabling vblank
  */
 struct vblank_control_work {
 	struct work_struct work;
 	struct amdgpu_display_manager *dm;
 	struct amdgpu_crtc *acrtc;
+	struct dc_stream_state *stream;
 	bool enable;
 };

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -33,6 +33,7 @@ struct dm_irq_params {
 	struct mod_vrr_params vrr_params;
 	struct dc_stream_state *stream;
 	int active_planes;
+	bool allow_psr_entry;
 	struct mod_freesync_config freesync_config;
 #ifdef CONFIG_DEBUG_FS