Commit afca033f authored by Roman Li, committed by Alex Deucher

drm/amd/display: Add periodic detection for IPS

[Why]
HPD interrupts cannot be handled in the IPS2 state, so a display
topology change that occurs while the system is in IPS2 can be
missed.

[How]
Implement a worker that wakes up every 5 seconds while in IPS to
check for HPD, as sketched below.
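
In outline (a condensed sketch of the loop added in
amdgpu_dm_idle_worker() below; dc_lock handling and the
running flag are elided):

    while (enable && dc->idle_optimizations_allowed) {
        dc_allow_idle_optimizations(dc, false); /* exit IPS */
        fsleep(HPD_DETECTION_TIME_uS);          /* let HPD latch */
        if (!amdgpu_dm_psr_is_active_allowed(dm))
            break;
        dc_allow_idle_optimizations(dc, true);  /* re-enter IPS */
        fsleep(HPD_DETECTION_PERIOD_uS);        /* wait ~5 s */
    }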
Reviewed-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5419a207
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1838,6 +1838,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
 	}
 
+	if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+		adev->dm.idle_workqueue = idle_create_workqueue(adev);
+
 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1935,6 +1938,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 		adev->dm.vblank_control_workqueue = NULL;
 	}
 
+	if (adev->dm.idle_workqueue) {
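+		/* Tell the worker to stop, then wait for the in-flight iteration */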
+		if (adev->dm.idle_workqueue->running) {
+			adev->dm.idle_workqueue->enable = false;
+			flush_work(&adev->dm.idle_workqueue->work);
+		}
+
+		kfree(adev->dm.idle_workqueue);
+		adev->dm.idle_workqueue = NULL;
+	}
+
 	amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -137,6 +137,13 @@ struct vblank_control_work {
 	bool enable;
 };
 
+struct idle_workqueue {
+	struct work_struct work;
+	struct amdgpu_display_manager *dm;
+	bool enable;
+	bool running;
+};
+
 /**
  * struct amdgpu_dm_backlight_caps - Information about backlight
  *
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -487,6 +494,7 @@ struct amdgpu_display_manager {
 	 * Deferred work for vblank control events.
 	 */
 	struct workqueue_struct *vblank_control_workqueue;
+	struct idle_workqueue *idle_workqueue;
 
 	struct drm_atomic_state *cached_state;
 	struct dc_state *cached_dc_state;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -956,4 +964,5 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 					     struct drm_crtc *crtc);
 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev);
 
 #endif /* __AMDGPU_DM_H__ */
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -35,6 +35,9 @@
 #include "amdgpu_dm_trace.h"
 #include "amdgpu_dm_debugfs.h"
 
+#define HPD_DETECTION_PERIOD_uS 5000000
+#define HPD_DETECTION_TIME_uS 1000
+
 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
 {
 	struct drm_crtc *crtc = &acrtc->base;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -146,11 +149,65 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
 		struct amdgpu_dm_connector *aconn =
 			(struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
 
-		if (!aconn->disallow_edp_enter_psr)
+		if (!aconn->disallow_edp_enter_psr) {
+			struct amdgpu_display_manager *dm = vblank_work->dm;
+
 			amdgpu_dm_psr_enable(vblank_work->stream);
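+			/* Kick off periodic HPD detection while PSR/IPS is engaged */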
+			if (dm->idle_workqueue &&
+			    dm->dc->idle_optimizations_allowed &&
+			    dm->idle_workqueue->enable &&
+			    !dm->idle_workqueue->running)
+				schedule_work(&dm->idle_workqueue->work);
+		}
 	}
 }
 
+static void amdgpu_dm_idle_worker(struct work_struct *work)
+{
+	struct idle_workqueue *idle_work;
+
+	idle_work = container_of(work, struct idle_workqueue, work);
+	idle_work->dm->idle_workqueue->running = true;
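+	/* Sleep one full period before the first check */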
+	fsleep(HPD_DETECTION_PERIOD_uS);
+	mutex_lock(&idle_work->dm->dc_lock);
+	while (idle_work->enable) {
+		if (!idle_work->dm->dc->idle_optimizations_allowed)
+			break;
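+		/* Exit IPS briefly so a pending HPD interrupt can be handled */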
+		dc_allow_idle_optimizations(idle_work->dm->dc, false);
+
+		mutex_unlock(&idle_work->dm->dc_lock);
+		fsleep(HPD_DETECTION_TIME_uS);
+		mutex_lock(&idle_work->dm->dc_lock);
+
+		if (!amdgpu_dm_psr_is_active_allowed(idle_work->dm))
+			break;
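+		/* Re-enter IPS and sleep until the next detection period */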
+		dc_allow_idle_optimizations(idle_work->dm->dc, true);
+		mutex_unlock(&idle_work->dm->dc_lock);
+		fsleep(HPD_DETECTION_PERIOD_uS);
+		mutex_lock(&idle_work->dm->dc_lock);
+	}
+	mutex_unlock(&idle_work->dm->dc_lock);
+	idle_work->dm->idle_workqueue->running = false;
+}
+
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev)
+{
+	struct idle_workqueue *idle_work;
+
+	idle_work = kzalloc(sizeof(*idle_work), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(idle_work))
+		return NULL;
+
+	idle_work->dm = &adev->dm;
+	idle_work->enable = false;
+	idle_work->running = false;
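+	/* Queued from the vblank worker once PSR is enabled */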
+	INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker);
+
+	return idle_work;
+}
+
 static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 {
 	struct vblank_control_work *vblank_work =
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1261,7 +1261,10 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
 
 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
 {
-	/* TODO: add periodic detection implementation */
+	struct amdgpu_device *adev = ctx->driver_context;
+
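+	/* DC toggles this flag to gate the loop in amdgpu_dm_idle_worker() */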
+	if (adev->dm.idle_workqueue)
+		adev->dm.idle_workqueue->enable = enable;
 }
 
 void dm_helpers_dp_mst_update_branch_bandwidth(
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -223,3 +223,31 @@ bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
 	return dc_set_psr_allow_active(dm->dc, false);
 }
 
+/*
+ * amdgpu_dm_psr_is_active_allowed() - check if psr is allowed on any stream
+ * @dm: pointer to amdgpu_display_manager
+ *
+ * Return: true if allowed
+ */
+bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
+{
+	unsigned int i;
+	bool allow_active = false;
+
+	for (i = 0; i < dm->dc->current_state->stream_count; i++) {
+		struct dc_link *link;
+		struct dc_stream_state *stream = dm->dc->current_state->streams[i];
+
+		link = stream->link;
+		if (!link)
+			continue;
+
+		if (link->psr_settings.psr_feature_enabled &&
+		    link->psr_settings.psr_allow_active) {
+			allow_active = true;
+			break;
+		}
+	}
+
+	return allow_active;
+}
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -36,5 +36,6 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
 
 #endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */