Commit ec5fa9fc authored by Wayne Lin, committed by Alex Deucher

drm/amd/display: Adjust the MST resume flow

[Why]
In drm_dp_mst_topology_mgr_resume() today, it resumes the mst branch
to be ready to handle mst mode and also consecutively does the mst
topology probing. This gives the driver a chance to fire a hotplug
event before the old state is restored, and userspace then reacts to
that hotplug event based on a wrong state.

[How]
Adjust the mst resume flow as:
1. Set DPCD to resume the mst branch status
2. Restore the source's old state
3. Do mst topology probing

For drm_dp_mst_topology_mgr_resume(), it's better to pull the
topology probing work out into a 2nd-step procedure of the mst
resume. A follow-up patch in drm will do that.
Reviewed-by: Chao-kai Wang <stylon.wang@amd.com>
Cc: Mario Limonciello <mario.limonciello@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Acked-by: Stylon Wang <stylon.wang@amd.com>
Signed-off-by: Wayne Lin <wayne.lin@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ffd6bde3
@@ -2340,14 +2340,62 @@ static int dm_late_init(void *handle)
	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;
	u8 guid[16];
	u64 tmp64;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out_fail;

	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
				 DP_MST_EN |
				 DP_UP_REQ_EN |
				 DP_UPSTREAM_IS_SRC);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
		goto out_fail;
	}

	/* Some hubs forget their guids after they resume */
	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
	if (ret != 16) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	if (memchr_inv(guid, 0, 16) == NULL) {
		tmp64 = get_jiffies_64();
		memcpy(&guid[0], &tmp64, sizeof(u64));
		memcpy(&guid[8], &tmp64, sizeof(u64));

		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
		if (ret != 16) {
			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
			goto out_fail;
		}
	}

	memcpy(mgr->mst_primary->guid, guid, 16);

out_fail:
	mutex_unlock(&mgr->lock);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
@@ -2369,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
			if (!dp_is_lttpr_present(aconnector->dc_link))
				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);

			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
					aconnector->dc_link);
				need_hotplug = true;
			}

			/* TODO: move resume_mst_branch_status() into drm mst resume again
			 * once topology probing work is pulled out from mst resume into mst
			 * resume 2nd step. mst resume 2nd step should be called after old
			 * state getting restored (i.e. drm_atomic_helper_resume()).
			 */
			resume_mst_branch_status(mgr);
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@@ -2774,7 +2819,8 @@ static int dm_resume(void *handle)
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;
	int i, r, j, ret;
	bool need_hotplug = false;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;
@@ -2872,7 +2918,7 @@ static int dm_resume(void *handle)
			continue;

		/*
		 * this is the case when traversing through already created
		 * this is the case when traversing through already created end sink
		 * MST connectors, should be skipped
		 */
		if (aconnector && aconnector->mst_root)
@@ -2932,6 +2978,27 @@ static int dm_resume(void *handle)
	dm->cached_state = NULL;

	/* Do mst topology probing after resuming cached state*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
		if (ret < 0) {
			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
					aconnector->dc_link);
			need_hotplug = true;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(ddev);

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);