Commit 1d65bd6b authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.2-2023-01-25' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.2-2023-01-25:

amdgpu:
- GC11.x fixes
- SMU13.0.0 fix
- Freesync video fix
- DP MST fixes

drm:
- DP MST kref fix
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230125220153.320248-1-alexander.deucher@amd.com
parents af0af908 4b069553
@@ -35,6 +35,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");

 static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 {
@@ -40,6 +40,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");

 static int mes_v11_0_hw_fini(void *handle);
 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -196,7 +198,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
     mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
     mes_add_queue_pkt.tma_addr = input->tma_addr;
     mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
-    mes_add_queue_pkt.trap_en = 1;

     /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
     mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
@@ -8881,6 +8881,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
         if (!dm_old_crtc_state->stream)
             goto skip_modeset;

+        /* Unset freesync video if it was active before */
+        if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+            dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+            dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+        }
+
+        /* Now check if we should set freesync video mode */
         if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
             is_timing_unchanged_for_freesync(new_crtc_state,
                              old_crtc_state)) {
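For readers outside the amdgpu code base, the hunk above is a reset-then-re-evaluate pattern: any fixed-refresh (freesync video) configuration inherited from the old CRTC state is cleared first, and only then does the driver decide whether the new timing still qualifies. The following is a minimal, self-contained sketch of that pattern using hypothetical types and names (vrr_config, update_vrr_config and friends are illustrative, not the amdgpu structures), not part of the diff itself:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's VRR bookkeeping. */
enum vrr_state { VRR_INACTIVE, VRR_ACTIVE_VARIABLE, VRR_ACTIVE_FIXED };

struct vrr_config {
    enum vrr_state state;
    uint32_t fixed_refresh_uhz;   /* fixed rate in micro-Hz, 0 when unused */
};

/* Clear any leftover fixed-rate config first, then re-apply it only if the
 * new timing still qualifies for fixed-refresh (freesync video) operation. */
static void update_vrr_config(const struct vrr_config *old_cfg,
                              struct vrr_config *new_cfg,
                              bool timing_unchanged, uint32_t requested_uhz)
{
    if (old_cfg->state == VRR_ACTIVE_FIXED) {
        new_cfg->state = VRR_INACTIVE;
        new_cfg->fixed_refresh_uhz = 0;
    }

    if (timing_unchanged && requested_uhz) {
        new_cfg->state = VRR_ACTIVE_FIXED;
        new_cfg->fixed_refresh_uhz = requested_uhz;
    }
}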
@@ -9490,6 +9497,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
     struct drm_connector_state *old_con_state, *new_con_state;
     struct drm_crtc *crtc;
     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+    struct drm_dp_mst_topology_mgr *mgr;
+    struct drm_dp_mst_topology_state *mst_state;
     struct drm_plane *plane;
     struct drm_plane_state *old_plane_state, *new_plane_state;
     enum dc_status status;
@@ -9745,6 +9754,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         lock_and_validation_needed = true;
     }

+#if defined(CONFIG_DRM_AMD_DC_DCN)
+    /* set the slot info for each mst_state based on the link encoding format */
+    for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+        struct amdgpu_dm_connector *aconnector;
+        struct drm_connector *connector;
+        struct drm_connector_list_iter iter;
+        u8 link_coding_cap;
+
+        drm_connector_list_iter_begin(dev, &iter);
+        drm_for_each_connector_iter(connector, &iter) {
+            if (connector->index == mst_state->mgr->conn_base_id) {
+                aconnector = to_amdgpu_dm_connector(connector);
+                link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+                drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+                break;
+            }
+        }
+        drm_connector_list_iter_end(&iter);
+    }
+#endif
+
     /**
      * Streams and planes are reset when there are changes that affect
      * bandwidth. Anything that affects bandwidth needs to go through
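For context, the block above leans on two generic DRM MST helpers: for_each_new_mst_mgr_in_state() walks every MST topology touched by the atomic state, and drm_dp_mst_update_slots() sets the slot budget for that topology from the link encoding (63 usable time slots on 8b/10b links, 64 on 128b/132b links). Below is a hedged, driver-agnostic sketch of that helper usage, not the amdgpu code; the encoding capability is hard-coded for illustration, whereas a real driver would derive it from the sink's DPCD:

#include <drm/drm_atomic.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_mst_helper.h>

/* Walk every MST topology in this atomic state and set its slot budget from
 * the link encoding format. */
static void example_update_mst_slots(struct drm_atomic_state *state)
{
    struct drm_dp_mst_topology_mgr *mgr;
    struct drm_dp_mst_topology_state *mst_state;
    int i;

    for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
        /* Assumed value for illustration; query the sink in real code. */
        u8 link_coding_cap = DP_CAP_ANSI_8B10B;

        drm_dp_mst_update_slots(mst_state, link_coding_cap);
    }
}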
@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 }

 static void
-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
-                   struct amdgpu_dm_connector *aconnector,
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+                   bool enable,
+                   struct drm_dp_mst_atomic_payload *target_payload,
                    struct dc_dp_mst_stream_allocation_table *table)
 {
     struct dc_dp_mst_stream_allocation_table new_table = { 0 };
     struct dc_dp_mst_stream_allocation *sa;
-    struct drm_dp_mst_atomic_payload *payload;
+    struct link_mst_stream_allocation_table copy_of_link_table =
+                                        link->mst_stream_alloc_table;
+
+    int i;
+    int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+    struct link_mst_stream_allocation *dc_alloc;
+
+    /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
+    if (enable) {
+        dc_alloc =
+        &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+        dc_alloc->vcp_id = target_payload->vcpi;
+        dc_alloc->slot_count = target_payload->time_slots;
+    } else {
+        for (i = 0; i < copy_of_link_table.stream_count; i++) {
+            dc_alloc =
+            &copy_of_link_table.stream_allocations[i];
+            if (dc_alloc->vcp_id == target_payload->vcpi) {
+                dc_alloc->vcp_id = 0;
+                dc_alloc->slot_count = 0;
+                break;
+            }
+        }
+        ASSERT(i != copy_of_link_table.stream_count);
+    }

     /* Fill payload info*/
-    list_for_each_entry(payload, &mst_state->payloads, next) {
-        if (payload->delete)
-            continue;
-
-        sa = &new_table.stream_allocations[new_table.stream_count];
-        sa->slot_count = payload->time_slots;
-        sa->vcp_id = payload->vcpi;
-        new_table.stream_count++;
+    for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+        dc_alloc =
+        &copy_of_link_table.stream_allocations[i];
+
+        if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+            sa = &new_table.stream_allocations[new_table.stream_count];
+            sa->slot_count = dc_alloc->slot_count;
+            sa->vcp_id = dc_alloc->vcp_id;
+            new_table.stream_count++;
+        }
     }

     /* Overwrite the old table */
@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
      * AUX message. The sequence is slot 1-63 allocated sequence for each
      * stream. AMD ASIC stream slot allocation should follow the same
      * sequence. copy DRM MST allocation to dc */
-    fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
+    fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);

     return true;
 }
@@ -903,11 +903,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
     if (IS_ERR(mst_state))
         return PTR_ERR(mst_state);

-    mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-    drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
-#endif
-
     /* Set up params */
     for (i = 0; i < dc_state->stream_count; i++) {
         struct dc_dsc_policy dsc_policy = {0};
@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
     struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
     int i;
     bool mst_mode = (link->type == dc_connection_mst_branch);
+    /* adjust for drm changes*/
+    bool update_drm_mst_state = true;
     const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
     const struct dc_link_settings empty_link_settings = {0};
+
     DC_LOGGER_INIT(link->ctx->logger);

     /* deallocate_mst_payload is called before disable link. When mode or
      * disable/enable monitor, new stream is created which is not in link
      * stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
             &empty_link_settings,
             avg_time_slots_per_mtp);

-    if (mst_mode) {
+    if (mst_mode || update_drm_mst_state) {
         /* when link is in mst mode, reply on mst manager to remove
          * payload
          */
@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                 stream->ctx,
                 stream);

+        if (!update_drm_mst_state)
+            dm_helpers_dp_mst_send_payload_allocation(
+                stream->ctx,
+                stream,
+                false);
+    }
+
+    if (update_drm_mst_state)
         dm_helpers_dp_mst_send_payload_allocation(
             stream->ctx,
             stream,
             false);
-    }

     return DC_OK;
 }
@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
     MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
                 PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
     MSG_MAP(AllowGpo,            PPSMC_MSG_SetGpoAllow, 0),
+    MSG_MAP(AllowIHHostInterrupt,        PPSMC_MSG_AllowIHHostInterrupt, 0),
 };

 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,

     mgr->payload_count--;
     mgr->next_start_slot -= payload->time_slots;
+
+    if (payload->delete)
+        drm_dp_mst_put_port_malloc(payload->port);
 }
 EXPORT_SYMBOL(drm_dp_remove_payload);

@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
     drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
     if (!payload->delete) {
-        drm_dp_mst_put_port_malloc(port);
         payload->pbn = 0;
         payload->delete = true;
         topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
     }
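The two hunks above rebalance the port "malloc" reference: drm_dp_mst_put_port_malloc() is no longer dropped when the atomic state merely marks a payload for deletion in drm_dp_atomic_release_time_slots(), but only once drm_dp_remove_payload() has actually taken the payload out of the hardware state. A rough sketch of the intended pairing, using hypothetical helper functions rather than code from this commit:

#include <drm/display/drm_dp_mst_helper.h>

/* Hedged sketch: the "malloc" reference pins the port structure in memory
 * for as long as a payload points at it. */
static void example_payload_create(struct drm_dp_mst_atomic_payload *payload,
                                   struct drm_dp_mst_port *port)
{
    drm_dp_mst_get_port_malloc(port);    /* taken when the payload is created */
    payload->port = port;
}

static void example_payload_destroy(struct drm_dp_mst_atomic_payload *payload)
{
    /* With this fix, the matching put happens only after the payload marked
     * payload->delete has really been removed from the hardware state
     * (drm_dp_remove_payload), not already at atomic-check time. */
    drm_dp_mst_put_port_malloc(payload->port);
    payload->port = NULL;
}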