Commit 1e939ea1 authored by Dillon Varone's avatar Dillon Varone Committed by Alex Deucher

drm/amd/display: Fix various dynamic ODM transitions on DCN32

[Why&How]

Several transitions were fixed that will allow Dynamic ODM and MPO
transitions to be supported on DCN32.

1) Due to resource limitations, in certain scenarios that require an MPO
plane to be split, the features cannot be combined with the current
policy. This is due to unsafe transitions being required (OPP instance
per MPCC being switched on active pipe is not supported by DCN), to
support the split plane with ODM active as it moves across the viewport.
Dynamic ODM will now be disabled when MPO is required.

2) When exiting MPO and re-entering ODM, DC assigns an inactive pipe for
the next ODM pipe, which under previous power gating policy would result
in programming a gated DSC HW block. The new policy dynamically
gates/un-gates DSC blocks when Dynamic ODM is active to support
transitions, on DCN32 only.

3) Entry and exit from 3 plane MPO and Dynamic ODM requires a minimal
transition so that all pipes which require their MPCC OPP instance to
be changed have a full frame to be disabled before reprogramming. To
solve this, the Dynamic ODM policy now utilizes minimal state
transitions when entering or exiting 3 plane scenarios.

4) Various fixes to DCN32 pipe merge/split algorithm to support Dynamic
ODM and MPO transitions.

In summary, this commit fixes various transitions to support ODM->MPO
and MPO->ODM.
Reviewed-by: default avatarMartin Leung <Martin.Leung@amd.com>
Reviewed-by: default avatarJun Lei <Jun.Lei@amd.com>
Acked-by: default avatarJasdeep Dhillon <jdhillon@amd.com>
Signed-off-by: default avatarDillon Varone <Dillon.Varone@amd.com>
Tested-by: default avatarDaniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent e32df0c7
......@@ -1750,6 +1750,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, false);
disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
......@@ -1840,6 +1843,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, true);
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
......@@ -2003,6 +2009,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, true);
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
......@@ -3194,6 +3203,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, false);
context_clock_trace(dc, context);
}
......@@ -3517,11 +3529,59 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
/* Determine whether applying the incoming context requires an intermediate
 * transition state with extra pipe splitting and ODM disabled, due to
 * hardware limitations: DCN cannot switch the OPP instance associated with
 * an MPCC on an active pipe. Returns true when the MPCC tree of an active
 * pipe could change; *is_plane_addition is set when the change is a plane
 * being added (caller uses the current state as the minimal state then).
 */
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
		struct dc_stream_state *stream,
		int surface_count,
		bool *is_plane_addition)
{
	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	bool force_minimal_pipe_splitting = false;

	*is_plane_addition = false;

	/* MPC case: plane count changes on an active stream while splitting is allowed */
	if (cur_stream_status && surface_count > 0 &&
			dc->current_state->stream_count > 0 &&
			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		if (cur_stream_status->plane_count != surface_count) {
			force_minimal_pipe_splitting = true;
			/* only a growing plane count is an addition */
			*is_plane_addition = cur_stream_status->plane_count < surface_count;
		}
	}

	/* Dynamic ODM case: entering or exiting a 3-plane configuration
	 * while the single-display 2:1 ODM policy is enabled
	 */
	if (cur_stream_status && surface_count > 0 &&
			dc->current_state->stream_count == 1 &&
			dc->debug.enable_single_display_2to1_odm_policy) {
		if (cur_stream_status->plane_count > 2 &&
				cur_stream_status->plane_count > surface_count) {
			force_minimal_pipe_splitting = true;
		} else if (surface_count > 2 &&
				cur_stream_status->plane_count < surface_count) {
			force_minimal_pipe_splitting = true;
			*is_plane_addition = true;
		}
	}

	return force_minimal_pipe_splitting;
}
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
enum pipe_split_policy tmp_policy;
enum pipe_split_policy tmp_mpc_policy;
bool temp_dynamic_odm_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
......@@ -3529,10 +3589,13 @@ static bool commit_minimal_transition_state(struct dc *dc,
return false;
if (!dc->config.is_vmin_only_asic) {
tmp_policy = dc->debug.pipe_split_policy;
tmp_mpc_policy = dc->debug.pipe_split_policy;
dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
}
temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
dc->debug.enable_single_display_2to1_odm_policy = false;
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
......@@ -3553,20 +3616,22 @@ static bool commit_minimal_transition_state(struct dc *dc,
ret = dc_commit_state_no_check(dc, transition_context);
}
//always release as dc_commit_state_no_check retains in good case
/*always release as dc_commit_state_no_check retains in good case*/
dc_release_state(transition_context);
//restore previous pipe split policy
/*restore previous pipe split and odm policy*/
if (!dc->config.is_vmin_only_asic)
dc->debug.pipe_split_policy = tmp_policy;
dc->debug.pipe_split_policy = tmp_mpc_policy;
dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
if (ret != DC_OK) {
//this should never happen
/*this should never happen*/
BREAK_TO_DEBUGGER();
return false;
}
//force full surface update
/*force full surface update*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
......@@ -3589,24 +3654,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
bool force_minimal_pipe_splitting = false;
bool is_plane_addition = false;
bool force_minimal_pipe_splitting;
bool is_plane_addition;
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
if (cur_stream_status &&
dc->current_state->stream_count > 0 &&
dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
/* determine if minimal transition is required */
if (surface_count > 0) {
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
is_plane_addition = true;
}
}
}
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
surface_count,
&is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
......@@ -3623,7 +3678,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
&context))
return false;
/* on plane addition, minimal state is the new one */
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
......
......@@ -850,6 +850,7 @@ struct dc_debug_options {
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
};
......
......@@ -1364,3 +1364,57 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
}
}
}
/* Report whether a DSC instance is currently ungated (powered on).
 *
 * Reads the power-gating FSM status register for the DSC domain that
 * corresponds to dsc_inst (DOMAIN16..DOMAIN19 map to DSC0..DSC3).
 * A PGFSM power status of 0 means the domain is powered/ungated.
 *
 * Returns true when the instance is ungated. An out-of-range dsc_inst
 * trips the debugger hook and, since pwr_status stays 0, also reports
 * true (treated as ungated, the safe assumption for callers).
 */
bool dcn32_dsc_pg_status(
		struct dce_hwseq *hws,
		unsigned int dsc_inst)
{
	uint32_t pwr_status = 0;

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_GET(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1: /* DSC1 */
		REG_GET(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2: /* DSC2 */
		REG_GET(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3: /* DSC3 */
		REG_GET(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* idiom fix: "== 0 ? true : false" is redundant */
	return pwr_status == 0;
}
/* Reconcile DSC power gating with the incoming context.
 *
 * For every DSC instance in the resource pool: ungate any block the new
 * context acquires that is currently gated; gate any block the context
 * does not use, but only when the caller says it is safe_to_disable
 * (i.e. after the transition has completed). Blocks already in the
 * desired state are left untouched.
 */
void dcn32_update_dsc_pg(struct dc *dc,
		struct dc_state *context,
		bool safe_to_disable)
{
	struct dce_hwseq *hws = dc->hwseq;

	for (int i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
		bool acquired = context->res_ctx.is_dsc_acquired[i];
		bool ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);

		if (acquired && !ungated)
			hws->funcs.dsc_pg_control(hws, dsc->inst, true);
		else if (!acquired && safe_to_disable && ungated)
			hws->funcs.dsc_pg_control(hws, dsc->inst, false);
	}
}
......@@ -92,4 +92,12 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe);
bool dcn32_dsc_pg_status(
struct dce_hwseq *hws,
unsigned int dsc_inst);
void dcn32_update_dsc_pg(struct dc *dc,
struct dc_state *context,
bool safe_to_disable);
#endif /* __DC_HWSS_DCN32_H__ */
......@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
......@@ -138,6 +139,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
......
......@@ -718,6 +718,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
.enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
......@@ -1846,7 +1849,7 @@ int dcn32_populate_dml_pipes_from_context(
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false;
int plane_count = 0;
uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
......@@ -1865,12 +1868,12 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream)) {
if (dc->debug.enable_single_display_2to1_odm_policy) {
if (!((plane_count > 2) && pipe->top_pipe))
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
if (context->stream_count == 1 &&
context->stream_status[0].plane_count <= 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
dc->debug.enable_single_display_2to1_odm_policy) {
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
}
......@@ -1927,12 +1930,10 @@ int dcn32_populate_dml_pipes_from_context(
}
}
/* Calculate the number of planes we have so we can determine
* whether to apply ODM 2to1 policy or not
*/
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
DC_FP_START();
is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
}
......
......@@ -718,6 +718,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
/*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
.enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
......
......@@ -1120,7 +1120,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
// to re-initialize viewport after the pipe merge
memset(merge, 0, MAX_PIPES * sizeof(bool));
/* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
......@@ -1589,6 +1591,28 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
/*2:1ODM+MPC Split MPO to Single Pipe + MPC Split MPO*/
if (pipe->bottom_pipe) {
if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
/*MPC split rules will handle this case*/
pipe->bottom_pipe->top_pipe = NULL;
} else {
if (pipe->prev_odm_pipe->bottom_pipe) {
/* 3 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
} else {
/* 2 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
}
}
}
if (pipe->top_pipe) {
pipe->top_pipe->bottom_pipe = NULL;
}
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
......
......@@ -84,6 +84,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*power_down)(struct dc *dc);
void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
......
......@@ -124,6 +124,8 @@ struct hwseq_private_funcs {
void (*dsc_pg_control)(struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
bool (*dsc_pg_status)(struct dce_hwseq *hws,
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment