Commit 73b8a388 authored by Sung Joon Kim, committed by Alex Deucher

drm/amd/display: Choose HUBP unbounded request based on DML output

[why]
Previously, we decided whether to enable unbounded
requests purely based on pipe_cnt, which is the
wrong variable for determining how many pipes are
in use. DML already accounts for the number of
pipes in use, along with various other factors,
and is a more reliable basis for the decision.

[how]
Use UnboundedRequestEnabledThisState to decide
on unbounded_req_enabled.
Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
Signed-off-by: Sung Joon Kim <sungjoon.kim@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 032831f2
...@@ -288,7 +288,6 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont ...@@ -288,7 +288,6 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
{ {
unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id; unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
enum mall_stream_type pipe_mall_type; enum mall_stream_type pipe_mall_type;
bool unbounded_req_enabled = false;
struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch; struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
...@@ -302,14 +301,6 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont ...@@ -302,14 +301,6 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
unbounded_req_enabled = in_ctx->v20.dml_core_ctx.ms.UnboundedRequestEnabledThisState;
if (unbounded_req_enabled && pipe_cnt > 1) {
// Unbounded requesting should not ever be used when more than 1 pipe is enabled.
//ASSERT(false);
unbounded_req_enabled = false;
}
context->bw_ctx.bw.dcn.compbuf_size_kb = in_ctx->v20.dml_core_ctx.ip.config_return_buffer_size_in_kbytes; context->bw_ctx.bw.dcn.compbuf_size_kb = in_ctx->v20.dml_core_ctx.ip.config_return_buffer_size_in_kbytes;
for (dc_pipe_ctx_index = 0; dc_pipe_ctx_index < pipe_cnt; dc_pipe_ctx_index++) { for (dc_pipe_ctx_index = 0; dc_pipe_ctx_index < pipe_cnt; dc_pipe_ctx_index++) {
...@@ -344,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont ...@@ -344,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false; context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
} else { } else {
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = dml_get_det_buffer_size_kbytes(&context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx); context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = dml_get_det_buffer_size_kbytes(&context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = unbounded_req_enabled; // Unbounded requesting should not ever be used when more than 1 pipe is enabled.
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = in_ctx->v20.dml_core_ctx.ms.UnboundedRequestEnabledThisState;
} }
context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb; context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb;
......
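For context, a minimal C sketch (not part of the patch) contrasting the old and new decision logic. The struct shapes below are simplified stand-ins invented for illustration; only UnboundedRequestEnabledThisState, pipe_cnt, and unbounded_req come from the actual code above.

#include <stdbool.h>

/* Simplified stand-ins for the DML2 mode-support output and per-pipe state;
 * the real types live in the dml2 code and carry many more fields. */
struct dml_ms_out {
	bool UnboundedRequestEnabledThisState;	/* computed by DML for the chosen state */
};

struct pipe_out {
	bool unbounded_req;
};

/* Old approach: second-guess DML by gating its answer on pipe_cnt, a
 * variable that does not reliably reflect how many pipes are in use. */
static bool unbounded_req_old(const struct dml_ms_out *ms, unsigned int pipe_cnt)
{
	bool enabled = ms->UnboundedRequestEnabledThisState;

	if (enabled && pipe_cnt > 1)
		enabled = false;	/* can wrongly force unbounded requests off */

	return enabled;
}

/* New approach: take DML's answer directly, since it already accounts for
 * the number of pipes in use along with the other relevant factors. */
static void set_unbounded_req_new(struct pipe_out *pipe, const struct dml_ms_out *ms)
{
	pipe->unbounded_req = ms->UnboundedRequestEnabledThisState;
}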