Commit e2e0a1dc authored by Dmytro Laktyushkin, committed by Alex Deucher

drm/amd/display: move clock programming from set_bandwidth to dccg

This change moves dcn clock programming (with the exception of dispclk)
into dccg. It should have no functional effect.
Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6ca11246
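
The heart of the refactor, visible in the hunks below, is twofold: the repeated "raise always, lower only when safe" comparison is factored into a should_set_clock() helper in dce_clocks.c, and dcn10_set_bandwidth() stops programming dcfclk/fclk/phyclk and building the pp_smu display requirement itself, delegating to dccg->funcs->update_clocks(). A minimal stand-alone sketch of the helper's semantics follows; the helper body is copied from the patch, while the main() harness is purely illustrative and not part of the driver:

#include <stdbool.h>
#include <stdio.h>

/* Helper added by this patch in dce_clocks.c: a clock is reprogrammed when it
 * has to rise, or when it may fall and lowering has been declared safe. */
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}

int main(void)
{
	/* Raising a clock is always honoured. */
	printf("%d\n", should_set_clock(false, 600000, 400000));	/* 1 */
	/* Lowering is ignored until safe_to_lower is set... */
	printf("%d\n", should_set_clock(false, 300000, 400000));	/* 0 */
	/* ...and honoured once it is. */
	printf("%d\n", should_set_clock(true, 300000, 400000));	/* 1 */
	return 0;
}

With that helper in place, dcn10_set_bandwidth reduces to handing context->bw.dcn.calc_clk to dccg->funcs->update_clocks(), which now also sends the pp_smu display requirement that set_bandwidth used to fill in.
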
@@ -997,7 +997,7 @@ bool dcn_validate_bandwidth(
}
context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw.dcn.calc_clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
context->bw.dcn.calc_clk.max_supported_dppclk_khz =
......
@@ -523,14 +523,18 @@ static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
}
}
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
static void dce12_update_clocks(struct dccg *dccg,
struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
|| new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
@@ -539,8 +543,7 @@ static void dce12_update_clocks(struct dccg *dccg,
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
|| new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
@@ -553,6 +556,11 @@ static void dcn_update_clocks(struct dccg *dccg,
struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct dc *dc = dccg->ctx->dc;
struct pp_smu_display_requirement_rv *smu_req_cur =
&dc->res_pool->pp_smu_req;
struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
bool send_request_to_increase = false;
bool send_request_to_lower = false;
@@ -566,17 +574,14 @@ static void dcn_update_clocks(struct dccg *dccg,
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
if (send_request_to_increase
) {
struct dc *core_dc = dccg->ctx->dc;
/*use dcfclk to request voltage*/
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
#endif
if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
|| new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
/* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
@@ -586,8 +591,7 @@ static void dcn_update_clocks(struct dccg *dccg,
send_request_to_lower = true;
}
if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
|| new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
@@ -596,36 +600,50 @@ static void dcn_update_clocks(struct dccg *dccg,
send_request_to_lower = true;
}
if ((new_clocks->fclk_khz < dccg->clks.fclk_khz && safe_to_lower)
|| new_clocks->fclk_khz > dccg->clks.fclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
dccg->clks.phyclk_khz = new_clocks->fclk_khz;
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
send_request_to_lower = true;
}
if ((new_clocks->dcfclk_khz < dccg->clks.dcfclk_khz && safe_to_lower)
|| new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
}
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
if (!send_request_to_increase && send_request_to_lower
) {
struct dc *core_dc = dccg->ctx->dc;
/*use dcfclk to request voltage*/
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
#endif
if (new_clocks->phyclk_khz)
smu_req.display_count = 1;
else
smu_req.display_count = 0;
if (pp_smu->set_display_requirement)
pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
*smu_req_cur = smu_req;
}
static void dce_update_clocks(struct dccg *dccg,
@@ -642,8 +660,7 @@ static void dce_update_clocks(struct dccg *dccg,
dccg->cur_min_clks_state = level_change_req.power_level;
}
if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
|| new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
}
......
@@ -2153,11 +2153,11 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2361,11 +2361,6 @@ static void dcn10_apply_ctx_for_surface(
*/
}
static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
{
return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
}
static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
{
bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
@@ -2456,16 +2451,16 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
context->bw.dcn.calc_clk.max_supported_dppclk_khz;
}
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
struct pp_smu_display_requirement_rv *smu_req_cur =
&dc->res_pool->pp_smu_req;
struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
if (dc->debug.sanity_checks) {
dcn10_verify_allow_pstate_change_high(dc);
}
@@ -2473,45 +2468,14 @@ static void dcn10_set_bandwidth(
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
return;
if (context->stream_count == 0)
context->bw.dcn.calc_clk.phyclk_khz = 0;
dc->res_pool->dccg->funcs->update_clocks(
dc->res_pool->dccg,
&context->bw.dcn.calc_clk,
decrease_allowed);
if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.dcfclk_khz,
dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
context->bw.dcn.cur_clk.dcfclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
smu_req.hard_min_dcefclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
}
if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
}
if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.fclk_khz,
dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
context->bw.dcn.cur_clk.fclk_khz =
context->bw.dcn.calc_clk.fclk_khz;
smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
}
smu_req.display_count = context->stream_count;
if (pp_smu->set_display_requirement)
pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
*smu_req_cur = smu_req;
/* make sure dcf clk is before dpp clk to
* make sure we have enough voltage to run dpp clk
*/