Commit da2d16fc authored by Nicholas Kazlauskas, committed by Alex Deucher

drm/amd/display: Fix IPS handshake for idle optimizations

[Why]
Intermittent reboot hangs are observed; they were introduced by
"Improve x86 and dmub ips handshake".

[How]
Bring back the commit but fix the polling.

Avoid hanging in place forever by bounding the delay, and ensure that
we still message DMCUB on IPS2 exit to notify it that driver idle has
been cleared.
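
In sketch form, the bounded-poll handshake looks like the following
standalone C program (a minimal illustration only: read_commit_bits(),
fake_scratch_reg, the mask value, and the stubbed udelay() are
hypothetical stand-ins for the driver's scratch-register plumbing, not
the real DMCUB API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DMUB_IPS2_COMMIT_MASK (1u << 1) /* illustrative value, not the real bit */
#define MAX_NUM_POLLS 10000u            /* bound the wait so we never hang forever */

/* Hypothetical stand-in for the IPS scratch register polled in the diff. */
static volatile uint32_t fake_scratch_reg = DMUB_IPS2_COMMIT_MASK;

static uint32_t read_commit_bits(void)
{
    return fake_scratch_reg;
}

static void udelay(unsigned int usecs)
{
    (void)usecs;          /* busy-wait stub; the kernel's udelay() really delays */
    fake_scratch_reg = 0; /* pretend the firmware acks before the next poll */
}

/*
 * Bounded poll: spin until the hardware clears the IPS2 commit bit, but
 * give up after MAX_NUM_POLLS iterations and assert instead of hanging
 * in place.
 */
static bool wait_for_ips2_exit(void)
{
    uint32_t i;

    for (i = 0; i < MAX_NUM_POLLS; ++i) {
        if (!(read_commit_bits() & DMUB_IPS2_COMMIT_MASK))
            return true; /* firmware acknowledged the exit */
        udelay(1);
    }
    assert(i < MAX_NUM_POLLS); /* fires if the handshake never completes */
    return false;
}

int main(void)
{
    return wait_for_ips2_exit() ? 0 : 1;
}

The point of the pattern is that the wait is bounded: if the firmware
never acknowledges, the loop falls through to an assert instead of
spinning forever.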
Reviewed-by: Duncan Ma <duncan.ma@amd.com>
Reviewed-by: Jun Lei <jun.lei@amd.com>
Acked-by: Roman Li <roman.li@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 488bb99d
@@ -808,6 +808,34 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
     }
 }
 
+static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+{
+    struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+    struct dc *dc = clk_mgr_base->ctx->dc;
+    uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
+
+    if (dc->config.disable_ips == 0) {
+        val |= DMUB_IPS1_ALLOW_MASK;
+        val |= DMUB_IPS2_ALLOW_MASK;
+    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+        val = val & ~DMUB_IPS1_ALLOW_MASK;
+        val = val & ~DMUB_IPS2_ALLOW_MASK;
+    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+        val |= DMUB_IPS1_ALLOW_MASK;
+        val = val & ~DMUB_IPS2_ALLOW_MASK;
+    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+        val |= DMUB_IPS1_ALLOW_MASK;
+        val |= DMUB_IPS2_ALLOW_MASK;
+    }
+
+    if (!allow_idle) {
+        val = val & ~DMUB_IPS1_ALLOW_MASK;
+        val = val & ~DMUB_IPS2_ALLOW_MASK;
+    }
+
+    dcn35_smu_write_ips_scratch(clk_mgr, val);
+}
+
 static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
 {
     struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -827,6 +855,13 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
     return ips_supported;
 }
 
+static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+{
+    struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+    return dcn35_smu_read_ips_scratch(clk_mgr);
+}
+
 static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
 {
     dcn35_init_clocks(clk_mgr);
@@ -914,6 +949,8 @@ static struct clk_mgr_funcs dcn35_funcs = {
     .set_low_power_state = dcn35_set_low_power_state,
     .exit_low_power_state = dcn35_exit_low_power_state,
     .is_ips_supported = dcn35_is_ips_supported,
+    .set_idle_state = dcn35_set_idle_state,
+    .get_idle_state = dcn35_get_idle_state
 };
 
 struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -444,9 +444,9 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl
         enable);
 }
 
-void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
 {
-    dcn35_smu_send_msg_with_param(
+    return dcn35_smu_send_msg_with_param(
         clk_mgr,
         VBIOSSMC_MSG_DispPsrExit,
         0);
@@ -459,3 +459,13 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
         VBIOSSMC_MSG_QueryIPS2Support,
         0);
 }
+
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
+{
+    REG_WRITE(MP1_SMN_C2PMSG_71, param);
+}
+
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
+{
+    return REG_READ(MP1_SMN_C2PMSG_71);
+}
@@ -194,8 +194,10 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
 void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
 void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
 
-void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
 int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
 int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
 int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
 #endif /* DAL_DC_35_SMU_H_ */
@@ -975,6 +975,8 @@ struct dc_debug_options {
     bool replay_skip_crtc_disabled;
     bool ignore_pg; /*do nothing, let pmfw control it*/
     bool psp_disabled_wa;
+    unsigned int ips2_eval_delay_us;
+    unsigned int ips2_entry_delay_us;
 };
 
 struct gpu_info_soc_bounding_box_v1_0;
@@ -1100,31 +1100,80 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
     cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
 
-    dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+    if (allow_idle) {
+        if (dc->hwss.set_idle_state)
+            dc->hwss.set_idle_state(dc, true);
+    }
 
-    if (allow_idle)
-        udelay(500);
+    dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 {
+    const uint32_t max_num_polls = 10000;
+    uint32_t allow_state = 0;
+    uint32_t commit_state = 0;
+    uint32_t i;
+
     if (dc->debug.dmcub_emulation)
         return;
 
     if (!dc->idle_optimizations_allowed)
         return;
 
-    // Tell PMFW to exit low power state
-    if (dc->clk_mgr->funcs->exit_low_power_state)
-        dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+    if (dc->hwss.get_idle_state &&
+        dc->hwss.set_idle_state &&
+        dc->clk_mgr->funcs->exit_low_power_state) {
+
+        allow_state = dc->hwss.get_idle_state(dc);
+        dc->hwss.set_idle_state(dc, false);
+
+        if (allow_state & DMUB_IPS2_ALLOW_MASK) {
+            // Wait for evaluation time
+            udelay(dc->debug.ips2_eval_delay_us);
+            commit_state = dc->hwss.get_idle_state(dc);
+            if (commit_state & DMUB_IPS2_COMMIT_MASK) {
+                // Tell PMFW to exit low power state
+                dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+                // Wait for IPS2 entry upper bound
+                udelay(dc->debug.ips2_entry_delay_us);
+                dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+                for (i = 0; i < max_num_polls; ++i) {
+                    commit_state = dc->hwss.get_idle_state(dc);
+                    if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
+                        break;
+
+                    udelay(1);
+                }
+                ASSERT(i < max_num_polls);
+
+                if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
+                    ASSERT(0);
+
+                /* TODO: See if we can return early here - IPS2 should go
+                 * back directly to IPS0 and clear the flags, but it will
+                 * be safer to directly notify DMCUB of this.
+                 */
+                allow_state = dc->hwss.get_idle_state(dc);
+            }
+        }
 
-    // Wait for dmcub to load up
-    dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
-
-    // Notify dmcub disallow idle
-    dc_dmub_srv_notify_idle(dc, false);
+        dc_dmub_srv_notify_idle(dc, false);
+        if (allow_state & DMUB_IPS1_ALLOW_MASK) {
+            for (i = 0; i < max_num_polls; ++i) {
+                commit_state = dc->hwss.get_idle_state(dc);
+                if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
+                    break;
+
+                udelay(1);
+            }
+            ASSERT(i < max_num_polls);
+        }
+    }
 
-    // Confirm dmu is powered up
-    dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
+    if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
+        ASSERT(0);
 }
@@ -120,6 +120,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
     .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
     .block_power_control = dcn35_block_power_control,
     .root_clock_control = dcn35_root_clock_control,
+    .set_idle_state = dcn35_set_idle_state,
+    .get_idle_state = dcn35_get_idle_state
 };
 
 static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -748,6 +748,8 @@ static const struct dc_debug_options debug_defaults_drv = {
     .disable_z10 = false,
     .ignore_pg = true,
     .psp_disabled_wa = true,
+    .ips2_eval_delay_us = 200,
+    .ips2_entry_delay_us = 400
 };
 
 static const struct dc_panel_config panel_config_defaults = {
@@ -648,18 +648,10 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
 
     // TODO: review other cases when idle optimization is allowed
 
-    if (!enable) {
-        // Tell PMFW to exit low power state
-        if (dc->clk_mgr->funcs->exit_low_power_state)
-            dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
-
-        dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
-    }
-
-    dc_dmub_srv_notify_idle(dc, enable);
-
     if (!enable)
-        dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
+        dc_dmub_srv_exit_low_power_state(dc);
+    else
+        dc_dmub_srv_notify_idle(dc, enable);
 
     return true;
 }
@@ -1193,3 +1185,19 @@ void dcn35_optimize_bandwidth(
         dc->hwss.root_clock_control(dc, &pg_update_state, false);
     }
 }
+
+void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
+{
+    // TODO: Find a more suitable communication
+    if (dc->clk_mgr->funcs->set_idle_state)
+        dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle);
+}
+
+uint32_t dcn35_get_idle_state(const struct dc *dc)
+{
+    // TODO: Find a more suitable communication
+    if (dc->clk_mgr->funcs->get_idle_state)
+        return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
+
+    return 0;
+}
@@ -81,4 +81,7 @@ void dcn35_dsc_pg_control(
         struct dce_hwseq *hws,
         unsigned int dsc_inst,
         bool power_on);
+
+void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
+uint32_t dcn35_get_idle_state(const struct dc *dc);
 #endif /* __DC_HWSS_DCN35_H__ */
@@ -420,7 +420,6 @@ struct hw_sequencer_funcs {
             struct pg_block_update *update_state, bool power_on);
     void (*set_idle_state)(const struct dc *dc, bool allow_idle);
     uint32_t (*get_idle_state)(const struct dc *dc);
-
     bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
             const struct dc_state *cur_ctx,
             const struct dc_state *new_ctx);