Commit 661b94f5 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: put setting hard limit common code in smu_v11_0.c

As designed, the common code shared among all SMU v11 ASICs goes into
smu_v11_0.c. This helps to maintain clear code layering.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b2febc99
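
The layering the commit message describes can be illustrated with a small, self-contained sketch. Everything in it (types, the caller name, the frequency value) is a simplified stand-in for illustration, not the real driver definitions: an ASIC-specific file such as navi10_ppt.c or sienna_cichlid_ppt.c calls the common SMU v11 helper in smu_v11_0.c directly, rather than going through a wrapper in amdgpu_smu.c.

```c
/*
 * Minimal sketch of the intended layering, with stand-in types and values.
 * Not driver code: the real struct smu_context, enum smu_clk_type and helper
 * live in the amdgpu driver tree.
 */
#include <stdint.h>
#include <stdio.h>

struct smu_context { int unused; };   /* stand-in for the real context */
enum smu_clk_type { SMU_UCLK };       /* only the clock used in this sketch */

/* Stand-in for the common helper this commit adds to smu_v11_0.c. */
static int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
                                                 enum smu_clk_type clk_type,
                                                 uint32_t min, uint32_t max)
{
    (void)smu; (void)clk_type;
    printf("hard freq limit request: min=%u MHz, max=%u MHz\n", min, max);
    return 0;
}

/* Stand-in for an ASIC-specific caller (e.g. a pre-display-config step). */
static int asic_specific_caller(struct smu_context *smu, uint32_t max_freq)
{
    /* Passing 0 for min means "leave the hard minimum untouched". */
    return smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
}

int main(void)
{
    struct smu_context smu = { 0 };
    return asic_specific_caller(&smu, 1000);  /* 1000 MHz is a made-up value */
}
```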
@@ -255,42 +255,6 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 	return ret;
 }
 
-int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max)
-{
-	int ret = 0, clk_id = 0;
-	uint32_t param;
-
-	if (min <= 0 && max <= 0)
-		return -EINVAL;
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type))
-		return 0;
-
-	clk_id = smu_clk_get_index(smu, clk_type);
-	if (clk_id < 0)
-		return clk_id;
-
-	if (max > 0) {
-		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
-						  param, NULL);
-		if (ret)
-			return ret;
-	}
-
-	if (min > 0) {
-		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
-						  param, NULL);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max, bool lock_needed)
 {
...
@@ -723,8 +723,6 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			  uint32_t *min, uint32_t *max, bool lock_needed);
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max, bool lock_needed);
-int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max);
 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t *min_value, uint32_t *max_value);
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
...
@@ -252,6 +252,11 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
 					  uint32_t min, uint32_t max);
 
+int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max);
+
 int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
 
 int smu_v11_0_set_performance_level(struct smu_context *smu,
...
@@ -1146,7 +1146,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
 		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
 		if (ret)
 			return ret;
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -1549,7 +1549,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 	}
 
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
 		if (ret) {
 			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -1921,9 +1921,9 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
 		return 0;
 
 	if(disable_memory_clock_switch)
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
 	else
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
 
 	if(!ret)
 		smu->disable_uclk_switch = disable_memory_clock_switch;
@@ -2344,12 +2344,12 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
 		return ret;
 
 	/* Force UCLK out of the highest DPM */
-	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
+	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
 	if (ret)
 		return ret;
 
 	/* Revert the UCLK Hardmax */
-	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
+	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
 	if (ret)
 		return ret;
...
@@ -945,7 +945,7 @@ static int sienna_cichlid_pre_display_config_changed(struct smu_context *smu)
 		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
 		if (ret)
 			return ret;
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -1353,7 +1353,7 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
 	}
 
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
 		if (ret) {
 			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -1657,9 +1657,9 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 		return 0;
 
 	if(disable_memory_clock_switch)
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
 	else
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
 
 	if(!ret)
 		smu->disable_uclk_switch = disable_memory_clock_switch;
...
@@ -1192,7 +1192,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
 		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
 			return 0;
 
-		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
+		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
 
 		if(clk_select == SMU_UCLK)
 			smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1752,6 +1752,43 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	return ret;
 }
 
+int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max)
+{
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (min <= 0 && max <= 0)
+		return -EINVAL;
+
+	if (!smu_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	if (max > 0) {
+		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+						  param, NULL);
+		if (ret)
+			return ret;
+	}
+
+	if (min > 0) {
+		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+						  param, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
 int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
...
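
As the new helper shows, each hard-limit request is encoded as a single 32-bit SMC message argument: the clock index in the upper 16 bits and the frequency (in MHz) in the lower 16 bits, per the (clk_id << 16) | (freq & 0xffff) expression above. A standalone sketch of that packing follows; the clock index and frequency values are made up for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

/* Models the argument packing used by smu_v11_0_set_hard_freq_limited_range():
 * clock index in bits 31:16, frequency in MHz in bits 15:0. */
static uint32_t pack_hard_limit_param(int clk_id, uint32_t freq_mhz)
{
    return (uint32_t)((clk_id << 16) | (freq_mhz & 0xffff));
}

int main(void)
{
    int uclk_id = 2;         /* hypothetical index, as if from smu_clk_get_index() */
    uint32_t max_mhz = 1000; /* hypothetical hard-max frequency */

    /* Prints param = 0x000203e8 for these example values. */
    printf("param = 0x%08x\n", pack_hard_limit_param(uclk_id, max_mhz));
    return 0;
}
```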