Commit c98f31d1 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: revise calling chain on setting soft limit

This helps to maintain clear code layering and drops the now-unnecessary
'lock_needed' parameter.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 661b94f5
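
In short, the patch inverts which layer wraps which: the cross-component entry point smu_set_soft_freq_range() now always takes smu->mutex and dispatches straight through the ppt_funcs table, while ASIC-specific code that already runs under that lock calls its IP-version helper directly instead of passing lock_needed = false. A simplified sketch of the call paths, inferred from the hunks below (the pre-patch path through smu_set_soft_freq_limited_range() is reconstructed from the removed lines, not spelled out in the commit message):

/*
 * Calling chain before this patch (simplified):
 *
 *   navi10 / renoir / sienna_cichlid level handlers
 *     -> smu_set_soft_freq_range(smu, clk, min, max, false)     lock_needed = false
 *          -> smu_set_soft_freq_limited_range(smu, clk, min, max)
 *               -> smu->ppt_funcs->set_soft_freq_limited_range(...)
 *
 * Calling chain after this patch:
 *
 *   external callers, e.g. amdgpu_debugfs_sclk_set()
 *     -> smu_set_soft_freq_range(smu, clk, min, max)            always takes smu->mutex
 *          -> smu->ppt_funcs->set_soft_freq_limited_range(...)
 *
 *   ASIC code already running under smu->mutex
 *     -> smu_v11_0_set_soft_freq_limited_range()                navi10
 *     -> smu_v12_0_set_soft_freq_limited_range()                renoir
 *     -> sienna_cichlid_set_soft_freq_limited_range()           wraps smu_v11_0 with a gfxoff toggle
 */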
@@ -1464,7 +1464,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
 		if (ret || val > max_freq || val < min_freq)
 			return -EINVAL;
-		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
 	} else {
 		return 0;
 	}
 	pm_runtime_mark_last_busy(adev->ddev->dev);
@@ -238,18 +238,24 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
 	return ret;
 }
 
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max, bool lock_needed)
+int smu_set_soft_freq_range(struct smu_context *smu,
+			    enum smu_clk_type clk_type,
+			    uint32_t min,
+			    uint32_t max)
 {
 	int ret = 0;
 
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
 		return 0;
 
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
 
-	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+	if (smu->ppt_funcs->set_soft_freq_limited_range)
+		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
+								   clk_type,
+								   min,
+								   max);
 
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
@@ -722,7 +722,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max, bool lock_needed);
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max, bool lock_needed);
+			    uint32_t min, uint32_t max);
 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t *min_value, uint32_t *max_value);
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
@@ -1066,7 +1066,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			return size;
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			return size;
 		break;
@@ -1190,7 +1190,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
+		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
 		if (ret)
 			return ret;
 	}
@@ -1216,7 +1216,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
 		if (ret)
 			return ret;
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -1772,10 +1772,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
 		return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
 	}
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+	ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+	ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
 	if (ret)
 		return ret;
@@ -1840,10 +1840,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+	ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+	ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
 	if (ret)
 		return ret;
@@ -439,7 +439,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
 		if (ret)
 			return ret;
 	}
@@ -472,7 +472,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
 		if (ret)
 			return ret;
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -720,7 +720,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
 	if (ret)
 		return ret;
@@ -728,7 +728,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
 	if (ret)
 		return ret;
@@ -858,6 +858,22 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	return size;
 }
+
+int sienna_cichlid_set_soft_freq_limited_range(struct smu_context *smu,
+					       enum smu_clk_type clk_type,
+					       uint32_t min, uint32_t max)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret;
+
+	if (clk_type == SMU_GFXCLK)
+		amdgpu_gfx_off_ctrl(adev, false);
+	ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min, max);
+	if (clk_type == SMU_GFXCLK)
+		amdgpu_gfx_off_ctrl(adev, true);
+
+	return ret;
+}
 static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, uint32_t mask)
 {
@@ -893,7 +909,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			goto forec_level_out;
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+		ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			goto forec_level_out;
 		break;
@@ -991,7 +1007,7 @@ static int sienna_cichlid_force_dpm_limit_value(struct smu_context *smu, bool hi
 			return ret;
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
+		ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
 		if (ret)
 			return ret;
 	}
@@ -1017,7 +1033,7 @@ static int sienna_cichlid_unforce_dpm_levels(struct smu_context *smu)
 		if (ret)
 			return ret;
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+		ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -1568,10 +1584,10 @@ static int sienna_cichlid_set_standard_performance_level(struct smu_context *smu
 		return sienna_cichlid_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
 	}
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+	ret = sienna_cichlid_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+	ret = sienna_cichlid_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
 	if (ret)
 		return ret;
@@ -1751,22 +1767,6 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
 	return ret;
 }
-
-static int sienna_cichlid_set_soft_freq_limited_range(struct smu_context *smu,
-					enum smu_clk_type clk_type,
-					uint32_t min, uint32_t max)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret;
-
-	if (clk_type == SMU_GFXCLK)
-		amdgpu_gfx_off_ctrl(adev, false);
-	ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min, max);
-	if (clk_type == SMU_GFXCLK)
-		amdgpu_gfx_off_ctrl(adev, true);
-
-	return ret;
-}
 static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
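
Not visible in this diff is how the common wrapper reaches the ASIC handler: smu->ppt_funcs is the per-ASIC callback table, and each ppt driver is expected to publish its set_soft_freq_limited_range implementation there. A hedged sketch of what that wiring presumably looks like; the table names and the exact set of entries are assumptions based on the surrounding driver code, not part of this commit:

/* Illustrative only: the real callback tables in navi10_ppt.c and
 * sienna_cichlid_ppt.c carry many more entries than shown here. */
static struct pptable_funcs navi10_ppt_funcs = {
	/* ... */
	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
};

static struct pptable_funcs sienna_cichlid_ppt_funcs = {
	/* ... */
	.set_soft_freq_limited_range = sienna_cichlid_set_soft_freq_limited_range,
};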