Commit 3697b339, authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: add lock protection for swSMU APIs V2

This is a quick and low risk fix. Those APIs which
are exposed to other IPs or to support sysfs/hwmon
interfaces or DAL will have lock protection. Meanwhile
no lock protection is enforced for swSMU internal used
APIs. Future optimization is needed.

V2: strip the lock protection for all swSMU internal APIs
Signed-off-by: Evan Quan <evan.quan@amd.com>
Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Feifei Xu <Feifei.Xu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6aec5bb4
...@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) ...@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL, low ? &clk_freq : NULL,
!low ? &clk_freq : NULL); !low ? &clk_freq : NULL,
true);
if (ret) if (ret)
return 0; return 0;
return clk_freq * 100; return clk_freq * 100;
...@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) ...@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL, low ? &clk_freq : NULL,
!low ? &clk_freq : NULL); !low ? &clk_freq : NULL,
true);
if (ret) if (ret)
return 0; return 0;
return clk_freq * 100; return clk_freq * 100;
......
...@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen { ...@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_current_power_state(adev) \ #define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_smu_get_current_power_state(adev) \
((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
#define amdgpu_smu_set_power_state(adev) \
((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
#define amdgpu_dpm_get_pp_num_states(adev, data) \ #define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
......
...@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, ...@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state) if (adev->smu.ppt_funcs->get_current_power_state)
pm = amdgpu_smu_get_current_power_state(adev); pm = smu_get_current_power_state(&adev->smu);
else else
pm = adev->pm.dpm.user_state; pm = adev->pm.dpm.user_state;
} else if (adev->powerplay.pp_funcs->get_current_power_state) { } else if (adev->powerplay.pp_funcs->get_current_power_state) {
...@@ -907,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, ...@@ -907,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
...@@ -954,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, ...@@ -954,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
...@@ -994,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, ...@@ -994,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
...@@ -1034,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, ...@@ -1034,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
...@@ -1074,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, ...@@ -1074,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
...@@ -1114,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, ...@@ -1114,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret; return ret;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level) else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
...@@ -1306,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, ...@@ -1306,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
} }
parameter[parameter_size] = profile_mode; parameter[parameter_size] = profile_mode;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size); ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode) else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
if (!ret) if (!ret)
...@@ -2015,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, ...@@ -2015,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
uint32_t limit = 0; uint32_t limit = 0;
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, true); smu_get_power_limit(&adev->smu, &limit, true, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
...@@ -2033,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, ...@@ -2033,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
uint32_t limit = 0; uint32_t limit = 0;
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, false); smu_get_power_limit(&adev->smu, &limit, false, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
...@@ -3013,7 +3013,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) ...@@ -3013,7 +3013,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
smu_handle_task(&adev->smu, smu_handle_task(&adev->smu,
smu_dpm->dpm_level, smu_dpm->dpm_level,
AMD_PP_TASK_DISPLAY_CONFIG_CHANGE); AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
true);
} else { } else {
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) { if (!amdgpu_device_has_dc_support(adev)) {
......
...@@ -865,7 +865,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( ...@@ -865,7 +865,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
if (!smu->funcs->get_max_sustainable_clocks_by_dc) if (!smu->funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED; return PP_SMU_RESULT_UNSUPPORTED;
if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
return PP_SMU_RESULT_OK; return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_FAIL;
...@@ -884,7 +884,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, ...@@ -884,7 +884,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
if (!smu->ppt_funcs->get_uclk_dpm_states) if (!smu->ppt_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED; return PP_SMU_RESULT_UNSUPPORTED;
if (!smu->ppt_funcs->get_uclk_dpm_states(smu, if (!smu_get_uclk_dpm_states(smu,
clock_values_in_khz, num_states)) clock_values_in_khz, num_states))
return PP_SMU_RESULT_OK; return PP_SMU_RESULT_OK;
...@@ -905,7 +905,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table( ...@@ -905,7 +905,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table(
if (!smu->ppt_funcs->get_dpm_clock_table) if (!smu->ppt_funcs->get_dpm_clock_table)
return PP_SMU_RESULT_UNSUPPORTED; return PP_SMU_RESULT_UNSUPPORTED;
if (!smu->ppt_funcs->get_dpm_clock_table(smu, clock_table)) if (!smu_get_dpm_clock_table(smu, clock_table))
return PP_SMU_RESULT_OK; return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_FAIL;
......
...@@ -67,6 +67,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) ...@@ -67,6 +67,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT]; uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0; uint64_t hw_feature_count = 0;
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret) if (ret)
goto failed; goto failed;
...@@ -92,6 +94,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) ...@@ -92,6 +94,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
} }
failed: failed:
mutex_unlock(&smu->mutex);
return size; return size;
} }
...@@ -149,9 +153,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) ...@@ -149,9 +153,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_disabled = 0; uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0; uint64_t feature_enables = 0;
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret) if (ret)
return ret; goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
...@@ -161,14 +167,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) ...@@ -161,14 +167,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
if (feature_2_enabled) { if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret) if (ret)
return ret; goto out;
} }
if (feature_2_disabled) { if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret) if (ret)
return ret; goto out;
} }
out:
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -254,7 +263,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, ...@@ -254,7 +263,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
} }
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max) uint32_t *min, uint32_t *max, bool lock_needed)
{ {
uint32_t clock_limit; uint32_t clock_limit;
int ret = 0; int ret = 0;
...@@ -262,6 +271,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, ...@@ -262,6 +271,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max) if (!min && !max)
return -EINVAL; return -EINVAL;
if (lock_needed)
mutex_lock(&smu->mutex);
if (!smu_clk_dpm_is_enabled(smu, clk_type)) { if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) { switch (clk_type) {
case SMU_MCLK: case SMU_MCLK:
...@@ -285,14 +297,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, ...@@ -285,14 +297,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100; *min = clock_limit / 100;
if (max) if (max)
*max = clock_limit / 100; *max = clock_limit / 100;
} else {
return 0; /*
* Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
* core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
*/
ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
} }
/*
* Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the if (lock_needed)
* core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). mutex_unlock(&smu->mutex);
*/
ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
return ret; return ret;
} }
...@@ -369,6 +384,8 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, ...@@ -369,6 +384,8 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
{ {
int ret = 0; int ret = 0;
mutex_lock(&smu->mutex);
switch (block_type) { switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_UVD:
ret = smu_dpm_set_uvd_enable(smu, gate); ret = smu_dpm_set_uvd_enable(smu, gate);
...@@ -386,13 +403,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, ...@@ -386,13 +403,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
break; break;
} }
return ret; mutex_unlock(&smu->mutex);
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) return ret;
{
/* not support power state */
return POWER_STATE_TYPE_DEFAULT;
} }
int smu_get_power_num_states(struct smu_context *smu, int smu_get_power_num_states(struct smu_context *smu,
...@@ -520,16 +533,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) ...@@ -520,16 +533,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table) int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{ {
struct smu_table_context *smu_table = &smu->smu_table; struct smu_table_context *smu_table = &smu->smu_table;
uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable) if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL; return -EINVAL;
mutex_lock(&smu->mutex);
if (smu_table->hardcode_pptable) if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable; *table = smu_table->hardcode_pptable;
else else
*table = smu_table->power_play_table; *table = smu_table->power_play_table;
return smu_table->power_play_table_size; powerplay_table_size = smu_table->power_play_table_size;
mutex_unlock(&smu->mutex);
return powerplay_table_size;
} }
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
...@@ -556,14 +576,11 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) ...@@ -556,14 +576,11 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size); memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable; smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size; smu_table->power_play_table_size = size;
mutex_unlock(&smu->mutex);
ret = smu_reset(smu); ret = smu_reset(smu);
if (ret) if (ret)
pr_info("smu reset failed, ret = %d\n", ret); pr_info("smu reset failed, ret = %d\n", ret);
return ret;
failed: failed:
mutex_unlock(&smu->mutex); mutex_unlock(&smu->mutex);
return ret; return ret;
...@@ -726,11 +743,10 @@ static int smu_late_init(void *handle) ...@@ -726,11 +743,10 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled) if (!smu->pm_enabled)
return 0; return 0;
mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu, smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level, smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT); AMD_PP_TASK_COMPLETE_INIT,
mutex_unlock(&smu->mutex); false);
return 0; return 0;
} }
...@@ -1074,7 +1090,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, ...@@ -1074,7 +1090,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret) if (ret)
return ret; return ret;
ret = smu_get_power_limit(smu, &smu->default_power_limit, true); ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1160,15 +1176,19 @@ static int smu_start_smc_engine(struct smu_context *smu) ...@@ -1160,15 +1176,19 @@ static int smu_start_smc_engine(struct smu_context *smu)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) { if (adev->asic_type < CHIP_NAVI10) {
ret = smu_load_microcode(smu); if (smu->funcs->load_microcode) {
if (ret) ret = smu->funcs->load_microcode(smu);
return ret; if (ret)
return ret;
}
} }
} }
ret = smu_check_fw_status(smu); if (smu->funcs->check_fw_status) {
if (ret) ret = smu->funcs->check_fw_status(smu);
pr_err("SMC is not ready\n"); if (ret)
pr_err("SMC is not ready\n");
}
return ret; return ret;
} }
...@@ -1335,8 +1355,6 @@ static int smu_resume(void *handle) ...@@ -1335,8 +1355,6 @@ static int smu_resume(void *handle)
pr_info("SMU is resuming...\n"); pr_info("SMU is resuming...\n");
mutex_lock(&smu->mutex);
ret = smu_start_smc_engine(smu); ret = smu_start_smc_engine(smu);
if (ret) { if (ret) {
pr_err("SMU is not ready yet!\n"); pr_err("SMU is not ready yet!\n");
...@@ -1356,13 +1374,11 @@ static int smu_resume(void *handle) ...@@ -1356,13 +1374,11 @@ static int smu_resume(void *handle)
smu->disable_uclk_switch = 0; smu->disable_uclk_switch = 0;
mutex_unlock(&smu->mutex);
pr_info("SMU is resumed successfully!\n"); pr_info("SMU is resumed successfully!\n");
return 0; return 0;
failed: failed:
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1380,8 +1396,9 @@ int smu_display_configuration_change(struct smu_context *smu, ...@@ -1380,8 +1396,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex); mutex_lock(&smu->mutex);
smu_set_deep_sleep_dcefclk(smu, if (smu->funcs->set_deep_sleep_dcefclk)
display_config->min_dcef_deep_sleep_set_clk / 100); smu->funcs->set_deep_sleep_dcefclk(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) { for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0) if (display_config->displays[index].controller_id != 0)
...@@ -1559,9 +1576,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d ...@@ -1559,9 +1576,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
&soc_mask); &soc_mask);
if (ret) if (ret)
return ret; return ret;
smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
break; break;
case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
...@@ -1625,7 +1642,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, ...@@ -1625,7 +1642,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index]; workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload) if (smu->power_profile_mode != workload)
smu_set_power_profile_mode(smu, &workload, 0); smu_set_power_profile_mode(smu, &workload, 0, false);
} }
return ret; return ret;
...@@ -1633,18 +1650,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, ...@@ -1633,18 +1650,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu, int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level, enum amd_dpm_forced_level level,
enum amd_pp_task task_id) enum amd_pp_task task_id,
bool lock_needed)
{ {
int ret = 0; int ret = 0;
if (lock_needed)
mutex_lock(&smu->mutex);
switch (task_id) { switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu); ret = smu_pre_display_config_changed(smu);
if (ret) if (ret)
return ret; goto out;
ret = smu_set_cpu_power_state(smu); ret = smu_set_cpu_power_state(smu);
if (ret) if (ret)
return ret; goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false); ret = smu_adjust_power_state_dynamic(smu, level, false);
break; break;
case AMD_PP_TASK_COMPLETE_INIT: case AMD_PP_TASK_COMPLETE_INIT:
...@@ -1655,6 +1676,10 @@ int smu_handle_task(struct smu_context *smu, ...@@ -1655,6 +1676,10 @@ int smu_handle_task(struct smu_context *smu,
break; break;
} }
out:
if (lock_needed)
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1687,7 +1712,7 @@ int smu_switch_power_profile(struct smu_context *smu, ...@@ -1687,7 +1712,7 @@ int smu_switch_power_profile(struct smu_context *smu,
} }
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
smu_set_power_profile_mode(smu, &workload, 0); smu_set_power_profile_mode(smu, &workload, 0, false);
mutex_unlock(&smu->mutex); mutex_unlock(&smu->mutex);
...@@ -1717,12 +1742,19 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev ...@@ -1717,12 +1742,19 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
if (!smu->is_apu && !smu_dpm_ctx->dpm_context) if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL; return -EINVAL;
mutex_lock(&smu->mutex);
ret = smu_enable_umd_pstate(smu, &level); ret = smu_enable_umd_pstate(smu, &level);
if (ret) if (ret) {
mutex_unlock(&smu->mutex);
return ret; return ret;
}
ret = smu_handle_task(smu, level, ret = smu_handle_task(smu, level,
AMD_PP_TASK_READJUST_POWER_STATE); AMD_PP_TASK_READJUST_POWER_STATE,
false);
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1740,7 +1772,8 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count) ...@@ -1740,7 +1772,8 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
int smu_force_clk_levels(struct smu_context *smu, int smu_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, enum smu_clk_type clk_type,
uint32_t mask) uint32_t mask,
bool lock_needed)
{ {
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0; int ret = 0;
...@@ -1750,9 +1783,15 @@ int smu_force_clk_levels(struct smu_context *smu, ...@@ -1750,9 +1783,15 @@ int smu_force_clk_levels(struct smu_context *smu,
return -EINVAL; return -EINVAL;
} }
if (lock_needed)
mutex_lock(&smu->mutex);
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
if (lock_needed)
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1770,6 +1809,8 @@ int smu_set_mp1_state(struct smu_context *smu, ...@@ -1770,6 +1809,8 @@ int smu_set_mp1_state(struct smu_context *smu,
if (!smu->pm_enabled) if (!smu->pm_enabled)
return 0; return 0;
mutex_lock(&smu->mutex);
switch (mp1_state) { switch (mp1_state) {
case PP_MP1_STATE_SHUTDOWN: case PP_MP1_STATE_SHUTDOWN:
msg = SMU_MSG_PrepareMp1ForShutdown; msg = SMU_MSG_PrepareMp1ForShutdown;
...@@ -1782,17 +1823,22 @@ int smu_set_mp1_state(struct smu_context *smu, ...@@ -1782,17 +1823,22 @@ int smu_set_mp1_state(struct smu_context *smu,
break; break;
case PP_MP1_STATE_NONE: case PP_MP1_STATE_NONE:
default: default:
mutex_unlock(&smu->mutex);
return 0; return 0;
} }
/* some asics may not support those messages */ /* some asics may not support those messages */
if (smu_msg_get_index(smu, msg) < 0) if (smu_msg_get_index(smu, msg) < 0) {
mutex_unlock(&smu->mutex);
return 0; return 0;
}
ret = smu_send_smc_msg(smu, msg); ret = smu_send_smc_msg(smu, msg);
if (ret) if (ret)
pr_err("[PrepareMp1] Failed!\n"); pr_err("[PrepareMp1] Failed!\n");
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1812,10 +1858,14 @@ int smu_set_df_cstate(struct smu_context *smu, ...@@ -1812,10 +1858,14 @@ int smu_set_df_cstate(struct smu_context *smu,
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0; return 0;
mutex_lock(&smu->mutex);
ret = smu->ppt_funcs->set_df_cstate(smu, state); ret = smu->ppt_funcs->set_df_cstate(smu, state);
if (ret) if (ret)
pr_err("[SetDfCstate] failed!\n"); pr_err("[SetDfCstate] failed!\n");
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1843,6 +1893,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, ...@@ -1843,6 +1893,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
void *table = watermarks->cpu_addr; void *table = watermarks->cpu_addr;
mutex_lock(&smu->mutex);
if (!smu->disable_watermark && if (!smu->disable_watermark &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
...@@ -1851,6 +1903,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, ...@@ -1851,6 +1903,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
smu->watermarks_bitmap &= ~WATERMARKS_LOADED; smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
} }
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1890,3 +1944,549 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = ...@@ -1890,3 +1944,549 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.rev = 0, .rev = 0,
.funcs = &smu_ip_funcs, .funcs = &smu_ip_funcs,
}; };
int smu_load_microcode(struct smu_context *smu)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->funcs->load_microcode)
ret = smu->funcs->load_microcode(smu);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_check_fw_status(struct smu_context *smu)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->funcs->check_fw_status)
ret = smu->funcs->check_fw_status(smu);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->funcs->set_gfx_cgpg)
ret = smu->funcs->set_gfx_cgpg(smu, enabled);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->funcs->set_fan_speed_rpm)
ret = smu->funcs->set_fan_speed_rpm(smu, speed);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
bool def,
bool lock_needed)
{
int ret = 0;
if (lock_needed)
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_power_limit)
ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
if (lock_needed)
mutex_unlock(&smu->mutex);
return ret;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->funcs->set_power_limit)
ret = smu->funcs->set_power_limit(smu, limit);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->print_clk_levels)
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_od_percentage)
ret = smu->ppt_funcs->get_od_percentage(smu, type);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_od_percentage)
ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->od_edit_dpm_table)
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
mutex_unlock(&smu->mutex);
return ret;
}
int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
int ret = 0;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->read_sensor)
ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
mutex_unlock(&smu->mutex);
return ret;
}
/* Format the available/current power-profile modes into @buf. */
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		result = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Select a power-profile mode described by @param/@param_size.
 * @lock_needed: false only for swSMU-internal callers already holding smu->mutex.
 */
int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int result = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		result = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return result;
}
/* Return the current fan control mode (hwmon pwm1_enable backend). */
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->get_fan_control_mode)
		result = smu->funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Switch the fan control mode (hwmon pwm1_enable backend). */
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_fan_control_mode)
		result = smu->funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Read the fan speed as a percentage of maximum into @speed. */
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		result = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Set the fan speed as a percentage of maximum. */
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_fan_speed_percent)
		result = smu->funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Read the fan speed in RPM into @speed (hwmon fan1_input backend). */
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		result = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Program the deep-sleep DCEF clock (used by display code / DAL). */
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_deep_sleep_dcefclk)
		result = smu->funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Tell the SMU how many displays are currently active. */
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_active_display_count)
		result = smu->funcs->set_active_display_count(smu, count);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @clocks with the clock levels of @type (DAL interface). */
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->get_clock_by_type)
		result = smu->funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @clocks with the maximum engine/memory clocks (DAL interface). */
int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->get_max_high_clocks)
		result = smu->funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @clocks with levels of @clk_type including latency info (DAL). */
int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		result = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @clocks with levels of @type including voltage info (DAL). */
int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		result = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Forward a display clock/voltage request from DAL to the SMU. */
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->display_clock_voltage_request)
		result = smu->funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Enable/disable memory clock switching for display; -EINVAL when the
 * ASIC provides no implementation (deliberate non-zero default). */
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int result = -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		result = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Notify the SMU to enable the PWE (pixel clock wakeup event). */
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->notify_smu_enable_pwe)
		result = smu->funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Set the XGMI link pstate, serialized by smu->mutex. */
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_xgmi_pstate)
		result = smu->funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Signal D3 PME for the Azalia (HDA) controller via the SMU. */
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_azalia_d3_pme)
		result = smu->funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Report whether BACO (Bus Active, Chip Off) is supported; false when
 * no ASIC callback is provided. */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool supported = false;

	mutex_lock(&smu->mutex);

	if (smu->funcs->baco_is_support)
		supported = smu->funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return supported;
}
/*
 * smu_baco_get_state - query the current BACO state into @state.
 *
 * Returns 0 on success, -EINVAL when the ASIC implements no
 * baco_get_state callback.
 */
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	/* Fix inverted NULL check: the guard must reject a MISSING callback.
	 * The previous form returned -EINVAL whenever the callback existed
	 * and dereferenced a NULL function pointer when it did not. */
	if (!smu->funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}
/* Perform a BACO reset of the ASIC, serialized by smu->mutex. */
int smu_baco_reset(struct smu_context *smu)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->baco_reset)
		result = smu->funcs->baco_reset(smu);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Perform a mode-2 (soft) reset of the ASIC, serialized by smu->mutex. */
int smu_mode2_reset(struct smu_context *smu)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->mode2_reset)
		result = smu->funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @max_clocks with the max sustainable clocks for DC (display core). */
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->funcs->get_max_sustainable_clocks_by_dc)
		result = smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return result;
}
/* Fill @clock_values_in_khz/@num_states with the UCLK DPM states. */
int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		result = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return result;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_current_power_state)
pm_state = smu->ppt_funcs->get_current_power_state(smu);
mutex_unlock(&smu->mutex);
return pm_state;
}
/* Copy the full DPM clock table into @clock_table (display interface). */
int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int result = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		result = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return result;
}
}
...@@ -763,8 +763,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu, ...@@ -763,8 +763,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level; uint32_t soft_min_level, soft_max_level;
int ret = 0; int ret = 0;
mutex_lock(&(smu->mutex));
soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0;
...@@ -883,7 +881,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu, ...@@ -883,7 +881,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
break; break;
} }
mutex_unlock(&(smu->mutex));
return ret; return ret;
} }
......
...@@ -563,18 +563,17 @@ struct smu_funcs ...@@ -563,18 +563,17 @@ struct smu_funcs
((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0) ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
#define smu_fini_power(smu) \ #define smu_fini_power(smu) \
((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0) ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
#define smu_load_microcode(smu) \ int smu_load_microcode(struct smu_context *smu);
((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
#define smu_check_fw_status(smu) \ int smu_check_fw_status(struct smu_context *smu);
((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
#define smu_setup_pptable(smu) \ #define smu_setup_pptable(smu) \
((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0) ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
#define smu_powergate_sdma(smu, gate) \ #define smu_powergate_sdma(smu, gate) \
((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0) ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
#define smu_powergate_vcn(smu, gate) \ #define smu_powergate_vcn(smu, gate) \
((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0) ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
#define smu_set_gfx_cgpg(smu, enabled) \ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
#define smu_get_vbios_bootup_values(smu) \ #define smu_get_vbios_bootup_values(smu) \
((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0) ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
#define smu_get_clk_info_from_vbios(smu) \ #define smu_get_clk_info_from_vbios(smu) \
...@@ -605,8 +604,8 @@ struct smu_funcs ...@@ -605,8 +604,8 @@ struct smu_funcs
((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
#define smu_set_default_od_settings(smu, initialize) \ #define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
#define smu_set_fan_speed_rpm(smu, speed) \ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
#define smu_send_smc_msg(smu, msg) \ #define smu_send_smc_msg(smu, msg) \
((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0) ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
#define smu_send_smc_msg_with_param(smu, msg, param) \ #define smu_send_smc_msg_with_param(smu, msg, param) \
...@@ -637,20 +636,22 @@ struct smu_funcs ...@@ -637,20 +636,22 @@ struct smu_funcs
((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
#define smu_set_default_od8_settings(smu) \ #define smu_set_default_od8_settings(smu) \
((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
#define smu_get_power_limit(smu, limit, def) \
((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0) int smu_get_power_limit(struct smu_context *smu,
#define smu_set_power_limit(smu, limit) \ uint32_t *limit,
((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) bool def,
bool lock_needed);
int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
#define smu_get_current_clk_freq(smu, clk_id, value) \ #define smu_get_current_clk_freq(smu, clk_id, value) \
((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
#define smu_print_clk_levels(smu, clk_type, buf) \ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
#define smu_get_od_percentage(smu, type) \ int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
#define smu_set_od_percentage(smu, type, value) \ int smu_od_edit_dpm_table(struct smu_context *smu,
((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0) enum PP_OD_DPM_TABLE_COMMAND type,
#define smu_od_edit_dpm_table(smu, type, input, size) \ long *input, uint32_t size);
((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
#define smu_tables_init(smu, tab) \ #define smu_tables_init(smu, tab) \
((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
#define smu_set_thermal_fan_table(smu) \ #define smu_set_thermal_fan_table(smu) \
...@@ -659,14 +660,18 @@ struct smu_funcs ...@@ -659,14 +660,18 @@ struct smu_funcs
((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0) ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
#define smu_stop_thermal_control(smu) \ #define smu_stop_thermal_control(smu) \
((smu)->funcs->stop_thermal_control? (smu)->funcs->stop_thermal_control((smu)) : 0) ((smu)->funcs->stop_thermal_control? (smu)->funcs->stop_thermal_control((smu)) : 0)
#define smu_read_sensor(smu, sensor, data, size) \
((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0) int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size);
#define smu_smc_read_sensor(smu, sensor, data, size) \ #define smu_smc_read_sensor(smu, sensor, data, size) \
((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
#define smu_get_power_profile_mode(smu, buf) \ int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
#define smu_set_power_profile_mode(smu, param, param_size) \ int smu_set_power_profile_mode(struct smu_context *smu,
((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) long *param,
uint32_t param_size,
bool lock_needed);
#define smu_pre_display_config_changed(smu) \ #define smu_pre_display_config_changed(smu) \
((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
#define smu_display_config_changed(smu) \ #define smu_display_config_changed(smu) \
...@@ -683,16 +688,11 @@ struct smu_funcs ...@@ -683,16 +688,11 @@ struct smu_funcs
((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
#define smu_set_cpu_power_state(smu) \ #define smu_set_cpu_power_state(smu) \
((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
#define smu_get_fan_control_mode(smu) \ int smu_get_fan_control_mode(struct smu_context *smu);
((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0) int smu_set_fan_control_mode(struct smu_context *smu, int value);
#define smu_set_fan_control_mode(smu, value) \ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0) int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
#define smu_get_fan_speed_percent(smu, speed) \ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
#define smu_set_fan_speed_percent(smu, speed) \
((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
#define smu_get_fan_speed_rpm(smu, speed) \
((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
#define smu_msg_get_index(smu, msg) \ #define smu_msg_get_index(smu, msg) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
...@@ -710,38 +710,44 @@ struct smu_funcs ...@@ -710,38 +710,44 @@ struct smu_funcs
((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0) ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ #define smu_get_allowed_feature_mask(smu, feature_mask, num) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
#define smu_set_deep_sleep_dcefclk(smu, clk) \ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0) int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
#define smu_set_active_display_count(smu, count) \
((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ #define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
#define smu_get_clock_by_type(smu, type, clocks) \
((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0) int smu_get_clock_by_type(struct smu_context *smu,
#define smu_get_max_high_clocks(smu, clocks) \ enum amd_pp_clock_type type,
((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0) struct amd_pp_clocks *clocks);
#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0) int smu_get_max_high_clocks(struct smu_context *smu,
#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \ struct amd_pp_simple_clock_info *clocks);
((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
#define smu_display_clock_voltage_request(smu, clock_req) \ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0) enum smu_clk_type clk_type,
#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \ struct pp_clock_levels_with_latency *clocks);
((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req);
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
#define smu_get_dal_power_level(smu, clocks) \ #define smu_get_dal_power_level(smu, clocks) \
((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0) ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
#define smu_get_perf_level(smu, designation, level) \ #define smu_get_perf_level(smu, designation, level) \
((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0) ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ #define smu_get_current_shallow_sleep_clocks(smu, clocks) \
((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
#define smu_notify_smu_enable_pwe(smu) \ int smu_notify_smu_enable_pwe(struct smu_context *smu);
((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
#define smu_dpm_set_uvd_enable(smu, enable) \ #define smu_dpm_set_uvd_enable(smu, enable) \
((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
#define smu_dpm_set_vce_enable(smu, enable) \ #define smu_dpm_set_vce_enable(smu, enable) \
((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
#define smu_set_xgmi_pstate(smu, pstate) \
((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
#define smu_set_watermarks_table(smu, tab, clock_ranges) \ #define smu_set_watermarks_table(smu, tab, clock_ranges) \
((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ #define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
...@@ -752,22 +758,18 @@ struct smu_funcs ...@@ -752,22 +758,18 @@ struct smu_funcs
((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
#define smu_register_irq_handler(smu) \ #define smu_register_irq_handler(smu) \
((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
#define smu_set_azalia_d3_pme(smu) \
((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) int smu_set_azalia_d3_pme(struct smu_context *smu);
#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ #define smu_get_dpm_ultimate_freq(smu, param, min, max) \
((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) bool smu_baco_is_support(struct smu_context *smu);
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
#define smu_baco_is_support(smu) \
((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false) int smu_baco_reset(struct smu_context *smu);
#define smu_baco_get_state(smu, state) \
((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) int smu_mode2_reset(struct smu_context *smu);
#define smu_baco_reset(smu) \
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
#define smu_mode2_reset(smu) \
((smu)->funcs->mode2_reset? (smu)->funcs->mode2_reset((smu)) : 0)
#define smu_asic_set_performance_level(smu, level) \ #define smu_asic_set_performance_level(smu, level) \
((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
#define smu_dump_pptable(smu) \ #define smu_dump_pptable(smu) \
...@@ -776,8 +778,6 @@ struct smu_funcs ...@@ -776,8 +778,6 @@ struct smu_funcs
((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL) ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \ #define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
((smu)->funcs->set_soft_freq_limited_range ? (smu)->funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL) ((smu)->funcs->set_soft_freq_limited_range ? (smu)->funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
#define smu_get_dpm_clock_table(smu, clock_table) \
((smu)->ppt_funcs->get_dpm_clock_table ? (smu)->ppt_funcs->get_dpm_clock_table((smu), (clock_table)) : -EINVAL)
#define smu_override_pcie_parameters(smu) \ #define smu_override_pcie_parameters(smu) \
((smu)->funcs->override_pcie_parameters ? (smu)->funcs->override_pcie_parameters((smu)) : 0) ((smu)->funcs->override_pcie_parameters ? (smu)->funcs->override_pcie_parameters((smu)) : 0)
...@@ -831,7 +831,8 @@ extern int smu_get_current_clocks(struct smu_context *smu, ...@@ -831,7 +831,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu, extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level, enum amd_dpm_forced_level level,
enum amd_pp_task task_id); enum amd_pp_task task_id,
bool lock_needed);
int smu_switch_power_profile(struct smu_context *smu, int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type, enum PP_SMC_POWER_PROFILE type,
bool en); bool en);
...@@ -841,7 +842,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ ...@@ -841,7 +842,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value); uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max); uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max); uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
...@@ -856,10 +857,21 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); ...@@ -856,10 +857,21 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
int smu_force_clk_levels(struct smu_context *smu, int smu_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, enum smu_clk_type clk_type,
uint32_t mask); uint32_t mask,
bool lock_needed);
int smu_set_mp1_state(struct smu_context *smu, int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state); enum pp_mp1_state mp1_state);
int smu_set_df_cstate(struct smu_context *smu, int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state); enum pp_df_cstate state);
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
int smu_get_uclk_dpm_states(struct smu_context *smu,
unsigned int *clock_values_in_khz,
unsigned int *num_states);
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table);
#endif #endif
...@@ -796,13 +796,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) ...@@ -796,13 +796,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0; int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL); ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret) if (ret)
return ret; return ret;
smu->pstate_sclk = min_sclk_freq * 100; smu->pstate_sclk = min_sclk_freq * 100;
ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL); ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret) if (ret)
return ret; return ret;
...@@ -855,7 +855,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) ...@@ -855,7 +855,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret; return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq); ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq); ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
...@@ -905,7 +905,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) ...@@ -905,7 +905,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
...@@ -932,7 +932,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) ...@@ -932,7 +932,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
...@@ -1267,7 +1267,10 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu) ...@@ -1267,7 +1267,10 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
if (!smu_display_clock_voltage_request(smu, &clock_req)) {
if (smu->funcs->display_clock_voltage_request)
ret = smu->funcs->display_clock_voltage_request(smu, &clock_req);
if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, SMU_MSG_SetMinDeepSleepDcefclk,
......
...@@ -194,7 +194,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, ...@@ -194,7 +194,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK: case SMU_SCLK:
/* retirve table returned paramters unit is MHz */ /* retirve table returned paramters unit is MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max); ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
if (!ret) { if (!ret) {
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (cur_value == max) if (cur_value == max)
...@@ -251,7 +251,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context ...@@ -251,7 +251,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
!smu_dpm_ctx->dpm_current_power_state) !smu_dpm_ctx->dpm_current_power_state)
return -EINVAL; return -EINVAL;
mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY: case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY; pm_type = POWER_STATE_TYPE_BATTERY;
...@@ -269,7 +268,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context ...@@ -269,7 +268,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
pm_type = POWER_STATE_TYPE_DEFAULT; pm_type = POWER_STATE_TYPE_DEFAULT;
break; break;
} }
mutex_unlock(&(smu->mutex));
return pm_type; return pm_type;
} }
...@@ -314,7 +312,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) ...@@ -314,7 +312,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
...@@ -348,7 +346,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { ...@@ -348,7 +346,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
clk_type = clk_feature_map[i].clk_type; clk_type = clk_feature_map[i].clk_type;
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
...@@ -469,7 +467,7 @@ static int renoir_force_clk_levels(struct smu_context *smu, ...@@ -469,7 +467,7 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return -EINVAL; return -EINVAL;
} }
ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq); ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
...@@ -545,7 +543,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) ...@@ -545,7 +543,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
int ret = 0; int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t sclk_freq = 0, uclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq); ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
if (ret) if (ret)
return ret; return ret;
...@@ -553,7 +551,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) ...@@ -553,7 +551,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret) if (ret)
return ret; return ret;
ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq); ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
if (ret) if (ret)
return ret; return ret;
......
...@@ -792,8 +792,11 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) ...@@ -792,8 +792,11 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context) if (!table_context)
return -EINVAL; return -EINVAL;
return smu_set_deep_sleep_dcefclk(smu, if (smu->funcs->set_deep_sleep_dcefclk)
table_context->boot_values.dcefclk / 100); return smu->funcs->set_deep_sleep_dcefclk(smu,
table_context->boot_values.dcefclk / 100);
return 0;
} }
static int smu_v11_0_set_tool_table_location(struct smu_context *smu) static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
...@@ -1308,9 +1311,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, ...@@ -1308,9 +1311,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch) if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0; return 0;
mutex_lock(&smu->mutex);
ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0); ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
mutex_unlock(&smu->mutex);
if(clk_select == SMU_UCLK) if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq; smu->hard_min_uclk_req_from_dal = clk_freq;
...@@ -1333,12 +1334,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) ...@@ -1333,12 +1334,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI12: case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0; return 0;
mutex_lock(&smu->mutex);
if (enable) if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
mutex_unlock(&smu->mutex);
break; break;
default: default:
break; break;
...@@ -1454,10 +1453,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ...@@ -1454,10 +1453,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (!speed) if (!speed)
return -EINVAL; return -EINVAL;
mutex_lock(&(smu->mutex));
ret = smu_v11_0_auto_fan_control(smu, 0); ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret) if (ret)
goto set_fan_speed_rpm_failed; return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev); crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
...@@ -1468,8 +1466,6 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ...@@ -1468,8 +1466,6 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
set_fan_speed_rpm_failed:
mutex_unlock(&(smu->mutex));
return ret; return ret;
} }
...@@ -1480,11 +1476,9 @@ static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, ...@@ -1480,11 +1476,9 @@ static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate) uint32_t pstate)
{ {
int ret = 0; int ret = 0;
mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu, ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode, SMU_MSG_SetXgmiMode,
pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
mutex_unlock(&(smu->mutex));
return ret; return ret;
} }
...@@ -1597,9 +1591,7 @@ static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) ...@@ -1597,9 +1591,7 @@ static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{ {
int ret = 0; int ret = 0;
mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -1696,7 +1688,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ...@@ -1696,7 +1688,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
int ret = 0, clk_id = 0; int ret = 0, clk_id = 0;
uint32_t param = 0; uint32_t param = 0;
mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type); clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) { if (clk_id < 0) {
ret = -EINVAL; ret = -EINVAL;
...@@ -1723,7 +1714,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ...@@ -1723,7 +1714,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
} }
failed: failed:
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
......
...@@ -316,8 +316,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ...@@ -316,8 +316,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
int ret = 0; int ret = 0;
uint32_t mclk_mask, soc_mask; uint32_t mclk_mask, soc_mask;
mutex_lock(&smu->mutex);
if (max) { if (max) {
ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
NULL, NULL,
...@@ -387,7 +385,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ...@@ -387,7 +385,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
} }
} }
failed: failed:
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
......
...@@ -635,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) ...@@ -635,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state) !smu_dpm_ctx->dpm_current_power_state)
return -EINVAL; return -EINVAL;
mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY: case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY; pm_type = POWER_STATE_TYPE_BATTERY;
...@@ -653,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) ...@@ -653,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT; pm_type = POWER_STATE_TYPE_DEFAULT;
break; break;
} }
mutex_unlock(&(smu->mutex));
return pm_type; return pm_type;
} }
...@@ -1277,8 +1275,6 @@ static int vega20_force_clk_levels(struct smu_context *smu, ...@@ -1277,8 +1275,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level, hard_min_level; uint32_t soft_min_level, soft_max_level, hard_min_level;
int ret = 0; int ret = 0;
mutex_lock(&(smu->mutex));
soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0;
...@@ -1431,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu, ...@@ -1431,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break; break;
} }
mutex_unlock(&(smu->mutex));
return ret; return ret;
} }
...@@ -1446,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, ...@@ -1446,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context; dpm_table = smu_dpm->dpm_context;
mutex_lock(&smu->mutex);
switch (clk_type) { switch (clk_type) {
case SMU_GFXCLK: case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table); single_dpm_table = &(dpm_table->gfx_table);
...@@ -1469,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, ...@@ -1469,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL; ret = -EINVAL;
} }
mutex_unlock(&smu->mutex);
return ret; return ret;
} }
...@@ -2542,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu, ...@@ -2542,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled; int feature_enabled;
PPCLK_e clk_id; PPCLK_e clk_id;
mutex_lock(&(smu->mutex));
dpm_table = smu_dpm->dpm_context; dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context; golden_table = smu_dpm->golden_dpm_context;
...@@ -2593,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu, ...@@ -2593,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
} }
ret = smu_handle_task(smu, smu_dpm->dpm_level, ret = smu_handle_task(smu, smu_dpm->dpm_level,
AMD_PP_TASK_READJUST_POWER_STATE); AMD_PP_TASK_READJUST_POWER_STATE,
false);
set_od_failed: set_od_failed:
mutex_unlock(&(smu->mutex));
return ret; return ret;
} }
...@@ -2822,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, ...@@ -2822,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
} }
if (type == PP_OD_COMMIT_DPM_TABLE) { if (type == PP_OD_COMMIT_DPM_TABLE) {
mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level, ret = smu_handle_task(smu, smu_dpm->dpm_level,
AMD_PP_TASK_READJUST_POWER_STATE); AMD_PP_TASK_READJUST_POWER_STATE,
mutex_unlock(&(smu->mutex)); false);
} }
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment